Dataset columns (one row per source file):

| column | type | range / values |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 4-1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4-209 |
| max_stars_repo_name | string | lengths 5-121 |
| max_stars_repo_head_hexsha | string | lengths 40-40 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_path | string | lengths 4-209 |
| max_issues_repo_name | string | lengths 5-121 |
| max_issues_repo_head_hexsha | string | lengths 40-40 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_path | string | lengths 4-209 |
| max_forks_repo_name | string | lengths 5-121 |
| max_forks_repo_head_hexsha | string | lengths 40-40 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 ⌀ |
| content | string | lengths 4-1.02M |
| avg_line_length | float64 | 1.07-66.1k |
| max_line_length | int64 | 4-266k |
| alphanum_fraction | float64 | 0.01-1 |
hexsha: 66751b94fff83bdacd031d0bcc851fdda5cb0daf | size: 3,172 | ext: py | lang: Python
max_stars:  snark/synnetdb.py | hiraokusky/snark | e3080d6d005cd126751efb3e25f158ee258cbc08 | ["Apache-2.0"] | count: 6 | 2018-12-22T13:15:50.000Z | 2020-04-12T14:18:34.000Z
max_issues: snark/synnetdb.py | hiraokusky/snark | e3080d6d005cd126751efb3e25f158ee258cbc08 | ["Apache-2.0"] | count: null | null | null
max_forks:  snark/synnetdb.py | hiraokusky/snark | e3080d6d005cd126751efb3e25f158ee258cbc08 | ["Apache-2.0"] | count: null | null | null
"""
Copyright 2019 hiraokusky
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import re
import json
import sys,os
# pip install git+https://github.com/hiraokusky/snark
from snark import wordnetdb, kanadb
class SynNetDb:
"""
wordnetを簡単に処理するための一時記憶
RDF DBも併せて処理する
word in synset . 同義語, 個物化
synset isa synset . 抽象化, 属性
synset hasa synset . 部分/属性/材質/所有(目的語を抱えている状態の主語)
synset then synset . 状態遷移(主語から主語へ移る), 条件つきの場合は条件の付いたsynsetができる
"""
startdict = None
v = False
def __init__(self, opts=''):
self.v = 'v' in opts
def load_file(self, path):
self.startdict = pd.read_csv(path)
self.startdict = self.startdict.fillna('')
def save_file(self, path):
self.startdict.to_csv(path, index=False)
def load_same_words_from_db(self, path, key):
wn = wordnetdb.WordNetDb(path)
cur = wn.get_same_words_by_lemma(key)
for c in cur:
self.add_link(c[3], 'in', c[1])
def select(self, key):
d = self.startdict
res = d[d['synset1'] == key]
return res
def select_link(self, key, link='isa'):
d = self.startdict
res = d[(d['synset1'] == key) & (d['link'] == link)]
l = []
for r in res.values:
l.append(r[2])
return l
def select_link_ref(self, key, link='isa'):
d = self.startdict
res = d[(d['synset2'] == key) & (d['link'] == link)]
l = []
for r in res.values:
l.append(r[0])
return l
def select_isa(self, key):
"""
isaリンクとinリンクを持つ全synsetを取得する
Returns
-------
[synset, ...]
"""
d = self.startdict
res = d[(d['synset1'] == key) & ((d['link'] == 'in') | (d['link'] == 'isa'))]
l = []
for r in res.values:
l.append(r[2])
return l
def select_same(self, key):
"""
keyと同じinを持つ全synsetを取得する
Returns
-------
[(synset, [word, ...]), ...]
"""
isa = self.select_link(key, 'in')
res = []
for i in isa:
ref = self.select_link_ref(i, 'in')
res.append((i, ref))
return res
def add_link(self, synset1, link, synset2):
"""
リンクを追加する
同じデータがあれば追加しない
"""
d = self.startdict
res = d[(d['synset1'] == synset1) & (d['link'] == link) & (d['synset2'] == synset2) ]
if len(res) == 0:
            # DataFrame.append was removed in pandas 2.0; build a one-row frame and concat instead
            tmp_se = pd.DataFrame([[synset1, link, synset2]], columns=['synset1', 'link', 'synset2'])
            self.startdict = pd.concat([self.startdict, tmp_se], ignore_index=True)
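A minimal usage sketch of the in/isa link API described in the class docstring above; the import path, synset IDs, and the empty-DataFrame setup are placeholders rather than anything taken from the snark repo:

```python
import pandas as pd
from snark.synnetdb import SynNetDb  # assumes the package layout shown above

db = SynNetDb()
# Instead of load_file(), start from an empty store with the expected CSV columns.
db.startdict = pd.DataFrame(columns=['synset1', 'link', 'synset2'])

# "dog" belongs to a (placeholder) synset, which in turn is-a more abstract synset.
db.add_link('dog', 'in', '02084071-n')
db.add_link('02084071-n', 'isa', '02075296-n')

print(db.select_isa('dog'))          # ['02084071-n']  (follows in/isa links from the word)
print(db.select_link('02084071-n'))  # ['02075296-n']  (isa links by default)
```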
avg_line_length: 27.582609 | max_line_length: 94 | alphanum_fraction: 0.571248

hexsha: 1d2bd351e185a5bd4eb5d1af9d4607073ca56545 | size: 429 | ext: py | lang: Python
max_stars:  scripts_ufes/paper.py | lmlima/DropoutRL | 00db2e901c320cf12c60c5039561999d45591bd1 | ["BSD-3-Clause"] | count: null | null | null
max_issues: scripts_ufes/paper.py | lmlima/DropoutRL | 00db2e901c320cf12c60c5039561999d45591bd1 | ["BSD-3-Clause"] | count: null | null | null
max_forks:  scripts_ufes/paper.py | lmlima/DropoutRL | 00db2e901c320cf12c60c5039561999d45591bd1 | ["BSD-3-Clause"] | count: null | null | null
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import MinMaxScaler
from scripts_ufes.data.evasao.gen_discrete_student import plot3d_state_discrete
def fig3():
df = pd.read_pickle("tmp/data/xmeans/2/pca_data_discretization.dataframe.pkl")
view = {'elev': 62, 'azim': -32}
plot3d_state_discrete(df, output='/tmp', view=view)
avg_line_length: 30.642857 | max_line_length: 82 | alphanum_fraction: 0.780886

hexsha: 4bccacf5aa62294eed7144f7a62e1fd8e056c579 | size: 1,238 | ext: py | lang: Python
max_stars:  src/test/resource/simpleWebInPython2.py | SmallTianTian/sse | cfbbb2d7d7795519345ce19ee4b0599c212cbcf9 | ["Apache-2.0"] | count: null | null | null
max_issues: src/test/resource/simpleWebInPython2.py | SmallTianTian/sse | cfbbb2d7d7795519345ce19ee4b0599c212cbcf9 | ["Apache-2.0"] | count: 1 | 2018-02-20T10:17:20.000Z | 2018-02-20T10:17:20.000Z
max_forks:  src/test/resource/simpleWebInPython2.py | SmallTianTian/sse | cfbbb2d7d7795519345ce19ee4b0599c212cbcf9 | ["Apache-2.0"] | count: 1 | 2019-06-25T08:41:57.000Z | 2019-06-25T08:41:57.000Z
#coding=utf-8
from wsgiref.simple_server import make_server
import time
import sys
def sse_content():
file_name = sys.path[0] + '/SSENormText.txt'
with open(file_name, 'r') as f:
return f.readlines()
sse_content = sse_content()
def application(environ, start_response):
if environ['PATH_INFO'] == '/sse_without_cookie':
return sse_without_cookie(environ, start_response)
start_response('200 OK',[('Content-Type','text/html')])
return '<h1>Hello, web! </h1>'
def sse_without_cookie(environ, start_response):
charset = environ['HTTP_ACCEPT_CHARSET'] if environ.has_key('HTTP_ACCEPT_CHARSET') else 'UTF-8'
start_response('200 OK',[('Content-Type','text/event-stream'), ('charset', charset)])
last_event_id = 'id: ' + environ['HTTP_LAST_EVENT_ID'] + '\n' if environ.has_key('HTTP_LAST_EVENT_ID') else None
start_in_content = sse_content.index(last_event_id) if last_event_id else 0
content = sse_content[start_in_content:]
for item in content:
time.sleep(1)
yield item.decode('utf-8').encode(charset)
if __name__ == '__main__':
httpd = make_server('', 8888, application)
print("Serving HTTP on port 8888...")
    # Start listening for HTTP requests:
httpd.serve_forever()
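For context, a small client sketch for consuming the event stream served above; it uses the third-party requests library, and the port and Last-Event-ID handling mirror the server code (the event id "3" is a placeholder):

```python
import requests  # third-party HTTP client, used here only for illustration

url = "http://localhost:8888/sse_without_cookie"
headers = {"Last-Event-ID": "3", "Accept-Charset": "UTF-8"}

with requests.get(url, headers=headers, stream=True) as resp:
    resp.raise_for_status()
    # The server replays the text file line by line, one second apart,
    # starting after the id line matching Last-Event-ID (if any).
    for line in resp.iter_lines(decode_unicode=True):
        if line:  # blank lines separate SSE events
            print(line)
```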
avg_line_length: 36.411765 | max_line_length: 116 | alphanum_fraction: 0.699515

hexsha: aeb64ebb8f1e363a30bcd327dd4302e75c8c9d3c | size: 8,032 | ext: py | lang: Python
max_stars:  tests/unit/configure/test_TableConfigCompleter.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | ["BSD-3-Clause"] | count: null | null | null
max_issues: tests/unit/configure/test_TableConfigCompleter.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | ["BSD-3-Clause"] | count: null | null | null
max_forks:  tests/unit/configure/test_TableConfigCompleter.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | ["BSD-3-Clause"] | count: null | null | null
# Tai Sakuma <tai.sakuma@gmail.com>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.configure import TableConfigCompleter
##__________________________________________________________________||
class MockDefaultSummary:
pass
class MockSummary2:
pass
class MockWeight:
pass
class MockBinning:
pass
##__________________________________________________________________||
defaultWeight = MockWeight()
@pytest.fixture()
def obj():
return TableConfigCompleter(
defaultSummaryClass=MockDefaultSummary,
defaultWeight=defaultWeight,
defaultOutDir='tmp'
)
##__________________________________________________________________||
def test_repr(obj):
repr(obj)
##__________________________________________________________________||
binning1 = MockBinning()
binning2 = MockBinning()
@pytest.mark.parametrize('arg, expected', [
pytest.param(
dict(),
dict(
keyAttrNames=(),
keyIndices=None,
binnings=None,
keyOutColumnNames=(),
valAttrNames=None,
valIndices=None,
summaryClass=MockDefaultSummary,
valOutColumnNames=('n', 'nvar'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_n.txt',
outFilePath='tmp/tbl_n.txt',
),
id='empty'
),
pytest.param(
dict(
keyAttrNames=('met_pt', ),
binnings=(binning1, ),
),
dict(
keyAttrNames=('met_pt',),
keyIndices=None,
binnings=(binning1, ),
keyOutColumnNames=('met_pt',),
valAttrNames=None,
valIndices=None,
summaryClass=MockDefaultSummary,
valOutColumnNames=('n', 'nvar'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_n.met_pt.txt',
outFilePath='tmp/tbl_n.met_pt.txt',
),
id='simple'
),
pytest.param(
dict(
keyAttrNames=( ),
binnings=( )
),
dict(
keyAttrNames=(),
keyIndices=None,
binnings=(),
keyOutColumnNames=(),
valAttrNames=None,
valIndices=None,
summaryClass=MockDefaultSummary,
valOutColumnNames=('n', 'nvar'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_n.txt',
outFilePath='tmp/tbl_n.txt',
),
id='empty-key'
),
pytest.param(
dict(
keyAttrNames=( ),
binnings=( ),
summaryClass=MockSummary2,
),
dict(
keyAttrNames=(),
keyIndices=None,
binnings=(),
keyOutColumnNames=(),
valAttrNames=None,
valIndices=None,
summaryClass=MockSummary2,
valOutColumnNames=(),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.txt',
outFilePath='tmp/tbl_MockSummary2.txt',
),
id='summary-class-empty-key-empty-val'
),
pytest.param(
dict(
keyAttrNames=('key1', 'key2'),
binnings=(binning1, binning2),
summaryClass=MockSummary2,
),
dict(
keyAttrNames=('key1', 'key2'),
keyIndices=None,
binnings=(binning1, binning2),
keyOutColumnNames=('key1', 'key2'),
valAttrNames=None,
valIndices=None,
summaryClass=MockSummary2,
valOutColumnNames=(),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.key1.key2.txt',
outFilePath='tmp/tbl_MockSummary2.key1.key2.txt',
),
id='summary-class-2-keys-empty-vals'
),
pytest.param(
dict(
keyAttrNames=('key1', 'key2'),
binnings=(binning1, binning2),
valAttrNames=('val1', 'val2'),
summaryClass=MockSummary2,
),
dict(
keyAttrNames=('key1', 'key2'),
keyIndices=None,
binnings=(binning1, binning2),
keyOutColumnNames=('key1', 'key2'),
valAttrNames=('val1', 'val2'),
valIndices=None,
summaryClass=MockSummary2,
valOutColumnNames=('val1', 'val2'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.key1.key2.val1.val2.txt',
outFilePath='tmp/tbl_MockSummary2.key1.key2.val1.val2.txt',
),
id='summary-class-2-keys-2-vals'
),
pytest.param(
dict(
keyAttrNames=('key1', 'key2'),
binnings=(binning1, binning2),
keyIndices=(None, 1),
valAttrNames=('val1', 'val2'),
summaryClass=MockSummary2,
),
dict(
keyAttrNames=('key1', 'key2'),
keyIndices=(None, 1),
binnings=(binning1, binning2),
keyOutColumnNames=('key1', 'key2'),
valAttrNames=('val1', 'val2'),
valIndices=None,
summaryClass=MockSummary2,
valOutColumnNames=('val1', 'val2'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.key1.key2-1.val1.val2.txt',
outFilePath='tmp/tbl_MockSummary2.key1.key2-1.val1.val2.txt',
),
id='summary-class-2-keys-2-vals-key-indices'
),
pytest.param(
dict(
keyAttrNames=('key1', 'key2'),
binnings=(binning1, binning2),
valAttrNames=('val1', 'val2'),
summaryClass=MockSummary2,
valIndices=(2, None),
),
dict(
keyAttrNames=('key1', 'key2'),
keyIndices=None,
binnings=(binning1, binning2),
keyOutColumnNames=('key1', 'key2'),
valAttrNames=('val1', 'val2'),
valIndices=(2, None),
summaryClass=MockSummary2,
valOutColumnNames=('val1', 'val2'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.key1.key2.val1-2.val2.txt',
outFilePath='tmp/tbl_MockSummary2.key1.key2.val1-2.val2.txt',
),
id='summary-class-2-keys-2-vals-val-indices'
),
pytest.param(
dict(
keyAttrNames=('key1', 'key2'),
binnings=(binning1, binning2),
keyIndices=(None, 1),
valAttrNames=('val1', 'val2'),
summaryClass=MockSummary2,
valIndices=(2, 3),
),
dict(
keyAttrNames=('key1', 'key2'),
keyIndices=(None, 1),
binnings=(binning1, binning2),
keyOutColumnNames=('key1', 'key2'),
valAttrNames=('val1', 'val2'),
valIndices=(2, 3),
summaryClass=MockSummary2,
valOutColumnNames=('val1', 'val2'),
weight=defaultWeight,
sort=True,
nevents=None,
outFile=True,
outFileName='tbl_MockSummary2.key1.key2-1.val1-2.val2-3.txt',
outFilePath='tmp/tbl_MockSummary2.key1.key2-1.val1-2.val2-3.txt',
),
id='summary-class-2-keys-2-vals-key-indices-val-indices'
),
])
def test_complete(obj, arg, expected):
actual = obj.complete(arg)
assert expected == actual
assert arg is not actual
##__________________________________________________________________||
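The parametrized cases above pin down how complete() fills defaults and derives output file names. The helper below is a hypothetical reconstruction of just that naming rule, written to reproduce the expected outFileName values in these tests; it is not AlphaTwirl's actual implementation:

```python
def build_out_file_name(summary_cls, key_names=(), key_indices=None,
                        val_names=(), val_indices=None):
    # 'n' stands in for the default summary; otherwise the summary class name is used.
    prefix = 'n' if summary_cls is MockDefaultSummary else summary_cls.__name__
    parts = [prefix]
    for names, indices in ((key_names, key_indices), (val_names, val_indices)):
        indices = indices if indices is not None else (None,) * len(names)
        for name, idx in zip(names, indices):
            # append '-<index>' only when an index is given for that attribute
            parts.append(name if idx is None else '{}-{}'.format(name, idx))
    return 'tbl_{}.txt'.format('.'.join(parts))

# build_out_file_name(MockSummary2, ('key1', 'key2'), (None, 1), ('val1', 'val2'), (2, 3))
# -> 'tbl_MockSummary2.key1.key2-1.val1-2.val2-3.txt', matching the last case above
```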
avg_line_length: 29.748148 | max_line_length: 77 | alphanum_fraction: 0.538347

hexsha: cd864ed1e821fec7a935af7feab16b488f0dec3d | size: 738 | ext: py | lang: Python
max_stars:  scripts/despacify.py | kennethreitz/em | 9565bdde0b1acfb53d849cc526aea7ec1acf6217 | ["0BSD"] | count: 198 | 2016-03-10T06:43:47.000Z | 2017-07-13T19:35:14.000Z
max_issues: scripts/despacify.py | not-kennethreitz/em-keyboard | 9565bdde0b1acfb53d849cc526aea7ec1acf6217 | ["0BSD"] | count: 22 | 2016-03-10T23:29:36.000Z | 2017-07-03T00:55:06.000Z
max_forks:  scripts/despacify.py | not-kennethreitz/em-keyboard | 9565bdde0b1acfb53d849cc526aea7ec1acf6217 | ["0BSD"] | count: 24 | 2016-05-16T19:42:41.000Z | 2017-07-27T15:15:03.000Z
"""
Replace spaces in emoji keywords with underscores
"""
from __future__ import annotations
import json
from em import parse_emojis
INPUT_EMOJILIB_PATH = "em/emoji-en-US.json"
OUTPUT_EMOJI_PATH = "em/emojis.json"
def save_emojis(data: dict[str, list[str]], filename: str) -> None:
with open(filename, "w") as outfile:
json.dump(data, outfile, indent=None, separators=(",", ":"))
def main() -> None:
data = parse_emojis(INPUT_EMOJILIB_PATH)
for emoji, keywords in data.items():
keywords = [keyword.replace(" ", "_") for keyword in keywords]
data[emoji] = keywords
save_emojis(data, OUTPUT_EMOJI_PATH)
print(f"Emojis saved to {OUTPUT_EMOJI_PATH}")
if __name__ == "__main__":
main()
avg_line_length: 24.6 | max_line_length: 70 | alphanum_fraction: 0.685637

hexsha: 366891a03d3e99a5ba0a4d3bbacec240e73a1fae | size: 116 | ext: py | lang: Python
max_stars:  simeng/test_objects/moving_point.py | OWigginsHay/simulation_engine | bb2ab5578d05b0235fd90e4b858f5e83b20257cc | ["MIT"] | count: null | null | null
max_issues: simeng/test_objects/moving_point.py | OWigginsHay/simulation_engine | bb2ab5578d05b0235fd90e4b858f5e83b20257cc | ["MIT"] | count: 2 | 2021-12-01T16:58:37.000Z | 2021-12-03T23:11:56.000Z
max_forks:  simeng/test_objects/moving_point.py | OWigginsHay/simulation_engine | bb2ab5578d05b0235fd90e4b858f5e83b20257cc | ["MIT"] | count: null | null | null
from ..object import Object
class MovingPoint(Object):
def behaviour(self):
return super().behaviour()
avg_line_length: 19.333333 | max_line_length: 34 | alphanum_fraction: 0.689655

hexsha: a6df7e3bbb0fb4efe2e64f3c1fc7754bbf23b97a | size: 4,105 | ext: py | lang: Python
max_stars:  Plots/Bar/NCL_bar_7.py | NCAR/GeoCAT-examples | fba1b045ba5145fa48cf2f3c1e3b3c7c863b0b5b | ["Apache-2.0"] | count: 42 | 2020-03-03T16:19:30.000Z | 2022-03-18T09:03:26.000Z
max_issues: Plots/Bar/NCL_bar_7.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | ["Apache-2.0"] | count: 351 | 2019-12-20T22:10:47.000Z | 2022-03-16T20:46:09.000Z
max_forks:  Plots/Bar/NCL_bar_7.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | ["Apache-2.0"] | count: 32 | 2020-01-06T21:18:48.000Z | 2022-03-31T13:45:01.000Z
"""
NCL_bar_7.py
===============
This script illustrates the following concepts:
- Drawing filled bars
- Filling the bars in a bar plot with different colors
- Setting the minimum/maximum value of the Y axis in a bar plot
- Adding text to a plot
- Rotating text 45 degrees
- Drawing a custom legend
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/bar_7.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/bar_7_1_lg.png and https://www.ncl.ucar.edu/Applications/Images/bar_7_2_lg.png
"""
###############################################################################
# Import packages:
import numpy as np
import matplotlib.pyplot as plt
from geocat.viz import util as gvutil
###############################################################################
# Generate data:
x = [1, 2, 3, 4, 5, 6, 7, 8]
data = [154900, 56600, 40000, 30200, 29700, 24400, 21700, 13900]
labels = [
'Lung', 'Colon/rectum', 'Breast', 'Prostate', 'Pancreas',
'Non-Hodgkin\'s Lymphoma', 'Leukemias', 'Ovary'
]
###############################################################################
# Create the custom color list.
color_list = [
'firebrick', 'red', 'orange', 'green', 'navy', 'blue', 'skyblue',
'slateblue'
]
###############################################################################
# Specify some plot settings to use in both plots:
# Title settings
title = 'Estimated Cancer Deaths for 2002'
title_fontsize = 16
# Axis Settings
plot_y_max = 180_000
###############################################################################
# Plot 1 (Bar chart):
# Generate figure (set its size (width, height) in inches) and axes
plt.figure(1, figsize=(6, 5))
ax = plt.gca()
# Bar-plot the data
plt.bar(x, data, color=color_list, edgecolor='black')
plt.title(title, fontsize=title_fontsize, y=1.04)
# Add a rotated label to each bar.
for k, label in enumerate(labels):
plt.text(x[k], data[k] + 2000, label, rotation=45)
# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions
gvutil.set_axes_limits_and_ticks(ax,
ylim=(0, plot_y_max),
xticks=[],
yticks=np.linspace(0, plot_y_max, 7))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=0,
y_minor_per_major=3,
labelsize=12)
# Draw plot on the screen.
plt.show()
###############################################################################
# Plot 2 (Bar chart with a legend):
# Generate figure (set its size (width, height) in inches) and axes
plt.figure(2, figsize=(6, 5))
ax = plt.gca()
# Bar-plot the data
bar_handle = plt.bar(x, data, color=color_list, edgecolor='black')
# Reverse the legend ordering to match NCL
bars_reversed = bar_handle[::-1]
labels_reversed = labels[::-1]
# Add the legend
plt.legend(bars_reversed, labels_reversed)
# Use geocat.viz.util convenience function to set axes limits & tick values without calling several matplotlib functions
gvutil.set_axes_limits_and_ticks(ax,
ylim=(0, plot_y_max),
xticks=[],
yticks=np.linspace(0, plot_y_max, 7))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax,
x_minor_per_major=0,
y_minor_per_major=3,
labelsize=12)
# Use geocat.viz.util convenience function to set titles and labels without calling several matplotlib functions
gvutil.set_titles_and_labels(ax,
maintitle=title,
maintitlefontsize=title_fontsize,
ylabel="Number of Deaths")
# Show the plot
plt.tight_layout()
plt.show()
avg_line_length: 34.495798 | max_line_length: 148 | alphanum_fraction: 0.568331

hexsha: f461b5ea3f7d8fdc3436b93aac92e5a01a276662 | size: 398 | ext: py | lang: Python
max_stars:  apps/discussion/urls.py | umairqadir97/learning-management-system | 9924326e77146830f3fb05a9d86f876f86c4d9b8 | ["MIT"] | count: 7 | 2020-06-03T15:31:44.000Z | 2021-11-21T21:19:59.000Z
max_issues: apps/discussion/urls.py | umairqadir97/learning-management-system | 9924326e77146830f3fb05a9d86f876f86c4d9b8 | ["MIT"] | count: null | null | null
max_forks:  apps/discussion/urls.py | umairqadir97/learning-management-system | 9924326e77146830f3fb05a9d86f876f86c4d9b8 | ["MIT"] | count: 10 | 2020-11-06T00:40:01.000Z | 2022-01-12T03:20:14.000Z
from django.conf.urls import url, include
from . import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('topics', views.TopicViewSet)
router.register('entries', views.EntryViewSet)
# router.register('replies', views.ReplyViewSet)
urlpatterns = [
# url(r'^', views.TopicListView.as_view(), name='list_topic'),
url(r'^', include(router.urls)),
]
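As a point of reference, DRF's DefaultRouter expands each register() call above into list and detail routes. The standalone sketch below illustrates that behavior with a stand-in model; the route names assume the default basename derived from the viewset's queryset, and none of this is taken from the project above:

```python
from django.contrib.auth.models import User  # stand-in model for illustration
from rest_framework import routers, serializers, viewsets


class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        fields = ['id', 'username']


class UserViewSet(viewsets.ModelViewSet):
    queryset = User.objects.all()
    serializer_class = UserSerializer


router = routers.DefaultRouter()
router.register('users', UserViewSet)
# router.urls now contains roughly:
#   users/      -> 'user-list'   (GET list, POST create)
#   users/<pk>/ -> 'user-detail' (GET retrieve, PUT/PATCH update, DELETE destroy)
```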
avg_line_length: 24.875 | max_line_length: 66 | alphanum_fraction: 0.738693

hexsha: 6cc23801008d1402a598c1c7a3af190e1fb1e8db | size: 447 | ext: py | lang: Python
max_stars:  hw_asr/model/__init__.py | s-isaev/HW.01.ASR | 12963865ed29cbb29e33658f25f7da20f67d8421 | ["MIT"] | count: null | null | null
max_issues: hw_asr/model/__init__.py | s-isaev/HW.01.ASR | 12963865ed29cbb29e33658f25f7da20f67d8421 | ["MIT"] | count: null | null | null
max_forks:  hw_asr/model/__init__.py | s-isaev/HW.01.ASR | 12963865ed29cbb29e33658f25f7da20f67d8421 | ["MIT"] | count: null | null | null
from hw_asr.model.baseline_model import BaselineModel
from hw_asr.model.overfit_model import OverfitModel
from hw_asr.model.model_01 import Model01
from hw_asr.model.model_02 import Model02
from hw_asr.model.pet_conv_batchnorm_model import PetConvBatchnormModel
from hw_asr.model.conv_batchnorm_model import ConvBatchnormModel
__all__ = [
"BaselineModel", "OverfitModel", "Model01", "Model02", "PetConvBatchnormModel", "ConvBatchnormModel"
]
avg_line_length: 40.636364 | max_line_length: 104 | alphanum_fraction: 0.841163

hexsha: 28b482cfa404b067c39cddf1feca2ecec93b75da | size: 470 | ext: py | lang: Python
max_stars:  rapport/sentence_tokenizer.py | oceandelee/tac | 62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c | ["MIT"] | count: 2 | 2021-10-17T00:49:16.000Z | 2021-10-17T00:49:23.000Z
max_issues: rapport/sentence_tokenizer.py | oceandelee/tac | 62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c | ["MIT"] | count: null | null | null
max_forks:  rapport/sentence_tokenizer.py | oceandelee/tac | 62ffbcb31b374a9fa83a1ee6010b2e00f2de8a7c | ["MIT"] | count: null | null | null
"""Tokenize sentences"""
import os
import sys
import nltk
from nltk.tokenize import sent_tokenize
nltk.data.path.append("/home/max/nltk_data")
infile = f"data/all.txt"
outfile = f"data/sents.txt"
with open(outfile, 'w', encoding="utf-8") as output:
with open(infile, encoding="utf-8", errors="backslashreplace") as f:
for line in f:
sentences = sent_tokenize(line)
for sent in sentences:
output.write(sent + "\n")
avg_line_length: 24.736842 | max_line_length: 72 | alphanum_fraction: 0.653191

hexsha: b831e28f53cf6c4f24e6f0208acf486fa4d39390 | size: 1,702 | ext: py | lang: Python
max_stars:  config/settings/test.py | scottbarnes/foxtail | b9faf95ad622d7aa967288698018202cc22b4893 | ["MIT"] | count: 5 | 2021-01-17T22:55:24.000Z | 2021-01-18T21:05:18.000Z
max_issues: config/settings/test.py | scottbarnes/foxtail | b9faf95ad622d7aa967288698018202cc22b4893 | ["MIT"] | count: null | null | null
max_forks:  config/settings/test.py | scottbarnes/foxtail | b9faf95ad622d7aa967288698018202cc22b4893 | ["MIT"] | count: null | null | null
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="DYA3L39SFqVYVdYCpbnEEvv8DnYjNZuDA38OxTn2GVyqQWJtaz2K2AzB0XUJWTqG",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
avg_line_length: 32.730769 | max_line_length: 80 | alphanum_fraction: 0.49765

hexsha: 84929dabe0abe8606d0d1cbc75f4ebb2cc37d708 | size: 1,363 | ext: py | lang: Python
max_stars:  tests/components/zwave_mqtt/test_switch.py | silvertoken/core | 9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79 | ["Apache-2.0"] | count: 2 | 2020-03-06T14:18:59.000Z | 2020-03-06T14:24:43.000Z
max_issues: tests/components/zwave_mqtt/test_switch.py | silvertoken/core | 9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79 | ["Apache-2.0"] | count: 5 | 2021-02-08T20:55:25.000Z | 2022-03-12T00:51:18.000Z
max_forks:  tests/components/zwave_mqtt/test_switch.py | silvertoken/core | 9b8688b0fc624e7bfcb6cac81bcdadd9d2b4be79 | ["Apache-2.0"] | count: 1 | 2020-05-18T09:13:52.000Z | 2020-05-18T09:13:52.000Z
"""Test Z-Wave Switches."""
from .common import setup_zwave
async def test_switch(hass, generic_data, sent_messages, switch_msg):
"""Test setting up config entry."""
receive_message = await setup_zwave(hass, fixture=generic_data)
# Test loaded
state = hass.states.get("switch.smart_plug_switch")
assert state is not None
assert state.state == "off"
# Test turning on
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.smart_plug_switch"}, blocking=True
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": True, "ValueIDKey": 541671440}
# Feedback on state
switch_msg.decode()
switch_msg.payload["Value"] = True
switch_msg.encode()
receive_message(switch_msg)
await hass.async_block_till_done()
state = hass.states.get("switch.smart_plug_switch")
assert state is not None
assert state.state == "on"
# Test turning off
await hass.services.async_call(
"switch", "turn_off", {"entity_id": "switch.smart_plug_switch"}, blocking=True
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 541671440}
avg_line_length: 32.452381 | max_line_length: 86 | alphanum_fraction: 0.674982

hexsha: 336b8439066526aaabf09a1774d133bb22c7a5be | size: 27,946 | ext: py | lang: Python
max_stars:  alex/applications/PublicTransportInfoCS/crws_enums.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | ["Apache-2.0"] | count: 184 | 2015-02-11T04:14:41.000Z | 2022-03-24T21:43:58.000Z
max_issues: alex/applications/PublicTransportInfoCS/crws_enums.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | ["Apache-2.0"] | count: 69 | 2015-01-11T04:57:22.000Z | 2019-04-24T10:25:56.000Z
max_forks:  alex/applications/PublicTransportInfoCS/crws_enums.py | oplatek/alex | 73af644ec35c8a1cd0c37cd478c2afc1db717e0b | ["Apache-2.0"] | count: 61 | 2015-03-04T10:52:13.000Z | 2022-03-04T12:14:06.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various enums, semi-automatically adapted from the CHAPS CRWS enum list written in C#.
Comments come originally from the CRWS description and are in Czech.
"""
def enum(**enums):
return type('Enum', (), enums)
# priznaky pozadavku na podrobnosti vracenych dat
TTDETAILS = enum(
# vracet pevne kody
FIXED_CODES=1L << 0,
# vracet u pevnych kodu take pole sDescExt
FIXED_CODES_DESC_EXT=1L << 1 | 1L << 0,
# u datumovych poznamek davat omezeni od zacatku platnosti jizdniho radu, tj. nikoli od dneska
REM_DATE_ALL=1L << 2,
# pri vypoctu datumove poznamky sloucit varianty vlaku na useku (viz take REM_DISJOINT_VARIANTS)
REM_MERGE_VARIANTS=1L << 3,
# vracet k jedn. poznamkam i legendu znacek pouzitych v poznamkach (pro tisk)
REM_LEGEND=1L << 4,
# vracet hlavickove info o vlacich (TrainInfo - cislo, nazev, typ, ..)
TRAIN_INFO=1L << 5,
# vracet URL informacni soubory stanic (viz StationInfo.sInfoFile)
INFO_FILES=1L << 6,
# vracet informace pro rezervace (rezervacni klice, priznak moznosti predrezervace)
RESERV=1L << 7,
# vracet souradnice
COOR=1L << 8,
# vracet plne nazvy stanic vcetne statu a okresu (REG.ALWAYS)
ST_NAMES_REG_ALWAYS=1L << 9,
# misto informacniho souboru stanice davat text s linkami na zastavce (jen u JR autobusu a MHD)
ST_LINES=1L << 10,
# vracet vsechny stanice na trase spojeni (nejen prestupni) (zahrnuje i ROUTE_CHANGE)
ROUTE_USED=1L << 11,
# vracet stanice nastupu a vystupu (prestupni stanice)
ROUTE_CHANGE=1L << 22,
# vracet pocatecni a koncovou stanici kazdeho pouziteho spoje
ROUTE_FROMTO=1L << 12,
# vracet celou trasu pouzitych spoju (zahrnuje i ROUTE_USED a ROUTE_FROMTO)
ROUTE_FULL=1L << 13,
# vracet celou trasu pouzitych spoju jen v pripade, ze jizdni rad je vlakovy, jinak se ridi dle vlajek ROUTE_USED, ROUTE_CHANGE, ROUTE_FROMTO a ROUTE_FULL
ROUTE_FULL_TRAIN=1L << 14,
# vracet na draze take zalamane vedeni (TrainRouteInfo.adCoorX a TrainRouteInfo.adCoorY)
ROUTE_COOR=1L << 15,
# vracet poznamky k celemu spojeni; funguje bez ohledu na REMMASK
# (nema vliv na poznamky k jednotlivym vlakum spojeni - ty se ridi parametrem iRemMask, viz enum REMMASK)
# v poznamkach vsak mohou byt pouzity znacky TT; pro tisk pak je uzitecna legenda (RemarksList.aoLegend)
REMARKS=1L << 16,
# vracet i poznamku "jede denne"? (je ucinne pouze, pokud je soucasne zapnute REMARKS)
REMARKS_DAILY=1L << 17,
# vracet i seznam dopravcu? (je ucinne pouze, pokud je soucasne zapnute REMARKS, vyjimkou je pole DepartureTrain.sOwner)
REMARKS_OWNERS=1L << 18,
# nevracet v ramci poznamek ke spojeni datumove omezeni nikdy (souvisi se soucasne zapnutym REMARKS)
REMARKS_SKIPDATE=1L << 19,
# vracet v ramci poznamek ke spojeni take data o vylukach a mimoradnostech,
# viz take REMMASK.EXCLUSION_DATA a REMMASK.EXCEPTION_DATA (souvisi se soucasne zapnutym REMARKS)
REMARKS_EXCLUSION_DATA=1L << 24,
# vratit v poznamkach ke spojeni take umelou poznamku typu REMMASK.DATE_FLAGS s priznaky jizdy (souvisi se soucasne zapnutym REMARKS)
DATE_FLAGS=1L << 20,
# vracet priznak moznosti polohy vlaku (viz take DELAY_CD_ONLY, DELAY_ARRDEP)
DELAY=1L << 21,
# pro odjezdove/prijezdove tabule: rezim CD: natvrdo davat smerovani dle pozadavku CD (max. 3 stanice) a vytvorit sestavu na 24 hodin
DEP_TABLE_CD=1 << 23,
# vracet sumarni legendu znacek na urovni celkoveho seznamu spojeni (pro tisk, obsahuje znacky pouzite v poznamkach i u stanic/spoju)
LEGEND_ALL=1L << 25,
# ma smysl jen ve spojeni s LEGEND_ALL, indikuje omezeni zahrnuti pevnych kodu jen na nastupni a vystupni stanici
LEGEND_FROMTO_ONLY=1L << 26,
# ma smysl jen ve spojeni s LEGEND_ALL, prikazuje pridat do legendy i znacky, ktere nejsou v TT fontu, resp. nejsou substituovany do ikony
# kody na draze, ktere obsahuji cislo (typicky nastupiste a tar. pasma) se ale nezahrnuji
LEGEND_ADD_NON_TT=1L << 27,
# ma smysl jen ve spojeni s LEGEND_ALL, prikazuje nepridavat do legendy pevne kody hlavicky spoje
LEGEND_SKIP_TRAIN=1L << 29,
# v parametru iStation je ve skutecnosti na vstupu klic stanice (uplatni se ve funkci GetTrainDataInfo)
KEY_IN_STATION=1L << 28,
# nezahrnovat ve vystupu prime vozy (vyhledani spoje dle masky, odjezdy/prijezdy, spojeni)
NO_CARS=1L << 30,
# nezahrnovat ve vystupu spoje soukromych dopravcu, kteri nemaji smlouvu s CD (vyhledani spoje dle masky, odjezdy/prijezdy)
CD_ONLY=1L << 31,
# nevracet jednotlive spoje, ale od kazde linky jen jednoho reprezentanta
SEARCH_LINES=1L << 32,
# vracet v TrainInfo.sInfo informaci o lince/vedeni spoje
LINE_IN_INFO=1L << 33,
# vracet spoje po kazde variante smerovani dle ZJR (jen JR typu MHD)
ZJR_DIRS=1L << 34,
# vracet ve StationInfo take aiTrTypeID (viz take obdobny priznak v TTINFODETAILS)
TRTYPEID_STATION=1L << 35,
# nedavat duplicitni spoje (kontroluje se jen sNum1)
TRAININFO_SKIPDUP=1L << 36,
# cisla stanovist (ArrDepTrainInfo.sStand a DepartureTrain.sStand/sTrack)
STANDS=1L << 37,
# linky IDS pro vlaky (ArrDepTrainInfo.sIDSLine a DepartureTrain.sIDSLine)
IDS_LINES=1L << 38,
# ceny (jizdne)
PRICES=1L << 39, # obe ceny i s detaily (zahrnuje i PRICE2)
PRICE1=1L << 40, # pouze jedna cena do ConnectionInfo.sPrice, jen jako string (bez detailu - useku), jen je-li uplna (za celou trasu)
PRICE2=1L << 41, # celkova cena formatovana jako pro www (bez detailu - useku)
# - je-li soucasne zapnuto PRICE1 i PRICE2, pak se vraci celkova cena formatovana jako pro www, ale jen pokud je znama za celou trasu
# vracet u spojeni take cilovou obec?
DEST_CITY=1L << 42,
# vracet u jednotlivych spojeni ID kombinace (ma smysl, jen pokud se muze u spojeni lisit)
CONN_COMBID=1L << 43,
# v pripade volani funkce SearchConnectionInfo2 pro spojeni s prestupy bez zadanych prestupnich mist zkusit nejprve otipovat prestupy
CONN2_SET_CHANGE=1L << 44,
# pri hledani spoje (linky) akceptovat jen presnou shodu s maskou, tj. neakceptovat podretezec
MATCH_EXACT=1L << 45,
# vracet v ramci ZJRInfo take pole aoAltDirections s alternativnimi smery ZJR
ZJR_ALT_DIR=1L << 46,
# vracet v ramci ZJRInfo davat smery i ze vsech zastavek uzlu
MERGE_ST=1L << 47,
# vracet spoje po kazde variante smerovani linky
LINE_DIRS=1L << 48,
# vracet priznak moznosti polohy vlaku a zpozdeni jen pro vlaky dopravce CD (plati jen soucasne s DELAY)
DELAY_CD_ONLY=1L << 49,
# pri vyhledani N odjezdu/prijezdu do budoucna zahrnout na zacatek i zpozdene spoje (plati jen soucasne s DELAY)
DELAY_ARRDEP=1L << 50,
# pri vypoctu datumove poznamky davat omezeni u variant vlaku disjunktne (potlacuje REM_MERGE_VARIANTS)
REM_DISJOINT_VARIANTS=1L << 51,
)
# priznaky pozadavku na podrobnosti k objektu typu TimetableObjectInfo
TTINFODETAILS = enum(
ITEM=1 << 0, # vratit pole oItem
STATIONS=1 << 1, # vratit pole aoStations
LINES_ITEM=1 << 2, # vratit sumar linek v sLines (jen pro JR typu MHD a Bus)
LINES_STATION=1 << 3, # vratit sumar linek ve StationInfo.sInfoFile (jen pro JR typu MHD a Bus)
TRTYPEID_ITEM=1 << 4, # vratit sumar ID druhu prostredku u polozky
TRTYPEID_STATION=1 << 5, # vratit sumar ID druhu prostredku u stanic
COOR=1 << 6, # vratit souradnice
STCOUNT=1 << 7, # vratit pocet fyzickych stanic objektu
STATE_ITEM=1 << 8, # vratit vysvetleni statu v sState
STATE_ITEM_ALWAYS=1 << 9, # vratit vysvetleni statu v sState, i kdyz neni primo v nazvu objektu uveden
REGION_ITEM=1 << 10, # vratit vysvetleni okresu v sRegion
REGION_ITEM_ALWAYS=1 << 11, # vratit vysvetleni okresu v sRegion, i kdyz neni primo v nazvu objektu uveden
REGION_DELETE=1 << 12, # vymazat data o regionu z nazvu objektu/adresy, idealne kombinovat s iRegMode:=REG.NONE
LINES_BRIEF=1 << 13, # zkraceny sumar linek (jen ve spojitosti s LINES_ITEM a LINES_STATION)
TYPE_ITEM=1 << 14, # vratit typ objektu v sType
LINES_ITEM_MHD=1 << 15, # vratit sumar linek v sLines jen pro objekty z JR typu MHD (jen ve spojitosti s LINES_ITEM)
REGION_NEEDED=1 << 16, # vratit v bRegion priznak, zdali je region pro rozliseni objektu nutny (jen ve spojitosti s REGION_ITEM_ALWAYS)
TR_CATEGORY=1 << 17, # vratit v sTrCategory druhy dopravy (aplikuje se jen na kombinacich, ktere zahrnuji vlaky i busy)
STATIONSEXT=1 << 18 | 1 << 1 # vratit pole aoStations jen pokud se vraci prvky seznamu stanic (LISTID.STATIONSEXT)
)
# kategorie dopravy
TRCAT = enum(
ALL=0, # nerozliseno
TRAIN=1, # vlaky
BUS=2, # linkove autobusy
CITY=3, # MHD
AIR=4, # letadla
SHIP=5 # lode
)
# podkategorie dopravy (jen vybrane, rozsiruje se prubezne dle potreby)
TRSUBCAT = enum(
ALL=0,
PRAHA=1,
BRNO=2,
OSTRAVA=3,
CESBUD=4
)
# jazyky
TTLANG = enum(
CZECH=0,
ENGLISH=1,
GERMAN=2,
SLOVAK=3,
POLISH=4,
COUNT=5,
)
# ID zakladnich virtualnich seznamu
LISTID = enum(
CITY=1, # mesta a obce
CITYPART=2, # mestske casti
STATIONSEXT=3, # vsechny stanice + nejaka rozsireni (realne byva dekorovan cislem kategorie a podkategorie, viz iListID)
ADDRESS=8, # symbolicky seznam adres: ve skutecnosti neni soucasti JR, ale je obsluhovan serverem adres
STATIONS=9 # symbolicky seznam fyzickych stanic: nelze jej pouzit ke globalnimu vyhledavani!
# - lze jej pouzit pouze na pozici GlobalListItemInfo.iListID,
# v tomto pripade musi klient nastavit do GlobalListItemInfo.iItem index stanice dle StationInfo.iStation
)
# kody vyjimek, ktere posila primo jadro (jizdni rad)
TTERR = enum(
# pri nacitani dat doslo k chybe
LOAD=1,
# pri nacitani dat doslo k vyjimce
LOAD_EX=2,
# pri paralelnim nacitani jizdnich radu se nektery nenacetl
LOAD_MULTI=3,
# nacitani dat prave probiha, nelze jej spustit soucasne znovu
LOAD_RUNNING=4,
# chybný index stanice
BAD_ST_INDEX=5,
# chybný index spoje
BAD_TR_INDEX=6,
# chybný index jizdniho radu
BAD_TT_INDEX=7,
# chybný index seznamu
BAD_VIRT_LIST_INDEX=8,
# chybny index objektu
BAD_VIRT_LIST_ITEM_INDEX=9,
# chybný index poznamky spoje
BAD_TR_REM_INDEX=10,
# substituce s ID nebyla nalezena
BAD_SUBST_ID=11,
# ocekava se 32-mista unikatni identifikace kombinace
COMB_GUID_EXPECTED=12,
# prazdne ID kombinace
COMB_ID_EXPECTED=13,
# chybný index globalniho seznamu
BAD_GLOBAL_LIST_INDEX=14,
# chybny index polozky globalniho seznamu
BAD_GLOBAL_LIST_ITEM_INDEX=15,
# pri pokusu o nacteni konfiguracniho souboru doslo k vyjimce
TT_CONFIG=16,
# kombinace pro zadane absolutni ID nebyla nalezena
COMB_GUID_NOT_FOUND=17,
# kombinace pro zadane ID nebyla nalezena
COMB_NOT_FOUND=18,
# chybna hlavicka datoveho souboru
BAD_DATA_FILE_HEADER=19,
# chyba CRC datoveho souboru
BAD_DATA_FILE_CRC=20,
# neplatny handle seznamu spojeni
CONN_HANDLE_BAD=21,
# seznam spojeni jiz byl uvolnen, provedte nove hledani
CONN_HANDLE_RELEASED=22,
# k seznamu spojeni s danym handle se prave pristupuje, zkuste to za chvili
CONN_HANDLE_LOCKED=23,
# jako cilovy seznam pro kopirovani spojeni nelze zadat bezny seznam (pouze kosik spojeni)
CONN_HANDLE_STANDARD=24,
# handle seznamu spojeni neodpovida pracovnimu procesu
CONN_HANDLE_WORKER_BAD=25,
# spojeni se zadanym ID nebylo nalezeno
BAD_CONN_ID=26,
# kombinaci nelze pro dany ucel pouzit
BAD_COMB_USAGE=27,
# chybne datum
BAD_DATE=28,
# chybny popis
BAD_AUX_DESC=29,
# chybny popis spoje nebo spoj nenalezen
BAD_AUX_DESC2=30,
# chybny parametr funkce
BAD_FUNC_PARAM=31,
)
# kody klientskych vyjimek CRWS
CLIENTEXCEPTION_CODE = enum(
# Neplatné ID uživatele (GUID)
INVALIDUSERID=1000,
# Neplatný přístupový kód (handle) seznamu spojení (evidentne spatny - napr. neni kladny nebo neodpovida absolutnimu ID kombinace)
INVALIDCONNHANDLE=1001,
# Neplatné ID kombinace (GUID)
INVALIDCOMBGUID=1002,
# Přístupový kód (handle) seznamu spojení již není platný, proveďte nové hledání spojení
CONNHANDLETIMEOUT=1003,
# Nepovolená kombinace "{0}". (nema na ni prava nebo zadal nesmysl)
INVALIDCOMBID=1005,
# Nemáte nastavena práva na žádnou kombinaci.
NOCOMBINATIONENABLED=1006,
# Právě probíhá restart serveru jízdních řádů. Zopakujte prosím požadavek později.
APPRESTART=1007,
# (zjištění polohy vlaku:) Informace o vlaku {0} nejsou k dispozici. {1}
NOINFO=1010,
# (zjištění polohy spoje:) Informace o spoji {0} nejsou k dispozici. {1}
NOBUSINFO=1011,
# (zjištění polohy vlaku:) Poloha vlaku není k dispozici. Zkuste to prosím později.
TRAINPOSITION=1012,
# (zjištění polohy spoje:) Poloha spoje není k dispozici. Zkuste to prosím později.
BUSPOSITION=1013,
# (zjištění polohy spoje:) Chybný řetězec pro zjištění polohy spoje ({0}).
DELAYQUERYSOURCE=1016,
# (zjištění polohy spoje:) Prázdný řetězec pro zjištění polohy spoje.
DELAYQUERYSOURCEEMPTY=1017,
# (info o mimořádnosti nebo výluce:) Chybný řetězec pro zjištění informace o mimořádnosti nebo výluce ({0}).
EXCLUSIONQUERYSOURCE=1030,
# (info o mimořádnosti nebo výluce:) Prázdný řetězec pro zjištění informace o mimořádnosti nebo výluce.
EXCLUSIONQUERYSOURCEEMPTY=1031,
# (info o mimořádnosti nebo výluce:) Informace o mimořádnosti nebo výluce {0} nejsou k dispozici. {1}
NOEXCLUSIONINFO=1032,
# (info o mimořádnosti nebo výluce:) Informace o mimořádnosti nebo výluce nejsou k dispozici. Zkuste to prosím později.
EXCLUSIONINFO=1033,
# obecna chyba, zkuste pozdeji
TRYLATER=1020,
)
# bitove priznaky ke stanicim
ST = enum(
CHANGE=0x1, # prestupni stanice
INTL=0x2, # zahranicni stanice
REG=0x4, # vkladat region do nazvu
STATE=0x8, # vkladat stat do nazvu
RESERV=0x10, # ve stanici pojizdi vlak s rezervacemi
EXTERNLINK=0x20, # obsahuje externi hrany
PREFCHANGE=0x40, # preferovany prestupni bod
CAPITAL=0x80, # je v hlavnim meste statu
REGION2=0x100, # je v krajskem meste
REGION3=0x200, # je v okresnim meste
LOWDECK=0x400, # bezbarierova
TERM=0x800, # konecna
SKIP_CITY=0x1000, # nezahrnovat do obce
PPS=0x2000, # vlakova PPS
GC=0x4000, # ma garantovane prestupy
LOWDECK_CHANGE=0x8000 # bezbarierova pro prestup
)
# rezimy vkladani regionu (stat,okres) do nazvu
REG = enum(
SMART=0, # vkladat regiony dle potreby
ALWAYS=1, # vkladat regiony i staty vzdy
NO=2, # nevkladat nikdy
ALWAYS_SMART=3, # vkladat regiony vzdy, jsou-li k dispozici
ALWAYS_REDUCED=4 # vkladat regiony vzdy, staty skryvat vzdy, je-li region k dispozici
)
# priznaky kombinace jizdnich radu
COMBFLAGS = enum(
# krajske mesto
REGION=1 << 0,
# IDS
IDS=1 << 1,
# pouzit jako vychozi nazev kombinace zkracenou verzi
BRIEF_NAME=1 << 2,
# nenabizet v panelu nabidek
HIDE=1 << 3,
# lze pozadovat zadani adresy
HAS_ADDRESS=1 << 4,
# nahrat kombinaci, i kdyz je prosla
LOAD_OLD=1 << 5,
# pripravit dopredu graf site pro pokryti
BUILD_GRAPH=1 << 6
)
# priznaky jednotliveho jizdniho radu
TIMETABLE_FLAGS = enum(
# k dispozici jsou data o souradnicich zastavek
HAS_MAP=1 << 0,
# k dispozici jsou data o vedeni spoju mezi zastavkami (z priznaku vyplyva i HAS_MAP)
HAS_ARCS=1 << 1,
# k dispozici je graf pokryti site (z priznaku vyplyva i HAS_ARCS)
HAS_COVER=1 << 2,
)
# zpusob hledani v globalnim seznamu objektu
SEARCHMODE = enum(
NONE=0, # prazdny priznak
EXACT=1 << 0, # vracet pri presne shode jen ji
CITY_AND_PART=1 << 1, # navic pri presne shode obce hledat jeste presnou shodu casti (plati jen ve spojeni s EXACT)
SCAN_ALL_LISTS=1 << 2, # prochazet vsechny seznamy do vycerpani max. poctu
NO_ADD_EQUAL=1 << 3, # nepridavat objekt z dalsiho seznamu, pokud se mapuje do stejnych zastavek
# jako jiz pridany se stejnym jmenem z drivejsiho seznamu
SCAN_ALL_LEVELS=1 << 4, # hledat shodu se zadanou maskou od zacatku,
# pokud nenaplnim pozadovany pocet, tak od druheho slova, atd.
SORT_LIST_ID=1 << 5, # tridit po seznamech (plati jen ve spojeni s SCAN_ALL_LEVELS)
# normalne je shoda na nultem slove ve vsech seznamech, pak na prvnim slove ve vsech seznamech, atd.
SKIP_CITY_ALIAS=1 << 6, # pokud je to mozne, nezarazovat aliasy obci a casti obci
ALLOW_ADDRESS=1 << 7, # je-li konfigurovan pristup k serveru adres, zkus na zaver resit jeste jako adresu
# aplikuje se jen v pripade, ze se hleda bez omezeni na konkretni seznam nebo s omezenim na seznam LISTID.ADDRESS
# a soucasne prislusna kombinace JR adresy pripousti
USE_PRIORITY=1 << 8, # pouzit prioritni nabidku (aplikuje se jen pri hledani, ktere neni omezeno na jeden seznam, tj. iListID==0)
FORCE_ADDRESS_REG=1 << 9, # pozadovat od serveru adres, aby okres daval za nazvem obce nebo casti vzdy a nikoli jen v pripade nejednoznacnosti nazvu
# - je to nutnost v pripade pouziti priznaku TTINFODETAILS.REGION_ITEM_ALWAYS
# - nasledne lze ovsem okres vymazat pomoci TTINFODETAILS.REGION_DELETE
# - server adres vraci v tomto pripade zavazne okresy a velkymi pismeny a nezavazne malymi
CHECK_UNIQUE_ONLY=1 << 10, # ma smysl jen ve spojeni s vyhledavanim do ObjectsInfo, je-li zapnuto, tak se
# - v pripade, za masce hovi vice moznosti, vrati se v ObjectsInfo.aoMasks hodnota null a v iStatus se vrati STATUS_NOT_UNIQUE
# - dale pak v pripade, ze maska je jednoznacna a iTTInfoDetails je nenulove, vyplni take ObjectsInfo.oTimetableObject
NO_ADD_EQUAL_NAME=1 << 11, # nepridavat objekt z dalsiho seznamu se stejnym nazvem
USE_COOR_VICINITY=1 << 12 # vracet body v blizkosti souradnice
# - souradnice pro hledani se zadava jako soucast masky za znakem §
# - interni priznak, jeho nastaveni na vstupu se ignoruje
)
# masky poznamek ke spojum
REMMASK = enum(
NONE=0, # prazdna maska
LINE_NAME=0x1, # nazev linky
OWNER=0x2, # dopravce
DATE=0x4, # datumove omezeni
INFO=0x8, # informacni poznamka
INFO_IMPORTANT=0x10, # dulezita informacni poznamka
LINE=0x20, # informacni poznamka k lince
RESERV=0x40, # poznamka o povinne rezervaci
RESERV2=0x80, # poznamka o volitelne rezervaci
DIRECTCARS=0x100, # poznamka o primem voze
OWNER_WWW=0x200, # je-li zadana (a soucasne i OWNER),
# tak se do nazvu dopravce nageneruje link na www dopravce
DATE_FLAGS=0x400, # vratit take umelou poznamku z datumovych vlajek
# je nezavisla na ostatnich maskach
OWNER_NUM=0x800, # je-li zadana (a soucasne i OWNER),
# tak se misto nazvu dopravce nageneruje jeho cislo
EXCLUSION=0x1000, # informace o vyluce
EXCLUSION_DATA=0x2000, # informace o vyluce jako data
# polozky oddelene znakem pipe:
# 0 - trat
# 1 - usek
# 2 - priznak, ze vyluka je PRED spojenim (0 nebo 1)
# 3 - index pocatecni stanice do drahy spoje nebo -1 u spojeni
# 4 - index koncove stanice do drahy spoje nebo -1 u spojeni
# 5 - ID vyluky, ID opatreni, datum - parametry pro ziskani podrobnosti o vyluce (oddelovac carka)
EXCEPTION_DATA=0x4000, # informace o mimoradnosti jako data
# polozky odde)lene znakem pipe:
# 0 - trat
# 1 - usek
# 2 - pricina
# 3 - datum a cas platnosti od
# 4 - datum a cas platnosti do
# 5 - datum a cas aktualizace zaznamu o mimoradnosti
# 6 - priznak, ze mimoradnost je PRED spojenim (0 nebo 1)
# 7 - index pocatecni stanice do drahy spoje nebo -1 u spojeni
# 8 - index koncove stanice do drahy spoje nebo -1 u spojeni
# 9 - ID mimoradnosti - parametr pro ziskani podrobnosti o mimoradnosti
DELAY_QUERY=0x8000, # dotaz na polohu vlaku pro nasledne volani funkce DelayQuery
AUX_DESC=0x10000, # popis vlaku pro nasledne volani funkce MapTrainDataInfoAuxDesc nebo MapConnectionAuxDesc
# (vlozi se referencni usek dle dotazu)
AUX_DESC_FULL=0x20000, # popis vlaku pro nasledne volani funkce MapTrainDataInfoAuxDesc nebo MapConnectionAuxDesc
# (vlozi se usek za celou drahu vlaku)
# veskere informacni poznamky
ALLINFO=0x8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x1000,
# ...navic nazev linky, doprace a datumove omezeni
ALL=0x1 | 0x2 | 0x4 | 0x8 | 0x10 | 0x20 | 0x40 | 0x80 | 0x100 | 0x1000,
)
# priznaky typu spoje (viz. TrainInfo.iFlags)
# (mohou byt pouzity i dalsi bity, zde jsou vyvedeny vybrane staticke hodnoty)
VF = enum(
INTLONLY=0x80000000, # vnitrostatni preprava vyloucena
INTL=0x40000000, # mezinarodni spoj (ne ciste domaci)
VAR=0x10000000, # varianta vlaku
CARS=0x8000000, # primy vuz
HASBEDS=0x2000000, # veze take lehatka nebo luzka
HASONLYBEDS=0x1000000, # veze jen lehatka nebo luzka
RESERV=0x800000, # je predmetem rezervace
NOLINEDIR=0x400000, # nepouzivat pro generovani vedeni linky
LINEDIRBACK=0x200000, # jede smerem ZPET
LOWDECK=0x100000 # nizkopodlazni spoj (bezbarierovy pristup)
)
# priznaky na draze
ROUTE_FLAGS = enum(
EMPTY=0, # prazdna hodnota
NO_EXIT=1 << 0, # neni vystup
NO_ENTER=1 << 1, # neni nastup
PARA=1 << 2, # priznak paragraf
ZOLL=1 << 3, # hranicni bod (zde v zasade jen indikuje, ze neni nastup ani vystup)
CHANGE_INFO=1 << 4, # priznak, ze k zastaveni se vaze omezeni prestupu
REMARK=1 << 5, # priznak, za k zastaveni je poznamka
PPS=1 << 6 # vlakova pohranicni prechodova stanice (zastaveni je neverejne a tedy by se nemelo zobrazovat)
)
# absolutni pevne kody pro hledani spojeni
FCS = enum(
WHEELCHAIR=1, # pro cestující na vozíku
CHILDREN=2, # pro cestující s dětmi
BIKE=3, # pro cestující s kolem
CD=4, # vlak Českých drah
NOT_HISTORICAL=5, # není zvláštní historický vlak
CD_ONLY=6, # vlak ČD a smluvních dopravců
NOT_RESERVE=7 # vlak bez povinne rezervace
)
# souradnicove systemy pro vzajemnou konverzi
COOR = enum(
EMPTY= -1,
S42=0,
JTSK=1,
UTM=2,
WGS84=3, # svetove souradnice v radianech (jako X je zde zem. delka a jako Y pak sirka)
WGS84_D=4, # svetove souradnice ve stupnich (jako X je zde zem. sirka a jako Y pak delka)
MERCATOR=5,
PUWG_2000=6,
DEFAULT=4 # souradny system, v kterem jsou drzena data v pameti
)
# navratovy typ pro hledani spojeni/odjezdu/ZJR (hodnota iResult)
EXFUNCTIONRESULT = enum(
OK=0, # vyhledane objekty byly unikatni a postoupilo se tedy ke hledani spojeni/odjezdu/ZJR a ziskaly se nejake vysledky
NOT_FOUND=1 << 0, # vyhledane objekty byly unikatni a postoupilo se tedy ke hledani spojeni/odjezdu/ZJR, avsak nic se nenaslo
DATE_OUT_OF_RANGE=1 << 1, # datum pro hledani je mimo pripustny rozsah
FROM_TO_OVERLAP=1 << 4, # prekryti Z/Pres/Do (stejne objekty)
FROM_ERROR=1 << 5, # chyba hledani Z (masce neodpovida zadny objekt)
FROM_NOT_UNIQUE=1 << 6, # vyhledany nejake objekty Z, ale zadani neni jednoznacne
FROM_MISSING=1 << 7, # Z chybi a dle kontextu by melo byt pritomne
FROM_BAD=1 << 8, # k Z nebyly nalezeny zadne pouzitelne stanice, viz ObjectsInfo.iStatus
TO_ERROR=1 << 10, # chyba hledani Do (masce neodpovida zadny objekt)
TO_NOT_UNIQUE=1 << 11, # vyhledany nejake objekty Do, ale zadani neni jednoznacne
TO_MISSING=1 << 12, # Do chybi a dle kontextu by melo byt pritomne
TO_BAD=1 << 13, # k Do nebyly nalezeny zadne pouzitelne stanice, viz ObjectsInfo.iStatus
VIA_ERROR=1 << 15, # chyba hledani Pres (masce neodpovida zadny objekt)
VIA_NOT_UNIQUE=1 << 16, # vyhledany nejake objekty Pres, ale zadani neni jednoznacne
VIA_MISSING=1 << 17, # Pres chybi a dle kontextu by melo byt pritomne
VIA_BAD=1 << 18, # k Pres nebyly nalezeny zadne pouzitelne stanice, viz ObjectsInfo.iStatus
CHANGE_ERROR=1 << 20, # chyba hledani Prestup (masce neodpovida zadny objekt)
CHANGE_NOT_UNIQUE=1 << 21, # vyhledany nejake objekty Prestup, ale zadani neni jednoznacne
CHANGE_MISSING=1 << 22, # Prestup chybi a dle kontextu by melo byt pritomne
CHANGE_BAD=1 << 23, # k Prestup nebyly nalezeny zadne pouzitelne stanice, viz ObjectsInfo.iStatus
)
# typ odchylky od nejkratsi cesty
DELTAMAX = enum(
NO=0, # nezadana
PERCENT=1, # v procentech
METERS=2 # v metrech
)
# priznak pouziti lehatek/luzek
BEDS = enum(
USEANY=0, # pouzivat bez omezeni
ONLYBEDS=1, # cestovat pouze s lehatkem/luzkem
NOBEDS=2 # pouze mista k sezeni
)
# stav reseni objektu v ObjectsInfo
OBJECT_STATUS = enum(
# objekt je v poradku
OK=0,
# zadny objekt dle masky nebyl nalezen
NOT_FOUND=1,
# indikuje, ze nabidka objektu neni jednoznacna
NOT_UNIQUE=2,
# indikuje, ze k prislusne souradnici nejsou v blizkosti zastavky pro hledani spojeni
COOR_BAD=3,
# indikuje, ze k prislusnemu objektu nejsou zastavky pro hledani spojeni
# (mozne priciny - jizdni rad v dany den jizdy neplati
# nebo nesplnuji omezeni na bezbarierovy pristup
# nebo bylo zadano omezeni na dopravni prostredky a zadne na objektu nestoji)
OBJECT_BAD=4
)
# globalni priznak k napocitanemu jizdnemu
TTGP = enum(
ALL_OK=0, # cena je k dispozici pro cely usek
PART_OK=1, # cena je k dispozici pro cast cesty
MISSING=2 # cena chybi
)
# vycet stavu pro sluzbu poloha spoje
SVCSTATE = enum(
CRSERVER= -1, # vraci primo CRServer
CZ=0,
SK=1,
TELMAX1=2,
MAX=1 # jen pro pohodli kodu
)
# priznaky odjezdove tabule
DEP_TABLE = enum(
SHOW_STAND=1 << 0, # zobrazovat pole pro nastupiste/stanoviste (i kdyz v konkretnim vystupu nemusi byt zadano)
SHOW_TRACK=1 << 1, # zobrazovat pole pro kolej (i kdyz v konkretnim vystupu nemusi byt zadana)
BUILT_FROM_TT=1 << 2, # vyrobena na zaklade jizdniho radu (nebyla primo importovana)
POS_USED=1 << 3 # pri sestaveni byly pro prislusny jizdni rad k dispozici polohy (zpozdeni) spoju
# (tento priznak muze byt pouzit jen soucasne s BUILT_FROM_TT)
)
# ruzne konstanty
class CRCONST:
# zdroje zpozdeni
# Ceske drahy
DELAY_CD = "CD:"
# ZSR
DELAY_ZSR = "ZSR:"
# TELMAX
DELAY_TELMAX1 = "TELMAX1:"
# interni zpozdeni (vklada se take kategorie a podkategorie)
DELAY_INTERN = "X{0}_{1}:"
# rozsirene interni zpozdeni (vklada se take kategorie a podkategorie),
# zde se daji oproti DELAY_INTERN nasledne zadat i rozsirene masky, odelovac mezi polozkami je vzdy carka
DELAY_INTERN_EXT = "Y{0}_{1}:"
# zdroje vyluk a mimoradnosti
# Ceske drahy
EXCEPTIONEXCLUSION_CD = "CD:"
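The enum() factory above builds plain classes whose attributes are bit flags meant to be combined with bitwise OR. A small standalone illustration of the same pattern, using Python 3 syntax (so without the 1L long literals of the module) and toy flag names rather than the real CRWS ones:

```python
def enum(**enums):
    # Same helper as above: a throwaway class whose attributes are the given constants.
    return type('Enum', (), enums)

# Toy flag set built the same way as TTDETAILS / TTINFODETAILS above.
FLAGS = enum(
    FIXED_CODES=1 << 0,
    TRAIN_INFO=1 << 5,
    COOR=1 << 8,
)

requested = FLAGS.FIXED_CODES | FLAGS.TRAIN_INFO  # combine flags for a request
assert requested & FLAGS.TRAIN_INFO               # check whether a flag is set
assert not requested & FLAGS.COOR
```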
avg_line_length: 47.527211 | max_line_length: 164 | alphanum_fraction: 0.686431

hexsha: 5b1e251e4bad0c54f3b506360177143da63c29d4 | size: 5,466 | ext: py | lang: Python
max_stars:  pajbot/modules/lastfm.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | count: 128 | 2015-12-28T01:02:30.000Z | 2019-05-24T21:20:50.000Z
max_issues: pajbot/modules/lastfm.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | count: 277 | 2015-05-03T18:48:57.000Z | 2019-05-23T17:41:28.000Z
max_forks:  pajbot/modules/lastfm.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | count: 96 | 2015-08-07T18:49:50.000Z | 2019-05-20T19:49:27.000Z
import logging
from pajbot.models.command import Command, CommandExample
from pajbot.modules import BaseModule, ModuleSetting
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
class LastfmModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "LastFM module"
DESCRIPTION = "This uses the LastFM api to fetch the current artist and songname that the streamer is listening to on spotify or youtube."
CATEGORY = "Feature"
SETTINGS = [
ModuleSetting(
key="api_key",
label="LastFM Api Key",
type="text",
required=True,
placeholder="i.e. abcedfg1235hfhafafajhf",
default="",
),
ModuleSetting(
key="username",
label="LastFM Username",
type="text",
required=True,
placeholder="i.e. anniefuchsia",
default="",
),
ModuleSetting(
key="no_song",
label="Message to send when no song is playing | Available arguments: {source}, {streamer}",
type="text",
required=True,
placeholder="{source}, {streamer} isn't playing any music right now... FeelsBadMan",
default="{source}, {streamer} isn't playing any music right now... FeelsBadMan",
),
ModuleSetting(
key="current_song",
label="Message to send when a song is playing | Available arguments: {source}, {streamer}, {song}",
type="text",
required=True,
placeholder="{source}, Current song is 🎵 🎶 {song} 🎶 🎵",
default="{source}, Current song is 🎵 🎶 {song} 🎶 🎵",
),
ModuleSetting(
key="cannot_fetch_song",
label="Message to send when unable to fetch the song | Availably arguments: {source}",
type="text",
required=True,
placeholder="{source}, I'm having trouble fetching the song name... Please try again FeelsBadMan",
default="{source}, I'm having trouble fetching the song name... Please try again FeelsBadMan",
),
ModuleSetting(
key="global_cd",
label="Global cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=30,
constraints={"min_value": 0, "max_value": 120},
),
ModuleSetting(
key="user_cd",
label="Per-user cooldown (seconds)",
type="number",
required=True,
placeholder="",
default=60,
constraints={"min_value": 0, "max_value": 240},
),
ModuleSetting(
key="online_only",
label="Only allow the LastFM commands to be run while the stream is online",
type="boolean",
required=True,
default=True,
),
]
def load_commands(self, **options):
# TODO: Aliases should be set in settings?
# This way, it can be run alongside other modules
self.commands["song"] = Command.raw_command(
self.song,
delay_all=self.settings["global_cd"],
delay_user=self.settings["user_cd"],
description="Check what that is playing on the stream",
examples=[
CommandExample(
None,
"Check the current song",
chat="user:!song\n"
"bot: "
+ self.settings["current_song"].format(
source="pajlada",
streamer=StreamHelper.get_streamer(),
song="Adele - Hello",
),
description="Bot mentions the name of the song and the artist currently playing on stream",
).parse()
],
)
self.commands["currentsong"] = self.commands["song"]
self.commands["nowplaying"] = self.commands["song"]
self.commands["playing"] = self.commands["song"]
def song(self, bot, source, **rest):
if self.settings["online_only"] and not self.bot.is_online:
return False
try:
import pylast
except ImportError:
log.error("Missing required library for the LastFM Module: pylast")
return False
API_KEY = self.settings["api_key"]
lastfmname = self.settings["username"]
if len(API_KEY) < 10 or len(lastfmname) < 2:
log.warning("You need to set up the Last FM API stuff in the Module settings.")
return False
try:
network = pylast.LastFMNetwork(api_key=API_KEY, api_secret="", username=lastfmname, password_hash="")
user = network.get_user(lastfmname)
currentTrack = user.get_now_playing()
if currentTrack is None:
bot.me(self.settings["no_song"].format(source=source, streamer=self.bot.streamer_display))
else:
bot.me(
self.settings["current_song"].format(
source=source, streamer=self.bot.streamer_display, song=currentTrack
)
)
except pylast.WSError:
log.error("LastFm username not found")
except IndexError:
bot.me(self.settings["cannot_fetch_song"].format(source=source))
avg_line_length: 37.696552 | max_line_length: 142 | alphanum_fraction: 0.54775

hexsha: f00e3d6bea506d4dd608e78520a24690393eddca | size: 1,582 | ext: py | lang: Python
max_stars:  deploy/shakenfist_ci/tests/test_system_namespace.py | shakenfist/shakenfist | fc8e5fec21bfc3f0ddb9952822c1a8dfdddcc546 | ["Apache-2.0"] | count: 24 | 2020-07-20T23:47:52.000Z | 2022-02-10T05:12:01.000Z
max_issues: deploy/shakenfist_ci/tests/test_system_namespace.py | shakenfist/shakenfist | fc8e5fec21bfc3f0ddb9952822c1a8dfdddcc546 | ["Apache-2.0"] | count: 637 | 2020-06-19T06:57:30.000Z | 2022-03-31T08:58:54.000Z
max_forks:  deploy/shakenfist_ci/tests/test_system_namespace.py | shakenfist/shakenfist | fc8e5fec21bfc3f0ddb9952822c1a8dfdddcc546 | ["Apache-2.0"] | count: 7 | 2020-07-14T20:48:40.000Z | 2021-12-18T10:06:51.000Z
import time
from shakenfist_client import apiclient
from shakenfist_ci import base
class TestSystemNamespace(base.BaseTestCase):
def test_system_namespace(self):
self.assertEqual('system', self.system_client.namespace)
net = self.system_client.allocate_network(
'192.168.242.0/24', True, True,
'ci-system-net')
nets = []
for n in self.system_client.get_networks():
nets.append(n['uuid'])
self.assertIn(net['uuid'], nets)
inst = self.system_client.create_instance(
'cirros', 1, 1024,
[
{
'network_uuid': net['uuid']
}
],
[
{
'size': 8,
'base': 'sf://upload/system/cirros',
'type': 'disk'
}
], None, None)
self.assertIsNotNone(inst['uuid'])
self.assertIsNotNone(inst['node'])
insts = []
for i in self.system_client.get_instances():
insts.append(i['uuid'])
self.assertIn(inst['uuid'], insts)
self.system_client.delete_instance(inst['uuid'])
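        # Poll for up to five minutes for the instance to disappear before removing its network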
start_time = time.time()
while time.time() - start_time < 300:
if not list(self.system_client.get_instances()):
break
time.sleep(5)
self.system_client.delete_network(net['uuid'])
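        # Deleting the system namespace itself is expected to be rejected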
self.assertRaises(
apiclient.ResourceCannotBeDeletedException,
self.system_client.delete_namespace, None)
| 28.763636
| 64
| 0.536662
|
71a62f6b3b8b61350718210302532f2890cf9978
| 58,578
|
py
|
Python
|
sdk/storage/azure-storage-file-share/tests/test_file.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-file-share/tests/test_file.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-file-share/tests/test_file.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import base64
import os
import unittest
from datetime import datetime, timedelta
import requests
import pytest
from azure.core import MatchConditions
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError, ResourceExistsError
from azure.storage.fileshare import (
generate_account_sas,
generate_file_sas,
ShareFileClient,
ShareServiceClient,
ContentSettings,
FileSasPermissions,
AccessPolicy,
ResourceTypes,
AccountSasPermissions,
StorageErrorCode,
NTFSAttributes)
from filetestcase import (
FileTestCase,
TestMode,
record,
)
# ------------------------------------------------------------------------------
TEST_SHARE_PREFIX = 'share'
TEST_DIRECTORY_PREFIX = 'dir'
TEST_FILE_PREFIX = 'file'
INPUT_FILE_PATH = 'file_input.temp.dat'
OUTPUT_FILE_PATH = 'file_output.temp.dat'
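# A little over 64 KiB, so uploads span several ranges with the reduced max_range_size used in these tests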
LARGE_FILE_SIZE = 64 * 1024 + 5
# ------------------------------------------------------------------------------
class StorageFileTest(FileTestCase):
def setUp(self):
super(StorageFileTest, self).setUp()
url = self.get_file_url()
credential = self.get_shared_key_credential()
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.fsc = ShareServiceClient(url, credential=credential, max_range_size=4 * 1024)
self.share_name = self.get_resource_name('utshare')
if not self.is_playback():
self.fsc.create_share(self.share_name)
self.short_byte_data = self.get_random_bytes(1024)
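        # A second service client pointing at the remote storage account, used by the cross-account copy tests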
remote_url = self.get_remote_file_url()
remote_credential = self.get_remote_shared_key_credential()
self.fsc2 = ShareServiceClient(remote_url, credential=remote_credential)
self.remote_share_name = None
def tearDown(self):
if not self.is_playback():
try:
self.fsc.delete_share(self.share_name, delete_snapshots='include')
except:
pass
if self.remote_share_name:
try:
                self.fsc2.delete_share(self.remote_share_name, delete_snapshots='include')
except:
pass
if os.path.isfile(INPUT_FILE_PATH):
try:
os.remove(INPUT_FILE_PATH)
except:
pass
if os.path.isfile(OUTPUT_FILE_PATH):
try:
os.remove(OUTPUT_FILE_PATH)
except:
pass
return super(StorageFileTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_file_reference(self):
return self.get_resource_name(TEST_FILE_PREFIX)
def _create_file(self, file_name=None):
file_name = self._get_file_reference() if file_name is None else file_name
share_client = self.fsc.get_share_client(self.share_name)
file_client = share_client.get_file_client(file_name)
file_client.upload_file(self.short_byte_data)
return file_client
def _create_empty_file(self, file_name=None, file_size=2048):
file_name = self._get_file_reference() if file_name is None else file_name
share_client = self.fsc.get_share_client(self.share_name)
file_client = share_client.get_file_client(file_name)
file_client.create_file(file_size)
return file_client
def _get_file_client(self):
file_name = self._get_file_reference()
share_client = self.fsc.get_share_client(self.share_name)
file_client = share_client.get_file_client(file_name)
return file_client
def _create_remote_share(self):
self.remote_share_name = self.get_resource_name('remoteshare')
remote_share = self.fsc2.get_share_client(self.remote_share_name)
try:
remote_share.create_share()
except ResourceExistsError:
pass
return remote_share
def _create_remote_file(self, file_data=None):
if not file_data:
file_data = b'12345678' * 1024 * 1024
source_file_name = self._get_file_reference()
remote_share = self.fsc2.get_share_client(self.remote_share_name)
remote_file = remote_share.get_file_client(source_file_name)
remote_file.upload_file(file_data)
return remote_file
def _wait_for_async_copy(self, share_name, file_path):
count = 0
share_client = self.fsc.get_share_client(share_name)
file_client = share_client.get_file_client(file_path)
properties = file_client.get_file_properties()
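        # Poll the copy status up to 10 times (roughly a minute) before failing the test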
while properties.copy.status != 'success':
count = count + 1
if count > 10:
self.fail('Timed out waiting for async copy to complete.')
self.sleep(6)
properties = file_client.get_file_properties()
self.assertEqual(properties.copy.status, 'success')
def assertFileEqual(self, file_client, expected_data):
actual_data = file_client.download_file().readall()
self.assertEqual(actual_data, expected_data)
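    # Minimal wrapper exposing only read/write, used to exercise the non-seekable stream upload path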
class NonSeekableFile(object):
def __init__(self, wrapped_file):
self.wrapped_file = wrapped_file
def write(self, data):
self.wrapped_file.write(data)
def read(self, count):
return self.wrapped_file.read(count)
# --Test cases for files ----------------------------------------------
@record
def test_make_file_url(self):
# Arrange
share = self.fsc.get_share_client("vhds")
file_client = share.get_file_client("vhd_dir/my.vhd")
# Act
res = file_client.url
# Assert
self.assertEqual(res, 'https://' + self.settings.STORAGE_ACCOUNT_NAME
+ '.file.core.windows.net/vhds/vhd_dir/my.vhd')
@record
def test_make_file_url_no_directory(self):
# Arrange
share = self.fsc.get_share_client("vhds")
file_client = share.get_file_client("my.vhd")
# Act
res = file_client.url
# Assert
self.assertEqual(res, 'https://' + self.settings.STORAGE_ACCOUNT_NAME
+ '.file.core.windows.net/vhds/my.vhd')
@record
def test_make_file_url_with_protocol(self):
# Arrange
url = self.get_file_url().replace('https', 'http')
fsc = ShareServiceClient(url, credential=self.settings.STORAGE_ACCOUNT_KEY)
share = fsc.get_share_client("vhds")
file_client = share.get_file_client("vhd_dir/my.vhd")
# Act
res = file_client.url
# Assert
self.assertEqual(res, 'http://' + self.settings.STORAGE_ACCOUNT_NAME
+ '.file.core.windows.net/vhds/vhd_dir/my.vhd')
@record
def test_make_file_url_with_sas(self):
# Arrange
sas = '?sv=2015-04-05&st=2015-04-29T22%3A18%3A26Z&se=2015-04-30T02%3A23%3A26Z&sr=b&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https&sig=Z%2FRHIX5Xcg0Mq2rqI3OlWTjEg2tYkboXr1P9ZUXDtkk%3D'
file_client = ShareFileClient(
self.get_file_url(),
share_name="vhds",
file_path="vhd_dir/my.vhd",
credential=sas
)
# Act
res = file_client.url
# Assert
self.assertEqual(res, 'https://' + self.settings.STORAGE_ACCOUNT_NAME +
'.file.core.windows.net/vhds/vhd_dir/my.vhd{}'.format(sas))
@record
def test_create_file(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
resp = file_client.create_file(1024, file_attributes="hidden")
# Assert
props = file_client.get_file_properties()
self.assertIsNotNone(props)
self.assertEqual(props.etag, resp['etag'])
self.assertEqual(props.last_modified, resp['last_modified'])
@record
def test_create_file_with_metadata(self):
# Arrange
metadata = {'hello': 'world', 'number': '42'}
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
resp = file_client.create_file(1024, metadata=metadata)
# Assert
props = file_client.get_file_properties()
self.assertIsNotNone(props)
self.assertEqual(props.etag, resp['etag'])
self.assertEqual(props.last_modified, resp['last_modified'])
self.assertDictEqual(props.metadata, metadata)
def test_create_file_when_file_permission_is_too_long(self):
file_client = self._get_file_client()
permission = str(self.get_random_bytes(8 * 1024 + 1))
with self.assertRaises(ValueError):
file_client.create_file(1024, file_permission=permission)
@record
def test_create_file_with_invalid_file_permission(self):
# Arrange
        file_client = self._get_file_client()
        with self.assertRaises(HttpResponseError):
            file_client.create_file(1024, file_permission="abcde")
@record
def test_create_file_will_set_all_smb_properties(self):
# Arrange
file_client = self._get_file_client()
# Act
file_client.create_file(1024)
file_properties = file_client.get_file_properties()
# Assert
self.assertIsNotNone(file_properties)
self.assertIsNotNone(file_properties.change_time)
self.assertIsNotNone(file_properties.creation_time)
self.assertIsNotNone(file_properties.file_attributes)
self.assertIsNotNone(file_properties.last_write_time)
@record
def test_file_exists(self):
# Arrange
file_client = self._create_file()
# Act
exists = file_client.get_file_properties()
# Assert
self.assertTrue(exists)
@record
def test_file_not_exists(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path="missingdir/" + file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
with self.assertRaises(ResourceNotFoundError):
file_client.get_file_properties()
# Assert
@record
def test_file_exists_with_snapshot(self):
# Arrange
file_client = self._create_file()
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
file_client.delete_file()
# Act
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
props = snapshot_client.get_file_properties()
# Assert
self.assertTrue(props)
@record
def test_file_not_exists_with_snapshot(self):
# Arrange
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
file_client = self._create_file()
# Act
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Assert
with self.assertRaises(ResourceNotFoundError):
snapshot_client.get_file_properties()
@record
def test_resize_file(self):
# Arrange
file_client = self._create_file()
# Act
file_client.resize_file(5)
# Assert
props = file_client.get_file_properties()
self.assertEqual(props.size, 5)
@record
def test_set_file_properties(self):
# Arrange
file_client = self._create_file()
# Act
content_settings = ContentSettings(
content_language='spanish',
content_disposition='inline')
resp = file_client.set_http_headers(content_settings=content_settings)
# Assert
properties = file_client.get_file_properties()
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self.assertEqual(properties.content_settings.content_disposition, content_settings.content_disposition)
self.assertIsNotNone(properties.last_write_time)
self.assertIsNotNone(properties.creation_time)
self.assertIsNotNone(properties.permission_key)
@record
def test_set_file_properties_with_file_permission(self):
# Arrange
file_client = self._create_file()
properties_on_creation = file_client.get_file_properties()
content_settings = ContentSettings(
content_language='spanish',
content_disposition='inline')
ntfs_attributes = NTFSAttributes(archive=True, temporary=True)
last_write_time = properties_on_creation.last_write_time + timedelta(hours=3)
creation_time = properties_on_creation.creation_time + timedelta(hours=3)
# Act
file_client.set_http_headers(
content_settings=content_settings,
file_attributes=ntfs_attributes,
file_last_write_time=last_write_time,
file_creation_time=creation_time,
)
# Assert
properties = file_client.get_file_properties()
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self.assertEqual(properties.content_settings.content_disposition, content_settings.content_disposition)
self.assertEqual(properties.creation_time, creation_time)
self.assertEqual(properties.last_write_time, last_write_time)
self.assertIn("Archive", properties.file_attributes)
self.assertIn("Temporary", properties.file_attributes)
@record
def test_get_file_properties(self):
# Arrange
file_client = self._create_file()
# Act
properties = file_client.get_file_properties()
# Assert
self.assertIsNotNone(properties)
self.assertEqual(properties.size, len(self.short_byte_data))
@record
def test_get_file_properties_with_snapshot(self):
# Arrange
file_client = self._create_file()
metadata = {"test1": "foo", "test2": "bar"}
file_client.set_file_metadata(metadata)
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
metadata2 = {"test100": "foo100", "test200": "bar200"}
file_client.set_file_metadata(metadata2)
# Act
file_props = file_client.get_file_properties()
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
snapshot_props = snapshot_client.get_file_properties()
# Assert
self.assertIsNotNone(file_props)
self.assertIsNotNone(snapshot_props)
self.assertEqual(file_props.size, snapshot_props.size)
self.assertDictEqual(metadata, snapshot_props.metadata)
@record
def test_get_file_metadata_with_snapshot(self):
# Arrange
file_client = self._create_file()
metadata = {"test1": "foo", "test2": "bar"}
file_client.set_file_metadata(metadata)
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
metadata2 = {"test100": "foo100", "test200": "bar200"}
file_client.set_file_metadata(metadata2)
# Act
file_metadata = file_client.get_file_properties().metadata
file_snapshot_metadata = snapshot_client.get_file_properties().metadata
# Assert
self.assertDictEqual(metadata2, file_metadata)
self.assertDictEqual(metadata, file_snapshot_metadata)
@record
def test_get_file_properties_with_non_existing_file(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
with self.assertRaises(ResourceNotFoundError):
file_client.get_file_properties()
# Assert
@record
def test_get_file_metadata(self):
# Arrange
file_client = self._create_file()
# Act
md = file_client.get_file_properties().metadata
# Assert
self.assertIsNotNone(md)
self.assertEqual(0, len(md))
@record
def test_set_file_metadata_with_upper_case(self):
# Arrange
metadata = {'hello': 'world', 'number': '42', 'UP': 'UPval'}
file_client = self._create_file()
# Act
file_client.set_file_metadata(metadata)
# Assert
md = file_client.get_file_properties().metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['UP'], 'UPval')
self.assertFalse('up' in md)
@record
def test_delete_file_with_existing_file(self):
# Arrange
file_client = self._create_file()
# Act
file_client.delete_file()
# Assert
with self.assertRaises(ResourceNotFoundError):
file_client.get_file_properties()
@record
def test_delete_file_with_non_existing_file(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
with self.assertRaises(ResourceNotFoundError):
file_client.delete_file()
# Assert
@record
def test_update_range(self):
# Arrange
file_client = self._create_file()
# Act
data = b'abcdefghijklmnop' * 32
file_client.upload_range(data, offset=0, length=512)
# Assert
content = file_client.download_file().readall()
self.assertEqual(len(data), 512)
self.assertEqual(data, content[:512])
self.assertEqual(self.short_byte_data[512:], content[512:])
@record
def test_update_range_with_md5(self):
# Arrange
file_client = self._create_file()
# Act
data = b'abcdefghijklmnop' * 32
file_client.upload_range(data, offset=0, length=512, validate_content=True)
# Assert
@record
def test_update_range_from_file_url_when_source_file_does_not_have_enough_bytes(self):
# Arrange
source_file_name = 'testfile1'
source_file_client = self._create_file(source_file_name)
destination_file_name = 'filetoupdate'
destination_file_client = self._create_file(destination_file_name)
# generate SAS for the source file
sas_token_for_source_file = generate_file_sas(
source_file_client.account_name,
source_file_client.share_name,
source_file_client.file_path,
source_file_client.credential.account_key,
)
source_file_url = source_file_client.url + '?' + sas_token_for_source_file
# Act
with self.assertRaises(HttpResponseError):
            # the source file has fewer than 2050 bytes, so this copy is expected to raise
destination_file_client.upload_range_from_url(source_file_url, offset=0, length=2050, source_offset=0)
@record
def test_update_range_from_file_url(self):
# Arrange
source_file_name = 'testfile'
source_file_client = self._create_file(file_name=source_file_name)
data = b'abcdefghijklmnop' * 32
resp = source_file_client.upload_range(data, offset=0, length=512)
destination_file_name = 'filetoupdate'
destination_file_client = self._create_empty_file(file_name=destination_file_name)
# generate SAS for the source file
sas_token_for_source_file = generate_file_sas(
source_file_client.account_name,
source_file_client.share_name,
source_file_client.file_path,
source_file_client.credential.account_key,
FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1))
source_file_url = source_file_client.url + '?' + sas_token_for_source_file
# Act
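        # The upload is conditional on the source etag, so it only succeeds if the source was not modified after the upload above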
destination_file_client.upload_range_from_url(source_file_url, offset=0, length=512, source_offset=0,
source_etag=resp['etag'],
source_match_condition=MatchConditions.IfNotModified)
# Assert
# To make sure the range of the file is actually updated
file_ranges = destination_file_client.get_ranges()
file_content = destination_file_client.download_file(offset=0, length=512).readall()
        self.assertEqual(1, len(file_ranges))
        self.assertEqual(0, file_ranges[0].get('start'))
        self.assertEqual(511, file_ranges[0].get('end'))
        self.assertEqual(data, file_content)
@record
def test_update_big_range_from_file_url(self):
# Arrange
source_file_name = 'testfile1'
end = 1048575
source_file_client = self._create_empty_file(file_name=source_file_name, file_size=1024 * 1024)
data = b'abcdefghijklmnop' * 65536
source_file_client.upload_range(data, offset=0, length=end+1)
destination_file_name = 'filetoupdate1'
destination_file_client = self._create_empty_file(file_name=destination_file_name, file_size=1024 * 1024)
# generate SAS for the source file
sas_token_for_source_file = generate_file_sas(
source_file_client.account_name,
source_file_client.share_name,
source_file_client.file_path,
source_file_client.credential.account_key,
FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1))
source_file_url = source_file_client.url + '?' + sas_token_for_source_file
# Act
destination_file_client.upload_range_from_url(source_file_url, offset=0, length=end+1, source_offset=0)
# Assert
# To make sure the range of the file is actually updated
file_ranges = destination_file_client.get_ranges()
file_content = destination_file_client.download_file(offset=0, length=end + 1).readall()
        self.assertEqual(1, len(file_ranges))
        self.assertEqual(0, file_ranges[0].get('start'))
        self.assertEqual(end, file_ranges[0].get('end'))
        self.assertEqual(data, file_content)
@record
def test_clear_range(self):
# Arrange
# TODO: update swagger and fix this test
pytest.skip("TODO: fix swagger!")
file_client = self._create_file()
# Act
resp = file_client.clear_range(offset=0, length=512)
# Assert
content = file_client.download_file().readall()
self.assertEqual(b'\x00' * 512, content[:512])
self.assertEqual(self.short_byte_data[512:], content[512:])
@record
def test_update_file_unicode(self):
# Arrange
file_client = self._create_file()
# Act
data = u'abcdefghijklmnop' * 32
file_client.upload_range(data, offset=0, length=512)
encoded = data.encode('utf-8')
# Assert
content = file_client.download_file().readall()
self.assertEqual(encoded, content[:512])
self.assertEqual(self.short_byte_data[512:], content[512:])
# Assert
@record
def test_list_ranges_none(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.create_file(1024)
# Act
ranges = file_client.get_ranges()
# Assert
self.assertIsNotNone(ranges)
self.assertEqual(len(ranges), 0)
@record
def test_list_ranges_2(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.create_file(2048)
data = b'abcdefghijklmnop' * 32
resp1 = file_client.upload_range(data, offset=0, length=512)
resp2 = file_client.upload_range(data, offset=1024, length=512)
# Act
ranges = file_client.get_ranges()
# Assert
self.assertIsNotNone(ranges)
self.assertEqual(len(ranges), 2)
self.assertEqual(ranges[0]['start'], 0)
self.assertEqual(ranges[0]['end'], 511)
self.assertEqual(ranges[1]['start'], 1024)
self.assertEqual(ranges[1]['end'], 1535)
@record
def test_list_ranges_none_from_snapshot(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.create_file(1024)
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.delete_file()
# Act
ranges = snapshot_client.get_ranges()
# Assert
self.assertIsNotNone(ranges)
self.assertEqual(len(ranges), 0)
@record
def test_list_ranges_2_from_snapshot(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.create_file(2048)
data = b'abcdefghijklmnop' * 32
resp1 = file_client.upload_range(data, offset=0, length=512)
resp2 = file_client.upload_range(data, offset=1024, length=512)
share_client = self.fsc.get_share_client(self.share_name)
snapshot = share_client.create_snapshot()
snapshot_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
snapshot=snapshot,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.delete_file()
# Act
ranges = snapshot_client.get_ranges()
# Assert
self.assertIsNotNone(ranges)
self.assertEqual(len(ranges), 2)
self.assertEqual(ranges[0]['start'], 0)
self.assertEqual(ranges[0]['end'], 511)
self.assertEqual(ranges[1]['start'], 1024)
self.assertEqual(ranges[1]['end'], 1535)
@record
def test_copy_file_with_existing_file(self):
# Arrange
source_client = self._create_file()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path='file1copy',
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
copy = file_client.start_copy_from_url(source_client.url)
# Assert
self.assertIsNotNone(copy)
self.assertEqual(copy['copy_status'], 'success')
self.assertIsNotNone(copy['copy_id'])
copy_file = file_client.download_file().readall()
self.assertEqual(copy_file, self.short_byte_data)
@record
def test_copy_file_async_private_file(self):
# Arrange
self._create_remote_share()
source_file = self._create_remote_file()
# Act
target_file_name = 'targetfile'
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=target_file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
with self.assertRaises(HttpResponseError) as e:
file_client.start_copy_from_url(source_file.url)
# Assert
self.assertEqual(e.exception.error_code, StorageErrorCode.cannot_verify_copy_source)
@record
def test_copy_file_async_private_file_with_sas(self):
# Arrange
data = b'12345678' * 1024 * 1024
self._create_remote_share()
source_file = self._create_remote_file(file_data=data)
sas_token = generate_file_sas(
source_file.account_name,
source_file.share_name,
source_file.file_path,
source_file.credential.account_key,
permission=FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
source_url = source_file.url + '?' + sas_token
# Act
target_file_name = 'targetfile'
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=target_file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
copy_resp = file_client.start_copy_from_url(source_url)
# Assert
self.assertTrue(copy_resp['copy_status'] in ['success', 'pending'])
self._wait_for_async_copy(self.share_name, target_file_name)
actual_data = file_client.download_file().readall()
self.assertEqual(actual_data, data)
@record
def test_abort_copy_file(self):
# Arrange
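        # An 8 MiB copy from a second account runs asynchronously, leaving time to abort it while still pending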
data = b'12345678' * 1024 * 1024
self._create_remote_share()
source_file = self._create_remote_file(file_data=data)
sas_token = generate_file_sas(
source_file.account_name,
source_file.share_name,
source_file.file_path,
source_file.credential.account_key,
permission=FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
source_url = source_file.url + '?' + sas_token
# Act
target_file_name = 'targetfile'
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=target_file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
copy_resp = file_client.start_copy_from_url(source_url)
self.assertEqual(copy_resp['copy_status'], 'pending')
file_client.abort_copy(copy_resp)
# Assert
target_file = file_client.download_file()
self.assertEqual(target_file.readall(), b'')
self.assertEqual(target_file.properties.copy.status, 'aborted')
@record
def test_abort_copy_file_with_synchronous_copy_fails(self):
# Arrange
source_file = self._create_file()
# Act
target_file_name = 'targetfile'
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=target_file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
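        # A small same-account copy completes synchronously, so aborting it is expected to fail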
copy_resp = file_client.start_copy_from_url(source_file.url)
with self.assertRaises(HttpResponseError):
file_client.abort_copy(copy_resp)
# Assert
self.assertEqual(copy_resp['copy_status'], 'success')
@record
def test_unicode_get_file_unicode_name(self):
# Arrange
file_name = '啊齄丂狛狜'
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.upload_file(b'hello world')
# Act
content = file_client.download_file().readall()
# Assert
self.assertEqual(content, b'hello world')
@record
def test_file_unicode_data(self):
# Arrange
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
# Act
data = u'hello world啊齄丂狛狜'.encode('utf-8')
file_client.upload_file(data)
# Assert
content = file_client.download_file().readall()
self.assertEqual(content, data)
@record
def test_file_unicode_data_and_file_attributes(self):
# Arrange
file_client = self._get_file_client()
# Act
data = u'hello world啊齄丂狛狜'.encode('utf-8')
file_client.upload_file(data, file_attributes=NTFSAttributes(temporary=True))
# Assert
content = file_client.download_file().readall()
properties = file_client.get_file_properties()
self.assertEqual(content, data)
self.assertIn('Temporary', properties.file_attributes)
@record
def test_unicode_get_file_binary_data(self):
# Arrange
base64_data = 'AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/wABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICEiIyQlJicoKSorLC0uLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gIGCg4SFhoeIiYqLjI2Oj5CRkpOUlZaXmJmam5ydnp+goaKjpKWmp6ipqqusra6vsLGys7S1tre4ubq7vL2+v8DBwsPExcbHyMnKy8zNzs/Q0dLT1NXW19jZ2tvc3d7f4OHi4+Tl5ufo6err7O3u7/Dx8vP09fb3+Pn6+/z9/v8AAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2Nzg5Ojs8PT4/QEFCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4CBgoOEhYaHiImKi4yNjo+QkZKTlJWWl5iZmpucnZ6foKGio6SlpqeoqaqrrK2ur7CxsrO0tba3uLm6u7y9vr/AwcLDxMXGx8jJysvMzc7P0NHS09TV1tfY2drb3N3e3+Dh4uPk5ebn6Onq6+zt7u/w8fLz9PX29/j5+vv8/f7/AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn+AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6/wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w=='
binary_data = base64.b64decode(base64_data)
file_name = self._get_file_reference()
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY)
file_client.upload_file(binary_data)
# Act
content = file_client.download_file().readall()
# Assert
self.assertEqual(content, binary_data)
def test_create_file_from_bytes_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
progress = []
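        # The raw_response_hook is invoked for each service response; upload progress is read from the response context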
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
response = file_client.upload_file(data, max_concurrency=2, raw_response_hook=callback)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data)
def test_create_file_from_bytes_with_index(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
index = 1024
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
response = file_client.upload_file(data[index:], max_concurrency=2)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data[1024:])
def test_create_file_from_bytes_with_index_and_count(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
index = 512
count = 1024
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
response = file_client.upload_file(data[index:], length=count, max_concurrency=2)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data[index:index + count])
def test_create_file_from_path(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
with open(INPUT_FILE_PATH, 'rb') as stream:
response = file_client.upload_file(stream, max_concurrency=2)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data)
def test_create_file_from_path_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(INPUT_FILE_PATH, 'rb') as stream:
response = file_client.upload_file(stream, max_concurrency=2, raw_response_hook=callback)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data)
self.assert_upload_progress(
len(data),
self.fsc._config.max_range_size,
progress, unknown_size=False)
def test_create_file_from_stream(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_size = len(data)
with open(INPUT_FILE_PATH, 'rb') as stream:
response = file_client.upload_file(stream, max_concurrency=2)
assert isinstance(response, dict)
assert 'last_modified' in response
assert 'etag' in response
# Assert
self.assertFileEqual(file_client, data[:file_size])
def test_create_file_from_stream_non_seekable(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_size = len(data)
with open(INPUT_FILE_PATH, 'rb') as stream:
non_seekable_file = StorageFileTest.NonSeekableFile(stream)
file_client.upload_file(non_seekable_file, length=file_size, max_concurrency=1)
# Assert
self.assertFileEqual(file_client, data[:file_size])
def test_create_file_from_stream_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
file_size = len(data)
with open(INPUT_FILE_PATH, 'rb') as stream:
file_client.upload_file(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertFileEqual(file_client, data[:file_size])
self.assert_upload_progress(
len(data),
self.fsc._config.max_range_size,
progress, unknown_size=False)
def test_create_file_from_stream_truncated(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
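        # Upload everything except the last 512 bytes of the local file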
file_size = len(data) - 512
with open(INPUT_FILE_PATH, 'rb') as stream:
file_client.upload_file(stream, length=file_size, max_concurrency=2)
# Assert
self.assertFileEqual(file_client, data[:file_size])
def test_create_file_from_stream_with_progress_truncated(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
with open(INPUT_FILE_PATH, 'wb') as stream:
stream.write(data)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
file_size = len(data) - 5
with open(INPUT_FILE_PATH, 'rb') as stream:
file_client.upload_file(stream, length=file_size, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertFileEqual(file_client, data[:file_size])
self.assert_upload_progress(
file_size,
self.fsc._config.max_range_size,
progress, unknown_size=False)
@record
def test_create_file_from_text(self):
# Arrange
file_name = self._get_file_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-8')
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_client.upload_file(text)
# Assert
self.assertFileEqual(file_client, data)
@record
def test_create_file_from_text_with_encoding(self):
# Arrange
file_name = self._get_file_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_client.upload_file(text, encoding='UTF-16')
# Assert
self.assertFileEqual(file_client, data)
def test_create_file_from_text_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_text_data(LARGE_FILE_SIZE)
encoded_data = data.encode('utf-8')
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_client.upload_file(data)
# Assert
self.assertFileEqual(file_client, encoded_data)
@record
def test_create_file_with_md5_small(self):
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(512)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_client.upload_file(data, validate_content=True)
# Assert
def test_create_file_with_md5_large(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_name = self._get_file_reference()
data = self.get_random_bytes(LARGE_FILE_SIZE)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_name,
credential=self.settings.STORAGE_ACCOUNT_KEY,
max_range_size=4 * 1024)
# Act
file_client.upload_file(data, validate_content=True, max_concurrency=2)
# Assert
# --Test cases for sas & acl ------------------------------------------------
@record
def test_sas_access_file(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client = self._create_file()
token = generate_file_sas(
file_client.account_name,
file_client.share_name,
file_client.file_path,
file_client.credential.account_key,
permission=FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
credential=token)
content = file_client.download_file().readall()
# Assert
self.assertEqual(self.short_byte_data, content)
@record
def test_sas_signed_identifier(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client = self._create_file()
share_client = self.fsc.get_share_client(self.share_name)
access_policy = AccessPolicy()
access_policy.start = datetime.utcnow() - timedelta(hours=1)
access_policy.expiry = datetime.utcnow() + timedelta(hours=1)
access_policy.permission = FileSasPermissions(read=True)
identifiers = {'testid': access_policy}
share_client.set_share_access_policy(identifiers)
token = generate_file_sas(
file_client.account_name,
file_client.share_name,
file_client.file_path,
file_client.credential.account_key,
policy_id='testid')
# Act
sas_file = ShareFileClient.from_file_url(
file_client.url,
credential=token)
        content = sas_file.download_file().readall()
# Assert
self.assertEqual(self.short_byte_data, content)
@record
def test_account_sas(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client = self._create_file()
token = generate_account_sas(
self.fsc.account_name,
self.fsc.credential.account_key,
ResourceTypes(object=True),
AccountSasPermissions(read=True),
datetime.utcnow() + timedelta(hours=1),
)
# Act
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
credential=token)
response = requests.get(file_client.url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(self.short_byte_data, response.content)
@record
def test_shared_read_access_file(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client = self._create_file()
token = generate_file_sas(
file_client.account_name,
file_client.share_name,
file_client.file_path,
file_client.credential.account_key,
permission=FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
credential=token)
response = requests.get(file_client.url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(self.short_byte_data, response.content)
@record
def test_shared_read_access_file_with_content_query_params(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client = self._create_file()
token = generate_file_sas(
file_client.account_name,
file_client.share_name,
file_client.file_path,
file_client.credential.account_key,
permission=FileSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
cache_control='no-cache',
content_disposition='inline',
content_encoding='utf-8',
content_language='fr',
content_type='text',
)
# Act
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client.file_name,
credential=token)
response = requests.get(file_client.url)
# Assert
self.assertEqual(self.short_byte_data, response.content)
self.assertEqual(response.headers['cache-control'], 'no-cache')
self.assertEqual(response.headers['content-disposition'], 'inline')
self.assertEqual(response.headers['content-encoding'], 'utf-8')
self.assertEqual(response.headers['content-language'], 'fr')
self.assertEqual(response.headers['content-type'], 'text')
@record
def test_shared_write_access_file(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
updated_data = b'updated file data'
file_client_admin = self._create_file()
token = generate_file_sas(
file_client_admin.account_name,
file_client_admin.share_name,
file_client_admin.file_path,
file_client_admin.credential.account_key,
permission=FileSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client_admin.file_name,
credential=token)
# Act
headers = {'x-ms-range': 'bytes=0-16', 'x-ms-write': 'update'}
response = requests.put(file_client.url + '&comp=range', headers=headers, data=updated_data)
# Assert
self.assertTrue(response.ok)
file_content = file_client_admin.download_file().readall()
self.assertEqual(updated_data, file_content[:len(updated_data)])
@record
def test_shared_delete_access_file(self):
# SAS URL is calculated from storage key, so this test runs live only
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
file_client_admin = self._create_file()
token = generate_file_sas(
file_client_admin.account_name,
file_client_admin.share_name,
file_client_admin.file_path,
file_client_admin.credential.account_key,
permission=FileSasPermissions(delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
file_client = ShareFileClient(
self.get_file_url(),
share_name=self.share_name,
file_path=file_client_admin.file_name,
credential=token)
# Act
response = requests.delete(file_client.url)
# Assert
self.assertTrue(response.ok)
with self.assertRaises(ResourceNotFoundError):
file_client_admin.download_file()
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 35.609726
| 1,392
| 0.64403
|
6313fdfd0887c26bc8a40f39b90983631cd5639d
| 238,324
|
py
|
Python
|
tests/admin_views/tests.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_views/tests.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_views/tests.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import re
import datetime
import unittest
from django.conf import settings, global_settings
from django.core import mail
from django.core.checks import Error
from django.core.files import temp as tempfile
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (NoReverseMatch,
get_script_prefix, reverse, set_script_prefix)
# Register auth models with the admin.
from django.contrib.auth import get_permission_codename
from django.contrib.admin import ModelAdmin
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.models import LogEntry, DELETION
from django.contrib.admin.options import TO_FIELD_VAR
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.utils import quote
from django.contrib.admin.validation import ModelAdminValidator
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.models import Group, User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.storage import staticfiles_storage
from django.forms.utils import ErrorList
from django.template.response import TemplateResponse
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import patch_logger
from django.test import modify_settings, override_settings
from django.utils import formats
from django.utils import translation
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri, force_bytes, force_text
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urljoin, urlparse
from django.utils._os import upath
from django.utils import six
# local test models
from .models import (Article, BarAccount, CustomArticle, EmptyModel, FooAccount,
Gallery, ModelWithStringPrimaryKey, Person, Persona, Picture, Podcast,
Section, Subscriber, Vodcast, Language, Collector, Widget, Grommet,
DooHickey, FancyDoodad, Whatsit, Category, Post, Plot, FunkyTag, Chapter,
Book, Promo, WorkHour, Employee, Question, Answer, Inquisition, Actor,
FoodDelivery, RowLevelChangePermissionModel, Paper, CoverLetter, Story,
OtherStory, ComplexSortedPerson, PluggableSearchPerson, Parent, Child, AdminOrderedField,
AdminOrderedModelMethod, AdminOrderedAdminMethod, AdminOrderedCallable,
Report, MainPrepopulated, RelatedPrepopulated, UnorderedObject,
Simple, UndeletableObject, UnchangeableObject, Choice, ShortMessage,
Telegram, Pizza, Topping, FilteredManager, City, Restaurant, Worker,
ParentWithDependentChildren, Character, FieldOverridePost, Color2)
from .admin import site, site2, CityAdmin
ERROR_MESSAGE = "Please enter the correct username and password \
for a staff account. Note that both fields may be case-sensitive."
ADMIN_VIEW_TEMPLATES_DIR = settings.TEMPLATE_DIRS + (os.path.join(os.path.dirname(upath(__file__)), 'templates'),)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls",
USE_I18N=True, USE_L10N=False, LANGUAGE_CODE='en')
class AdminViewBasicTestCase(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-colors.xml',
'admin-views-fabrics.xml', 'admin-views-books.xml']
# Store the bit of the URL where the admin is registered as a class
# variable. That way we can test a second AdminSite just by subclassing
# this test case and changing urlbit.
urlbit = 'admin'
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
formats.reset_format_cache()
def assertContentBefore(self, response, text1, text2, failing_msg=None):
"""
Testing utility asserting that text1 appears before text2 in response
content.
"""
self.assertEqual(response.status_code, 200)
self.assertLess(response.content.index(force_bytes(text1)), response.content.index(force_bytes(text2)),
failing_msg)
class AdminViewBasicTest(AdminViewBasicTestCase):
def test_trailing_slash_required(self):
"""
        If you leave off the trailing slash, the app should redirect and add it.
"""
response = self.client.get('/test_admin/%s/admin_views/article/add' % self.urlbit)
self.assertRedirects(response,
'/test_admin/%s/admin_views/article/add/' % self.urlbit,
status_code=301)
def test_admin_static_template_tag(self):
"""
Test that admin_static.static is pointing to the collectstatic version
        (as django.contrib.staticfiles is in installed apps).
"""
old_url = staticfiles_storage.base_url
staticfiles_storage.base_url = '/test/'
try:
self.assertEqual(static('path'), '/test/path')
finally:
staticfiles_storage.base_url = old_url
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit, {'name': 'My Section'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value="My Section"',
msg_prefix="Couldn't find an input with the right value in the response")
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/test_admin/%s/admin_views/section/1/' % self.urlbit)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET_string_PK(self):
"""
Ensure GET on the change_view works (returns an HTTP 404 error, see
#11191) when passing a string as the PK argument for a model with an
integer PK field.
"""
response = self.client.get('/test_admin/%s/admin_views/section/abc/' % self.urlbit)
self.assertEqual(response.status_code, 404)
def test_basic_inheritance_GET_string_PK(self):
"""
Ensure GET on the change_view works on inherited models (returns an
HTTP 404 error, see #19951) when passing a string as the PK argument
for a model with an integer PK field.
"""
response = self.client.get('/test_admin/%s/admin_views/supervillain/abc/' % self.urlbit)
self.assertEqual(response.status_code, 404)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "Another Section",
# inline data
"article_set-TOTAL_FORMS": "3",
"article_set-INITIAL_FORMS": "0",
"article_set-MAX_NUM_FORMS": "0",
}
response = self.client.post('/test_admin/%s/admin_views/section/add/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_popup_add_POST(self):
"""
Ensure http response from a popup is properly escaped.
"""
post_data = {
'_popup': '1',
'title': 'title with a new\nline',
'content': 'some content',
'date_0': '2010-09-10',
'date_1': '14:55:39',
}
response = self.client.post('/test_admin/%s/admin_views/article/add/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddAnotherPopup')
self.assertContains(response, 'title with a new\\u000Aline')
# Post data for edit inline
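    # The *-TOTAL_FORMS / *-INITIAL_FORMS / *-MAX_NUM_FORMS keys are the formset management form:
    # three existing articles plus three blank extra forms.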
inline_post_data = {
"name": "Test section",
# inline data
"article_set-TOTAL_FORMS": "6",
"article_set-INITIAL_FORMS": "3",
"article_set-MAX_NUM_FORMS": "0",
"article_set-0-id": "1",
        # there is no title in the database; provide one here or the formset will fail.
"article_set-0-title": "Norske bostaver æøå skaper problemer",
"article_set-0-content": "<p>Middle content</p>",
"article_set-0-date_0": "2008-03-18",
"article_set-0-date_1": "11:54:58",
"article_set-0-section": "1",
"article_set-1-id": "2",
"article_set-1-title": "Need a title.",
"article_set-1-content": "<p>Oldest content</p>",
"article_set-1-date_0": "2000-03-18",
"article_set-1-date_1": "11:54:58",
"article_set-2-id": "3",
"article_set-2-title": "Need a title.",
"article_set-2-content": "<p>Newest content</p>",
"article_set-2-date_0": "2009-03-18",
"article_set-2-date_1": "11:54:58",
"article_set-3-id": "",
"article_set-3-title": "",
"article_set-3-content": "",
"article_set-3-date_0": "",
"article_set-3-date_1": "",
"article_set-4-id": "",
"article_set-4-title": "",
"article_set-4-content": "",
"article_set-4-date_0": "",
"article_set-4-date_1": "",
"article_set-5-id": "",
"article_set-5-title": "",
"article_set-5-content": "",
"article_set-5-date_0": "",
"article_set-5-date_1": "",
}
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_edit_save_as(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update({
'_saveasnew': 'Save+as+new',
"article_set-1-section": "1",
"article_set-2-section": "1",
"article_set-3-section": "1",
"article_set-4-section": "1",
"article_set-5-section": "1",
})
response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_change_list_sorting_callable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'o': 2})
self.assertContentBefore(response, 'Oldest content', 'Middle content',
"Results of sorting on callable are out of order.")
self.assertContentBefore(response, 'Middle content', 'Newest content',
"Results of sorting on callable are out of order.")
def test_change_list_sorting_model(self):
"""
Ensure we can sort on a list_display field that is a Model method
(column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'o': '-3'})
self.assertContentBefore(response, 'Newest content', 'Middle content',
"Results of sorting on Model method are out of order.")
self.assertContentBefore(response, 'Middle content', 'Oldest content',
"Results of sorting on Model method are out of order.")
def test_change_list_sorting_model_admin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
(column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'o': '4'})
self.assertContentBefore(response, 'Oldest content', 'Middle content',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, 'Middle content', 'Newest content',
"Results of sorting on ModelAdmin method are out of order.")
def test_change_list_sorting_model_admin_reverse(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin
method in reverse order (i.e. admin_order_field uses the '-' prefix)
(column 6 is 'model_year_reverse' in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'o': '6'})
self.assertContentBefore(response, '2009', '2008',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, '2008', '2000',
"Results of sorting on ModelAdmin method are out of order.")
# Let's make sure the ordering is right and that we don't get a
# FieldError when we change to descending order
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'o': '-6'})
self.assertContentBefore(response, '2000', '2008',
"Results of sorting on ModelAdmin method are out of order.")
self.assertContentBefore(response, '2008', '2009',
"Results of sorting on ModelAdmin method are out of order.")
def test_change_list_sorting_multiple(self):
p1 = Person.objects.create(name="Chris", gender=1, alive=True)
p2 = Person.objects.create(name="Chris", gender=2, alive=True)
p3 = Person.objects.create(name="Bob", gender=1, alive=True)
link1 = reverse('admin:admin_views_person_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_person_change', args=(p2.pk,))
link3 = reverse('admin:admin_views_person_change', args=(p3.pk,))
# Sort by name, gender
# This hard-codes the URL because it'll fail if it runs against the
# 'admin2' custom admin (which doesn't have the Person model).
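        # The 'o' query parameter is a dot-separated list of list_display column
        # indices; prefixing an index with '-' sorts that column in descending order.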
response = self.client.get('/test_admin/admin/admin_views/person/', {'o': '1.2'})
self.assertContentBefore(response, link3, link1)
self.assertContentBefore(response, link1, link2)
# Sort by gender descending, name
response = self.client.get('/test_admin/admin/admin_views/person/', {'o': '-2.1'})
self.assertContentBefore(response, link2, link3)
self.assertContentBefore(response, link3, link1)
def test_change_list_sorting_preserve_queryset_ordering(self):
"""
If no ordering is defined in `ModelAdmin.ordering` or in the query
string, then the underlying order of the queryset should not be
        changed, even if it is defined in `ModelAdmin.get_queryset()`.
Refs #11868, #7309.
"""
p1 = Person.objects.create(name="Amy", gender=1, alive=True, age=80)
p2 = Person.objects.create(name="Bob", gender=1, alive=True, age=70)
p3 = Person.objects.create(name="Chris", gender=2, alive=False, age=60)
link1 = reverse('admin:admin_views_person_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_person_change', args=(p2.pk,))
link3 = reverse('admin:admin_views_person_change', args=(p3.pk,))
# This hard-codes the URL because it'll fail if it runs against the
# 'admin2' custom admin (which doesn't have the Person model).
response = self.client.get('/test_admin/admin/admin_views/person/', {})
self.assertContentBefore(response, link3, link2)
self.assertContentBefore(response, link2, link1)
def test_change_list_sorting_model_meta(self):
# Test ordering on Model Meta is respected
l1 = Language.objects.create(iso='ur', name='Urdu')
l2 = Language.objects.create(iso='ar', name='Arabic')
link1 = reverse('admin:admin_views_language_change', args=(quote(l1.pk),))
link2 = reverse('admin:admin_views_language_change', args=(quote(l2.pk),))
response = self.client.get('/test_admin/admin/admin_views/language/', {})
self.assertContentBefore(response, link2, link1)
# Test we can override with query string
response = self.client.get('/test_admin/admin/admin_views/language/', {'o': '-1'})
self.assertContentBefore(response, link1, link2)
def test_change_list_sorting_override_model_admin(self):
# Test ordering on Model Admin is respected, and overrides Model Meta
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse('admin:admin_views_podcast_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_podcast_change', args=(p2.pk,))
response = self.client.get('/test_admin/admin/admin_views/podcast/', {})
self.assertContentBefore(response, link1, link2)
def test_multiple_sort_same_field(self):
# Check that we get the columns we expect if we have two columns
# that correspond to the same ordering field
dt = datetime.datetime.now()
p1 = Podcast.objects.create(name="A", release_date=dt)
p2 = Podcast.objects.create(name="B", release_date=dt - datetime.timedelta(10))
link1 = reverse('admin:admin_views_podcast_change', args=(quote(p1.pk),))
link2 = reverse('admin:admin_views_podcast_change', args=(quote(p2.pk),))
response = self.client.get('/test_admin/admin/admin_views/podcast/', {})
self.assertContentBefore(response, link1, link2)
p1 = ComplexSortedPerson.objects.create(name="Bob", age=10)
p2 = ComplexSortedPerson.objects.create(name="Amy", age=20)
link1 = reverse('admin:admin_views_complexsortedperson_change', args=(p1.pk,))
link2 = reverse('admin:admin_views_complexsortedperson_change', args=(p2.pk,))
response = self.client.get('/test_admin/admin/admin_views/complexsortedperson/', {})
# Should have 5 columns (including action checkbox col)
self.assertContains(response, '<th scope="col"', count=5)
self.assertContains(response, 'Name')
self.assertContains(response, 'Colored name')
# Check order
self.assertContentBefore(response, 'Name', 'Colored name')
# Check sorting - should be by name
self.assertContentBefore(response, link2, link1)
def test_sort_indicators_admin_order(self):
"""
Ensures that the admin shows default sort indicators for all
        kinds of 'ordering' fields: field names, methods on the model
        admin and on the model itself, and other callables. See #17252.
"""
models = [(AdminOrderedField, 'adminorderedfield'),
(AdminOrderedModelMethod, 'adminorderedmodelmethod'),
(AdminOrderedAdminMethod, 'adminorderedadminmethod'),
(AdminOrderedCallable, 'adminorderedcallable')]
for model, url in models:
model.objects.create(stuff='The Last Item', order=3)
model.objects.create(stuff='The First Item', order=1)
model.objects.create(stuff='The Middle Item', order=2)
response = self.client.get('/test_admin/admin/admin_views/%s/' % url, {})
self.assertEqual(response.status_code, 200)
# Should have 3 columns including action checkbox col.
self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
# Check if the correct column was selected. 2 is the index of the
# 'order' column in the model admin's 'list_display' with 0 being
# the implicit 'action_checkbox' and 1 being the column 'stuff'.
self.assertEqual(response.context['cl'].get_ordering_field_columns(), {2: 'asc'})
# Check order of records.
self.assertContentBefore(response, 'The First Item', 'The Middle Item')
self.assertContentBefore(response, 'The Middle Item', 'The Last Item')
def test_limited_filter(self):
"""Ensure admin changelist filters do not contain objects excluded via limit_choices_to.
This also tests relation-spanning filters (e.g. 'color__value').
"""
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div id="changelist-filter">',
msg_prefix="Expected filter not found in changelist view")
self.assertNotContains(response, '<a href="?color__id__exact=3">Blue</a>',
msg_prefix="Changelist filter not correctly limited by limit_choices_to")
def test_relation_spanning_filters(self):
response = self.client.get('/test_admin/%s/admin_views/chapterxtra1/' %
self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div id="changelist-filter">')
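        # Map each relation-spanning filter lookup to the values it can take and
        # a predicate used below to verify every object in the filtered changelist.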
filters = {
'chap__id__exact': dict(
values=[c.id for c in Chapter.objects.all()],
test=lambda obj, value: obj.chap.id == value),
'chap__title': dict(
values=[c.title for c in Chapter.objects.all()],
test=lambda obj, value: obj.chap.title == value),
'chap__book__id__exact': dict(
values=[b.id for b in Book.objects.all()],
test=lambda obj, value: obj.chap.book.id == value),
'chap__book__name': dict(
values=[b.name for b in Book.objects.all()],
test=lambda obj, value: obj.chap.book.name == value),
'chap__book__promo__id__exact': dict(
values=[p.id for p in Promo.objects.all()],
test=lambda obj, value: obj.chap.book.promo_set.filter(id=value).exists()),
'chap__book__promo__name': dict(
values=[p.name for p in Promo.objects.all()],
test=lambda obj, value: obj.chap.book.promo_set.filter(name=value).exists()),
}
for filter_path, params in filters.items():
for value in params['values']:
query_string = urlencode({filter_path: value})
# ensure filter link exists
self.assertContains(response, '<a href="?%s">' % query_string)
# ensure link works
filtered_response = self.client.get(
'/test_admin/%s/admin_views/chapterxtra1/?%s' % (
self.urlbit, query_string))
self.assertEqual(filtered_response.status_code, 200)
# ensure changelist contains only valid objects
for obj in filtered_response.context['cl'].queryset.all():
self.assertTrue(params['test'](obj, value))
def test_incorrect_lookup_parameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'notarealfield': '5'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
# Spanning relationships through a nonexistent related object (Refs #16716)
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'notarealfield__whatever': '5'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'color__id__exact': 'StringNotInteger!'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
# Regression test for #18530
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'pub_date__gte': 'foo'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
def test_isnull_lookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(title="I Could Go Anywhere", content="Versatile", date=datetime.datetime.now())
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
self.assertContains(response, '4 articles')
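        # Both the string ('false'/'true') and numeric ('0'/'1') forms of the
        # boolean should be accepted by the isnull lookup.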
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'false'})
self.assertContains(response, '3 articles')
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': '0'})
self.assertContains(response, '3 articles')
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'true'})
self.assertContains(response, '1 article')
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': '1'})
self.assertContains(response, '1 article')
def test_logout_and_password_change_URLs(self):
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
self.assertContains(response, '<a href="/test_admin/%s/logout/">' % self.urlbit)
self.assertContains(response, '<a href="/test_admin/%s/password_change/">' % self.urlbit)
def test_named_group_field_choices_change_list(self):
"""
Ensures the admin changelist shows correct values in the relevant column
for rows corresponding to instances of a model in which a named group
has been used in the choices option of a field.
"""
link1 = reverse('admin:admin_views_fabric_change', args=(1,), current_app=self.urlbit)
link2 = reverse('admin:admin_views_fabric_change', args=(2,), current_app=self.urlbit)
response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
fail_msg = "Changelist table isn't showing the right human-readable values set by a model field 'choices' option named group."
self.assertContains(response, '<a href="%s">Horizontal</a>' % link1, msg_prefix=fail_msg, html=True)
self.assertContains(response, '<a href="%s">Vertical</a>' % link2, msg_prefix=fail_msg, html=True)
def test_named_group_field_choices_filter(self):
"""
Ensures the filter UI shows correctly when at least one named group has
been used in the choices option of a model field.
"""
response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
fail_msg = "Changelist filter isn't showing options contained inside a model field 'choices' option named group."
self.assertContains(response, '<div id="changelist-filter">')
self.assertContains(response,
'<a href="?surface__exact=x">Horizontal</a>', msg_prefix=fail_msg, html=True)
self.assertContains(response,
'<a href="?surface__exact=y">Vertical</a>', msg_prefix=fail_msg, html=True)
def test_change_list_null_boolean_display(self):
Post.objects.create(public=None)
        # This hard-codes the URL because it'll fail if it runs
# against the 'admin2' custom admin (which doesn't have the
# Post model).
response = self.client.get("/test_admin/admin/admin_views/post/")
self.assertContains(response, 'icon-unknown.gif')
def test_i18n_language_non_english_default(self):
"""
Check if the JavaScript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), translation.override('en-us'):
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
def test_i18n_language_non_english_fallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), translation.override('none'):
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertContains(response, 'Choisir une heure')
def test_L10N_deactivated(self):
"""
        Check that, if L10N is deactivated, the JavaScript i18n view doesn't
        return localized date/time formats. Refs #14824.
"""
with self.settings(LANGUAGE_CODE='ru', USE_L10N=False), translation.override('none'):
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertNotContains(response, '%d.%m.%Y %H:%M:%S')
self.assertContains(response, '%Y-%m-%d %H:%M:%S')
def test_disallowed_filtering(self):
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as calls:
response = self.client.get("/test_admin/admin/admin_views/album/?owner__email__startswith=fuzzy")
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
# Filters are allowed if explicitly included in list_filter
response = self.client.get("/test_admin/admin/admin_views/thing/?color__value__startswith=red")
self.assertEqual(response.status_code, 200)
response = self.client.get("/test_admin/admin/admin_views/thing/?color__value=red")
self.assertEqual(response.status_code, 200)
# Filters should be allowed if they involve a local field without the
# need to whitelist them in list_filter or date_hierarchy.
response = self.client.get("/test_admin/admin/admin_views/person/?age__gt=30")
self.assertEqual(response.status_code, 200)
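        # The workhour changelist filter on employee resolves to
        # employee__person_ptr__exact (Employee inherits Person); such
        # parent-link lookups should also be allowed without whitelisting.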
e1 = Employee.objects.create(name='Anonymous', gender=1, age=22, alive=True, code='123')
e2 = Employee.objects.create(name='Visitor', gender=2, age=19, alive=True, code='124')
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
response = self.client.get("/test_admin/admin/admin_views/workhour/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'employee__person_ptr__exact')
response = self.client.get("/test_admin/admin/admin_views/workhour/?employee__person_ptr__exact=%d" % e1.pk)
self.assertEqual(response.status_code, 200)
def test_disallowed_to_field(self):
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.get("/test_admin/admin/admin_views/section/", {TO_FIELD_VAR: 'missing_field'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
        # Specifying a field that is not referred to by any other model
        # registered to this admin site should be rejected with a 400.
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.get("/test_admin/admin/admin_views/section/", {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
# #23839 - Primary key should always be allowed, even if the referenced model isn't registered.
response = self.client.get("/test_admin/admin/admin_views/notreferenced/", {TO_FIELD_VAR: 'id'})
self.assertEqual(response.status_code, 200)
# #23915 - Specifying a field referenced by another model though a m2m should be allowed.
response = self.client.get("/test_admin/admin/admin_views/recipe/", {TO_FIELD_VAR: 'rname'})
self.assertEqual(response.status_code, 200)
# #23604, #23915 - Specifying a field referenced through a reverse m2m relationship should be allowed.
response = self.client.get("/test_admin/admin/admin_views/ingredient/", {TO_FIELD_VAR: 'iname'})
self.assertEqual(response.status_code, 200)
        # #23329 - Specifying a field that is not referred to by any other model directly registered
# to this admin site but registered through inheritance should be allowed.
response = self.client.get("/test_admin/admin/admin_views/referencedbyparent/", {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 200)
        # #23431 - Specifying a field that is only referred to by an inline of a registered
# model should be allowed.
response = self.client.get("/test_admin/admin/admin_views/referencedbyinline/", {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 200)
# We also want to prevent the add and change view from leaking a
# disallowed field value.
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.post("/test_admin/admin/admin_views/section/add/", {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
section = Section.objects.create()
with patch_logger('django.security.DisallowedModelAdminToField', 'error') as calls:
response = self.client.post("/test_admin/admin/admin_views/section/%d/" % section.pk, {TO_FIELD_VAR: 'name'})
self.assertEqual(response.status_code, 400)
self.assertEqual(len(calls), 1)
def test_allowed_filtering_15103(self):
"""
        Regression test for ticket 15103 - filtering on fields defined in a
ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields
can break.
"""
# Filters should be allowed if they are defined on a ForeignKey pointing to this model
response = self.client.get("/test_admin/admin/admin_views/inquisition/?leader__name=Palin&leader__age=27")
self.assertEqual(response.status_code, 200)
def test_popup_dismiss_related(self):
"""
Regression test for ticket 20664 - ensure the pk is properly quoted.
"""
actor = Actor.objects.create(name="Palin", age=27)
response = self.client.get("/test_admin/admin/admin_views/actor/?%s" % IS_POPUP_VAR)
self.assertContains(response, "opener.dismissRelatedLookupPopup(window, '%s')" % actor.pk)
def test_hide_change_password(self):
"""
Tests if the "change password" link in the admin is hidden if the User
does not have a usable password set.
(against 9bea85795705d015cdadc82c68b99196a8554f5c)
"""
user = User.objects.get(username='super')
user.set_unusable_password()
user.save()
response = self.client.get('/test_admin/admin/')
self.assertNotContains(response, reverse('admin:password_change'),
msg_prefix='The "change password" link should not be displayed if a user does not have a usable password.')
def test_change_view_with_show_delete_extra_context(self):
"""
        Ensure that the 'show_delete' context variable in the admin's change
view actually controls the display of the delete button.
Refs #10057.
"""
instance = UndeletableObject.objects.create(name='foo')
response = self.client.get('/test_admin/%s/admin_views/undeletableobject/%d/' %
(self.urlbit, instance.pk))
self.assertNotContains(response, 'deletelink')
def test_allows_attributeerror_to_bubble_up(self):
"""
        Ensure that AttributeErrors are allowed to bubble up when raised inside
        a change list view.
        Requires a model instance to be created so there's something to display.
        Refs: #16655, #18593, and #18747
"""
Simple.objects.create()
with self.assertRaises(AttributeError):
self.client.get('/test_admin/%s/admin_views/simple/' % self.urlbit)
def test_changelist_with_no_change_url(self):
"""
        ModelAdmin.changelist_view shouldn't result in a NoReverseMatch if the
        URL for change_view is removed from get_urls().
        Regression test for #20934.
"""
UnchangeableObject.objects.create()
response = self.client.get('/test_admin/admin/admin_views/unchangeableobject/')
self.assertEqual(response.status_code, 200)
# Check the format of the shown object -- shouldn't contain a change link
self.assertContains(response, '<th class="field-__str__">UnchangeableObject object</th>', html=True)
def test_invalid_appindex_url(self):
"""
#21056 -- URL reversing shouldn't work for nonexistent apps.
"""
good_url = '/test_admin/admin/admin_views/'
confirm_good_url = reverse('admin:app_list',
kwargs={'app_label': 'admin_views'})
self.assertEqual(good_url, confirm_good_url)
with self.assertRaises(NoReverseMatch):
reverse('admin:app_list', kwargs={'app_label': 'this_should_fail'})
with self.assertRaises(NoReverseMatch):
reverse('admin:app_list', args=('admin_views2',))
def test_proxy_model_content_type_is_used_for_log_entries(self):
"""
Log entries for proxy models should have the proxy model's content
type.
Regression test for #21084.
"""
color2_content_type = ContentType.objects.get_for_model(Color2, for_concrete_model=False)
# add
color2_add_url = reverse('admin:admin_views_color2_add')
self.client.post(color2_add_url, {'value': 'orange'})
color2_addition_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_addition_log.content_type)
# change
color_id = color2_addition_log.object_id
color2_change_url = reverse('admin:admin_views_color2_change', args=(color_id,))
self.client.post(color2_change_url, {'value': 'blue'})
color2_change_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_change_log.content_type)
# delete
color2_delete_url = reverse('admin:admin_views_color2_delete', args=(color_id,))
self.client.post(color2_delete_url)
color2_delete_log = LogEntry.objects.all()[0]
self.assertEqual(color2_content_type, color2_delete_log.content_type)
def test_adminsite_display_site_url(self):
"""
#13749 - Admin should display link to front-end site 'View site'
"""
url = reverse('admin:index')
response = self.client.get(url)
self.assertEqual(response.context['site_url'], '/my-site-url/')
self.assertContains(response, '<a href="/my-site-url/">View site</a>')
@override_settings(TEMPLATE_DIRS=ADMIN_VIEW_TEMPLATES_DIR)
class AdminCustomTemplateTests(AdminViewBasicTestCase):
def test_extended_bodyclass_template_change_form(self):
"""
Ensure that the admin/change_form.html template uses block.super in the
bodyclass block.
"""
response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit)
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_change_password(self):
"""
        Ensure that the auth/user/change_password.html template uses
        block.super in the bodyclass block.
"""
user = User.objects.get(username='super')
response = self.client.get('/test_admin/%s/auth/user/%s/password/' % (self.urlbit, user.id))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_index(self):
"""
Ensure that the admin/index.html template uses block.super in the
bodyclass block.
"""
response = self.client.get('/test_admin/%s/' % self.urlbit)
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_change_list(self):
"""
        Ensure that the admin/change_list.html template uses block.super
in the bodyclass block.
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_login(self):
"""
Ensure that the admin/login.html template uses block.super in the
bodyclass block.
"""
self.client.logout()
response = self.client.get('/test_admin/%s/login/' % self.urlbit)
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_delete_confirmation(self):
"""
Ensure that the admin/delete_confirmation.html template uses
block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
response = self.client.get('/test_admin/%s/auth/group/%s/delete/' % (self.urlbit, group.id))
self.assertContains(response, 'bodyclass_consistency_check ')
def test_extended_bodyclass_template_delete_selected_confirmation(self):
"""
Ensure that the admin/delete_selected_confirmation.html template uses
        block.super in the bodyclass block.
"""
group = Group.objects.create(name="foogroup")
post_data = {
'action': 'delete_selected',
'selected_across': '0',
'index': '0',
'_selected_action': group.id
}
response = self.client.post('/test_admin/%s/auth/group/' % (self.urlbit), post_data)
self.assertEqual(response.context['site_header'], 'Django administration')
self.assertContains(response, 'bodyclass_consistency_check ')
def test_filter_with_custom_template(self):
"""
Ensure that one can use a custom template to render an admin filter.
Refs #17515.
"""
response = self.client.get("/test_admin/admin/admin_views/color2/")
self.assertTemplateUsed(response, 'custom_filter_template.html')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewFormUrlTest(TestCase):
fixtures = ["admin-views-users.xml"]
urlbit = "admin3"
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_change_form_URL_has_correct_value(self):
"""
Tests whether change_view has form_url in response.context
"""
response = self.client.get('/test_admin/%s/admin_views/section/1/' % self.urlbit)
self.assertIn('form_url', response.context, msg='form_url not present in response.context')
self.assertEqual(response.context['form_url'], 'pony')
def test_initial_data_can_be_overridden(self):
"""
Tests that the behavior for setting initial
form data can be overridden in the ModelAdmin class.
Usually, the initial value is set via the GET params.
"""
response = self.client.get('/test_admin/%s/admin_views/restaurant/add/' % self.urlbit, {'name': 'test_value'})
# this would be the usual behaviour
self.assertNotContains(response, 'value="test_value"')
# this is the overridden behaviour
self.assertContains(response, 'value="overridden_value"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminJavaScriptTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_js_minified_only_if_debug_is_false(self):
"""
Ensure that the minified versions of the JS files are only used when
DEBUG is False.
Refs #17521.
"""
with override_settings(DEBUG=False):
response = self.client.get(
'/test_admin/%s/admin_views/section/add/' % 'admin')
self.assertNotContains(response, 'jquery.js')
self.assertContains(response, 'jquery.min.js')
self.assertNotContains(response, 'prepopulate.js')
self.assertContains(response, 'prepopulate.min.js')
self.assertNotContains(response, 'actions.js')
self.assertContains(response, 'actions.min.js')
self.assertNotContains(response, 'collapse.js')
self.assertContains(response, 'collapse.min.js')
self.assertNotContains(response, 'inlines.js')
self.assertContains(response, 'inlines.min.js')
with override_settings(DEBUG=True):
response = self.client.get(
'/test_admin/%s/admin_views/section/add/' % 'admin')
self.assertContains(response, 'jquery.js')
self.assertNotContains(response, 'jquery.min.js')
self.assertContains(response, 'prepopulate.js')
self.assertNotContains(response, 'prepopulate.min.js')
self.assertContains(response, 'actions.js')
self.assertNotContains(response, 'actions.min.js')
self.assertContains(response, 'collapse.js')
self.assertNotContains(response, 'collapse.min.js')
self.assertContains(response, 'inlines.js')
self.assertNotContains(response, 'inlines.min.js')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class SaveAsTests(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-person.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_save_as_duplication(self):
"""Ensure save as actually creates a new person"""
post_data = {'_saveasnew': '', 'name': 'John M', 'gender': 1, 'age': 42}
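        # Posting with _saveasnew should create a new Person and leave the
        # original (pk=1) untouched.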
self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(len(Person.objects.filter(name='John M')), 1)
self.assertEqual(len(Person.objects.filter(id=1)), 1)
def test_save_as_display(self):
"""
        Ensure that 'save as' is displayed when it is activated, and that after
        submitting invalid data with save_as_new the form targets the add view
        rather than overwriting the initial model.
"""
response = self.client.get('/test_admin/admin/admin_views/person/1/')
self.assertTrue(response.context['save_as'])
post_data = {'_saveasnew': '', 'name': 'John M', 'gender': 3, 'alive': 'checked'}
response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(response.context['form_url'], '/test_admin/admin/admin_views/person/add/')
@override_settings(ROOT_URLCONF="admin_views.urls")
class CustomModelAdminTest(AdminViewBasicTestCase):
urlbit = "admin2"
def test_custom_admin_site_login_form(self):
self.client.logout()
response = self.client.get('/test_admin/admin2/', follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin2/login/', {
REDIRECT_FIELD_NAME: '/test_admin/admin2/',
'username': 'customform',
'password': 'secret',
}, follow=True)
self.assertIsInstance(login, TemplateResponse)
self.assertEqual(login.status_code, 200)
self.assertContains(login, 'custom form error')
def test_custom_admin_site_login_template(self):
self.client.logout()
response = self.client.get('/test_admin/admin2/', follow=True)
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/login.html')
self.assertContains(response, 'Hello from a custom login template')
def test_custom_admin_site_logout_template(self):
response = self.client.get('/test_admin/admin2/logout/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/logout.html')
self.assertContains(response, 'Hello from a custom logout template')
def test_custom_admin_site_index_view_and_template(self):
try:
response = self.client.get('/test_admin/admin2/')
except TypeError:
self.fail('AdminSite.index_template should accept a list of template paths')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/index.html')
self.assertContains(response, 'Hello from a custom index template *bar*')
def test_custom_admin_site_app_index_view_and_template(self):
response = self.client.get('/test_admin/admin2/admin_views/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/app_index.html')
self.assertContains(response, 'Hello from a custom app_index template')
def test_custom_admin_site_password_change_template(self):
response = self.client.get('/test_admin/admin2/password_change/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_form.html')
self.assertContains(response, 'Hello from a custom password change form template')
def test_custom_admin_site_password_change_with_extra_context(self):
response = self.client.get('/test_admin/admin2/password_change/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_form.html')
self.assertContains(response, 'eggs')
def test_custom_admin_site_password_change_done_template(self):
response = self.client.get('/test_admin/admin2/password_change/done/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/password_change_done.html')
self.assertContains(response, 'Hello from a custom password change done template')
def test_custom_admin_site_view(self):
self.client.login(username='super', password='secret')
response = self.client.get('/test_admin/%s/my_view/' % self.urlbit)
self.assertEqual(response.content, b"Django is a magical pony!")
def test_pwd_change_custom_template(self):
self.client.login(username='super', password='secret')
su = User.objects.get(username='super')
try:
response = self.client.get('/test_admin/admin4/auth/user/%s/password/' % su.pk)
except TypeError:
self.fail('ModelAdmin.change_user_password_template should accept a list of template paths')
self.assertEqual(response.status_code, 200)
def get_perm(Model, perm):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model)
return Permission.objects.get(content_type=ct, codename=perm)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
fixtures = ['admin-views-users.xml']
def setUp(self):
"""Test setup."""
        # Set up permissions for our users who can add, change, and delete.
# We can't put this into the fixture, because the content type id
# and the permission id could be different on each run of the test.
opts = Article._meta
# User who can add Articles
add_user = User.objects.get(username='adduser')
add_user.user_permissions.add(get_perm(Article,
get_permission_codename('add', opts)))
# User who can change Articles
change_user = User.objects.get(username='changeuser')
change_user.user_permissions.add(get_perm(Article,
get_permission_codename('change', opts)))
# User who can delete Articles
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Article,
get_permission_codename('delete', opts)))
delete_user.user_permissions.add(get_perm(Section,
get_permission_codename('delete', Section._meta)))
# login POST dicts
self.super_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'super',
'password': 'secret',
}
self.super_email_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'super@example.com',
'password': 'secret',
}
self.super_email_bad_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'super@example.com',
'password': 'notsecret',
}
self.adduser_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'adduser',
'password': 'secret',
}
self.changeuser_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'changeuser',
'password': 'secret',
}
self.deleteuser_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'deleteuser',
'password': 'secret',
}
self.joepublic_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'joepublic',
'password': 'secret',
}
self.no_username_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'password': 'secret',
}
def test_login(self):
"""
Make sure only staff members can log in.
        Successful posts to the login page will redirect to the original URL.
Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
# Super User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Test if user enters email address
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# only correct passwords get a username hint
login = self.client.post(login_url, self.super_email_bad_login)
self.assertContains(login, ERROR_MESSAGE)
new_user = User(username='jondoe', password='secret', email='super@example.com')
new_user.save()
        # Check that a login attempt doesn't return a 500 when multiple users share the same email address.
login = self.client.post(login_url, self.super_email_login)
self.assertContains(login, ERROR_MESSAGE)
# Add User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.adduser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Change User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.changeuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Delete User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.deleteuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Regular User should not be able to login.
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
login = self.client.post(login_url, self.no_username_login)
self.assertEqual(login.status_code, 200)
form = login.context[0].get('form')
self.assertEqual(form.errors['username'][0], 'This field is required.')
def test_login_successfully_redirects_to_original_URL(self):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
query_string = 'the-answer=42'
redirect_url = '/test_admin/admin/?%s' % query_string
new_next = {REDIRECT_FIELD_NAME: redirect_url}
post_data = self.super_login.copy()
post_data.pop(REDIRECT_FIELD_NAME)
login = self.client.post(
'%s?%s' % (reverse('admin:login'), urlencode(new_next)),
post_data)
self.assertRedirects(login, redirect_url)
def test_double_login_is_not_allowed(self):
"""Regression test for #19327"""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
# Logging in with non-admin user fails
login = self.client.post(login_url, self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, ERROR_MESSAGE)
# Establish a valid admin session
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
# Logging in with admin user while already logged in
login = self.client.post(login_url, self.super_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
def test_add_view(self):
"""Test add view restricts access and actually adds items."""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
add_dict = {'title': 'Døm ikke',
'content': '<p>great article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': 1}
# Change User should not have access to add articles
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.changeuser_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
response = self.client.get('/test_admin/admin/admin_views/article/add/')
self.assertEqual(response.status_code, 403)
# Try POST just to make sure
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.all().count(), 3)
self.client.get('/test_admin/admin/logout/')
# Add user may login and POST to add view, then redirect to admin root
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.adduser_login)
addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
change_list_link = '› <a href="/test_admin/admin/admin_views/article/">Articles</a>'
self.assertNotContains(addpage, change_list_link,
msg_prefix='User restricted to add permission is given link to change list view in breadcrumbs.')
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertRedirects(post, '/test_admin/admin/')
self.assertEqual(Article.objects.all().count(), 4)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a created object')
self.client.get('/test_admin/admin/logout/')
# Super can add too, but is redirected to the change list view
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.super_login)
addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
self.assertContains(addpage, change_list_link,
msg_prefix='Unrestricted user is not given link to change list view in breadcrumbs.')
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
self.assertEqual(Article.objects.all().count(), 5)
self.client.get('/test_admin/admin/logout/')
        # Refs #8509 - if a normal user is already logged in, it is possible
        # to switch to the superuser without error.
self.client.login(username='joepublic', password='secret')
# Check and make sure that if user expires, data still persists
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.super_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
def test_change_view(self):
"""Change view should restrict access and allow users to edit items."""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
change_dict = {'title': 'Ikke fordømt',
'content': '<p>edited article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': 1}
        # The add user should not be able to view the list of articles or change any of them.
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.adduser_login)
response = self.client.get('/test_admin/admin/admin_views/article/')
self.assertEqual(response.status_code, 403)
response = self.client.get('/test_admin/admin/admin_views/article/1/')
self.assertEqual(response.status_code, 403)
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertEqual(post.status_code, 403)
self.client.get('/test_admin/admin/logout/')
# change user can view all items and edit them
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.changeuser_login)
response = self.client.get('/test_admin/admin/admin_views/article/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/test_admin/admin/admin_views/article/1/')
self.assertEqual(response.status_code, 200)
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
self.assertEqual(Article.objects.get(pk=1).content, '<p>edited article</p>')
        # One error in the form should produce a singular error message; multiple errors, the plural form.
change_dict['title'] = ''
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertContains(post, 'Please correct the error below.',
msg_prefix='Singular error message not found in response to post with one error')
change_dict['content'] = ''
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertContains(post, 'Please correct the errors below.',
msg_prefix='Plural error message not found in response to post with multiple errors')
self.client.get('/test_admin/admin/logout/')
# Test redirection when using row-level change permissions. Refs #11513.
RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
RowLevelChangePermissionModel.objects.create(id=2, name="even id")
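        # The test ModelAdmin only grants change permission for even ids, so the
        # odd-id object should return 403 while the even-id object can be edited.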
for login_dict in [self.super_login, self.changeuser_login, self.adduser_login, self.deleteuser_login]:
self.client.post(login_url, login_dict)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/')
self.assertEqual(response.status_code, 403)
response = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/', {'name': 'changed'})
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
self.assertEqual(response.status_code, 403)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/', {'name': 'changed'})
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
self.assertRedirects(response, '/test_admin/admin/')
self.client.get('/test_admin/admin/logout/')
for login_dict in [self.joepublic_login, self.no_username_login]:
self.client.post(login_url, login_dict)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/', {'name': 'changed'}, follow=True)
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/', {'name': 'changed again'}, follow=True)
self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
self.client.get('/test_admin/admin/logout/')
def test_history_view(self):
"""History view should restrict access."""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
        # The add user should not be able to view an article's history.
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.adduser_login)
response = self.client.get('/test_admin/admin/admin_views/article/1/history/')
self.assertEqual(response.status_code, 403)
self.client.get('/test_admin/admin/logout/')
# change user can view all items and edit them
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.changeuser_login)
response = self.client.get('/test_admin/admin/admin_views/article/1/history/')
self.assertEqual(response.status_code, 200)
# Test redirection when using row-level change permissions. Refs #11513.
RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
RowLevelChangePermissionModel.objects.create(id=2, name="even id")
for login_dict in [self.super_login, self.changeuser_login, self.adduser_login, self.deleteuser_login]:
self.client.post(login_url, login_dict)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/history/')
self.assertEqual(response.status_code, 403)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/history/')
self.assertEqual(response.status_code, 200)
self.client.get('/test_admin/admin/logout/')
for login_dict in [self.joepublic_login, self.no_username_login]:
self.client.post(login_url, login_dict)
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/history/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
response = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/history/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'login-form')
self.client.get('/test_admin/admin/logout/')
def test_conditionally_show_add_section_link(self):
"""
The foreign key widget should only show the "add related" button if the
user has permission to add that related item.
"""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
# Set up and log in user.
url = '/test_admin/admin/admin_views/article/add/'
add_link_text = ' class="add-another"'
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.adduser_login)
# The add user can't add sections yet, so they shouldn't see the "add
# section" link.
response = self.client.get(url)
self.assertNotContains(response, add_link_text)
# Allow the add user to add sections too. Now they can see the "add
# section" link.
add_user = User.objects.get(username='adduser')
perm = get_perm(Section, get_permission_codename('add', Section._meta))
add_user.user_permissions.add(perm)
response = self.client.get(url)
self.assertContains(response, add_link_text)
def test_custom_model_admin_templates(self):
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.super_login)
# Test custom change list template with custom extra context
response = self.client.get('/test_admin/admin/admin_views/customarticle/')
self.assertContains(response, "var hello = 'Hello!';")
self.assertTemplateUsed(response, 'custom_admin/change_list.html')
# Test custom add form template
response = self.client.get('/test_admin/admin/admin_views/customarticle/add/')
self.assertTemplateUsed(response, 'custom_admin/add_form.html')
# Add an article so we can test delete, change, and history views
post = self.client.post('/test_admin/admin/admin_views/customarticle/add/', {
'content': '<p>great article</p>',
'date_0': '2008-03-18',
'date_1': '10:54:39'
})
self.assertRedirects(post, '/test_admin/admin/admin_views/customarticle/')
self.assertEqual(CustomArticle.objects.all().count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/change_form.html')
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/delete/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/delete_confirmation.html')
response = self.client.post('/test_admin/admin/admin_views/customarticle/', data={
'index': 0,
'action': ['delete_selected'],
'_selected_action': ['1'],
})
self.assertTemplateUsed(response, 'custom_admin/delete_selected_confirmation.html')
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/history/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/object_history.html')
self.client.get('/test_admin/admin/logout/')
def test_delete_view(self):
"""Delete view should restrict access and actually delete items."""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
delete_dict = {'post': 'yes'}
# add user should not be able to delete articles
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.adduser_login)
response = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
self.assertEqual(response.status_code, 403)
post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.all().count(), 3)
self.client.get('/test_admin/admin/logout/')
# Delete user can delete
self.client.get('/test_admin/admin/')
self.client.post(login_url, self.deleteuser_login)
response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 3</li>")
# test response contains link to related Article
self.assertContains(response, "admin_views/article/1/")
response = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
self.assertContains(response, "admin_views/article/1/")
self.assertContains(response, "<h2>Summary</h2>")
self.assertContains(response, "<li>Articles: 1</li>")
self.assertEqual(response.status_code, 200)
post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
self.assertRedirects(post, '/test_admin/admin/')
self.assertEqual(Article.objects.all().count(), 2)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a deleted object')
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, '1')
self.client.get('/test_admin/admin/logout/')
def test_disabled_permissions_when_logged_in(self):
self.client.login(username='super', password='secret')
superuser = User.objects.get(username='super')
superuser.is_active = False
superuser.save()
response = self.client.get('/test_admin/admin/', follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, 'Log out')
response = self.client.get('/test_admin/admin/secure-view/', follow=True)
self.assertContains(response, 'id="login-form"')
def test_disabled_staff_permissions_when_logged_in(self):
self.client.login(username='super', password='secret')
superuser = User.objects.get(username='super')
superuser.is_staff = False
superuser.save()
response = self.client.get('/test_admin/admin/', follow=True)
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, 'Log out')
response = self.client.get('/test_admin/admin/secure-view/', follow=True)
self.assertContains(response, 'id="login-form"')
def test_app_index_fail_early(self):
"""
If a user has no module perms, avoid iterating over all the modeladmins
in the registry.
"""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
opts = Article._meta
change_user = User.objects.get(username='changeuser')
permission = get_perm(Article, get_permission_codename('change', opts))
self.client.post(login_url, self.changeuser_login)
# with the change permission removed, the user has no permissions for this module
change_user.user_permissions.remove(permission)
response = self.client.get('/test_admin/admin/admin_views/')
self.assertEqual(response.status_code, 403)
# the user now has module permissions
change_user.user_permissions.add(permission)
response = self.client.get('/test_admin/admin/admin_views/')
self.assertEqual(response.status_code, 200)
def test_shortcut_view_only_available_to_staff(self):
"""
Only admin users should be able to use the admin shortcut view.
"""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk='foo')
shortcut_url = "/test_admin/admin/r/%s/%s/" % (model_ctype.pk, obj.pk)
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=True)
self.assertTemplateUsed(response, 'admin/login.html')
# Logged in? Redirect.
self.client.login(username='super', password='secret')
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
# Domain may depend on contrib.sites tests also run
six.assertRegex(self, response.url, 'http://(testserver|example.com)/dummy/foo/')
def test_has_module_permission(self):
"""
Ensure that has_module_permission() returns True for all users who
have any permission for that module (add, change, or delete), so that
the module is displayed on the admin index page.
"""
login_url = reverse('admin:login') + '?next=/test_admin/admin/'
self.client.post(login_url, self.super_login)
response = self.client.get('/test_admin/admin/')
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.get('/test_admin/admin/logout/')
self.client.post(login_url, self.adduser_login)
response = self.client.get('/test_admin/admin/')
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.get('/test_admin/admin/logout/')
self.client.post(login_url, self.changeuser_login)
response = self.client.get('/test_admin/admin/')
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.get('/test_admin/admin/logout/')
self.client.post(login_url, self.deleteuser_login)
response = self.client.get('/test_admin/admin/')
self.assertContains(response, 'admin_views')
self.assertContains(response, 'Articles')
self.client.get('/test_admin/admin/logout/')
def test_overriding_has_module_permission(self):
"""
Ensure that overriding has_module_permission() has the desired effect.
In this case, it always returns False, so the module should not be
displayed on the admin index page for any users.
"""
login_url = reverse('admin:login') + '?next=/test_admin/admin7/'
self.client.post(login_url, self.super_login)
response = self.client.get('/test_admin/admin7/')
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.get('/test_admin/admin7/logout/')
self.client.post(login_url, self.adduser_login)
response = self.client.get('/test_admin/admin7/')
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.get('/test_admin/admin7/logout/')
self.client.post(login_url, self.changeuser_login)
response = self.client.get('/test_admin/admin7/')
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.get('/test_admin/admin7/logout/')
self.client.post(login_url, self.deleteuser_login)
response = self.client.get('/test_admin/admin7/')
self.assertNotContains(response, 'admin_views')
self.assertNotContains(response, 'Articles')
self.client.get('/test_admin/admin7/logout/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewsNoUrlTest(TestCase):
"""Regression test for #17333"""
fixtures = ['admin-views-users.xml']
def setUp(self):
opts = Report._meta
# User who can change Reports
change_user = User.objects.get(username='changeuser')
change_user.user_permissions.add(get_perm(Report,
get_permission_codename('change', opts)))
# login POST dict
self.changeuser_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'changeuser',
'password': 'secret',
}
def test_no_standard_modeladmin_urls(self):
"""Admin index views don't break when user's ModelAdmin removes standard urls"""
self.client.get('/test_admin/admin/')
r = self.client.post(reverse('admin:login'), self.changeuser_login)
r = self.client.get('/test_admin/admin/')
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.get('/test_admin/admin/logout/')
@skipUnlessDBFeature('can_defer_constraint_checks')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewDeletedObjectsTest(TestCase):
fixtures = ['admin-views-users.xml', 'deleted-objects.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(br"""<li>Plot: <a href=".+/admin_views/plot/1/">World Domination</a>\s*<ul>\s*<li>Plot details: <a href=".+/admin_views/plotdetails/1/">almost finished</a>""")
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
six.assertRegex(self, response.content, pattern)
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = """<li>Cyclic one: <a href="/test_admin/admin/admin_views/cyclicone/1/">I am recursive</a>"""
two = """<li>Cyclic two: <a href="/test_admin/admin/admin_views/cyclictwo/1/">I am recursive too</a>"""
response = self.client.get('/test_admin/admin/admin_views/cyclicone/%s/delete/' % quote(1))
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Plot,
get_permission_codename('delete', Plot._meta)))
self.assertTrue(self.client.login(username='deleteuser',
password='secret'))
response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(1))
self.assertContains(response, "your account doesn't have permission to delete the following types of objects")
self.assertContains(response, "<li>plot details</li>")
def test_protected(self):
q = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q, answer="Because.")
a2 = Answer.objects.create(question=q, answer="Yes.")
response = self.client.get("/test_admin/admin/admin_views/question/%s/delete/" % quote(q.pk))
self.assertContains(response, "would require deleting the following protected related objects")
self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Because.</a></li>' % a1.pk)
self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Yes.</a></li>' % a2.pk)
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/1/">World Domination</a>"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
self.assertContains(response, should_contain)
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/2/">World Peace</a></li>"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
"""<li>Villain: <a href="/test_admin/admin/admin_views/villain/3/">Bob</a>""",
"""<li>Super villain: <a href="/test_admin/admin/admin_views/supervillain/3/">Bob</a>""",
"""<li>Secret hideout: floating castle""",
"""<li>Super secret hideout: super floating castle!"""
]
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(3))
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get('/test_admin/admin/admin_views/supervillain/%s/delete/' % quote(3))
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = Plot.objects.get(pk=3)
FunkyTag.objects.create(content_object=plot, name='hott')
should_contain = """<li>Funky tag: <a href="/test_admin/admin/admin_views/funkytag/1/">hott"""
response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(3))
self.assertContains(response, should_contain)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class TestGenericRelations(TestCase):
fixtures = ['admin-views-users.xml', 'deleted-objects.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def test_generic_content_object_in_list_display(self):
plot = Plot.objects.get(pk=3)
FunkyTag.objects.create(content_object=plot, name='hott')
response = self.client.get('/test_admin/admin/admin_views/funkytag/')
self.assertContains(response, "%s</td>" % plot)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewStringPrimaryKeyTest(TestCase):
fixtures = ['admin-views-users.xml', 'string-primary-key.xml']
def __init__(self, *args):
super(AdminViewStringPrimaryKeyTest, self).__init__(*args)
self.pk = """abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
def setUp(self):
self.client.login(username='super', password='secret')
content_type_pk = ContentType.objects.get_for_model(ModelWithStringPrimaryKey).pk
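# log_action(user_id, content_type_id, object_id, object_repr, action_flag, ...); action_flag=2 is CHANGE.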
LogEntry.objects.log_action(100, content_type_pk, self.pk, self.pk, 2, change_message='Changed something')
def tearDown(self):
self.client.logout()
def test_get_history_view(self):
"""
Retrieving the history for an object using urlencoded form of primary
key should work.
Refs #12349, #18550.
"""
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/history/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertContains(response, 'Changed something')
self.assertEqual(response.status_code, 200)
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertEqual(response.status_code, 200)
def test_changelist_to_changeform_link(self):
"Link to the changeform of the object in changelist should use reverse() and be quoted -- #18072"
prefix = '/test_admin/admin/admin_views/modelwithstringprimarykey/'
response = self.client.get(prefix)
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
pk_final_url = escape(iri_to_uri(quote(self.pk)))
should_contain = """<th class="field-__str__"><a href="%s%s/">%s</a></th>""" % (prefix, pk_final_url, escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"The link from the recent actions list referring to the changeform of the object should be quoted"
response = self.client.get('/test_admin/admin/')
link = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(self.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_without_content_type(self):
"If a LogEntry is missing content_type it will not display it in span tag under the hyperlink."
response = self.client.get('/test_admin/admin/')
link = reverse('admin:admin_views_modelwithstringprimarykey_change', args=(quote(self.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(self.pk))
self.assertContains(response, should_contain)
should_contain = "Model with string primary key" # capitalized in Recent Actions
self.assertContains(response, should_contain)
logentry = LogEntry.objects.get(content_type__name__iexact=should_contain)
# http://code.djangoproject.com/ticket/10275
# if the log entry doesn't have a content type it should still be
# possible to view the Recent Actions part
logentry.content_type = None
logentry.save()
counted_presence_before = response.content.count(force_bytes(should_contain))
response = self.client.get('/test_admin/admin/')
counted_presence_after = response.content.count(force_bytes(should_contain))
self.assertEqual(counted_presence_before - 1,
counted_presence_after)
def test_logentry_get_admin_url(self):
"LogEntry.get_admin_url returns a URL to edit the entry's object or None for non-existent (possibly deleted) models"
log_entry_name = "Model with string primary key" # capitalized in Recent Actions
logentry = LogEntry.objects.get(content_type__name__iexact=log_entry_name)
model = "modelwithstringprimarykey"
desired_admin_url = "/test_admin/admin/admin_views/%s/%s/" % (model, iri_to_uri(quote(self.pk)))
self.assertEqual(logentry.get_admin_url(), desired_admin_url)
logentry.content_type.model = "non-existent"
self.assertEqual(logentry.get_admin_url(), None)
def test_deleteconfirmation_link(self):
"The link from the delete confirmation page referring back to the changeform of the object should be quoted"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/delete/' % quote(self.pk))
# this URL now comes through reverse(), thus url quoting and iri_to_uri encoding
should_contain = """/%s/">%s</a>""" % (escape(iri_to_uri(quote(self.pk))), escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add should be visible"
add_model = ModelWithStringPrimaryKey(pk="i have something to add")
add_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(add_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(pk="delete")
delete_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(delete_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(pk="history")
history_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(history_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_shortcut_view_with_escaping(self):
"'View on site should' work properly with char fields"
model = ModelWithStringPrimaryKey(pk='abc_123')
model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(model.pk))
should_contain = '/%s/" class="viewsitelink">' % model.pk
self.assertContains(response, should_contain)
def test_change_view_history_link(self):
"""Object history button link should work and contain the pk value quoted."""
url = reverse('admin:%s_modelwithstringprimarykey_change' %
ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
expected_link = reverse('admin:%s_modelwithstringprimarykey_history' %
ModelWithStringPrimaryKey._meta.app_label,
args=(quote(self.pk),))
self.assertContains(response, '<a href="%s" class="historylink"' % expected_link)
def test_redirect_on_add_view_continue_button(self):
"""As soon as an object is added using "Save and continue editing"
button, the user should be redirected to the object's change_view.
In case primary key is a string containing some special characters
like slash or underscore, these characters must be escaped (see #22266)
"""
response = self.client.post(
'/test_admin/admin/admin_views/modelwithstringprimarykey/add/',
{
'string_pk': '123/history',
"_continue": "1", # Save and continue editing
}
)
self.assertEqual(response.status_code, 302) # temporary redirect
self.assertEqual(
response['location'],
(
'http://testserver/test_admin/admin/admin_views/'
'modelwithstringprimarykey/123_2Fhistory/' # PK is quoted
)
)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class SecureViewTests(TestCase):
"""
Test behavior of a view protected by the staff_member_required decorator.
"""
fixtures = ['admin-views-users.xml']
def tearDown(self):
self.client.logout()
def test_secure_view_shows_login_if_not_logged_in(self):
"""
Ensure that we see the admin login form.
"""
secure_url = '/test_admin/admin/secure-view/'
response = self.client.get(secure_url)
self.assertRedirects(response, '%s?next=%s' % (reverse('admin:login'), secure_url))
response = self.client.get(secure_url, follow=True)
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(response.context[REDIRECT_FIELD_NAME], secure_url)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewUnicodeTest(TestCase):
fixtures = ['admin-views-unicode.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_unicode_edit(self):
"""
A test to ensure that POST on edit_view handles non-ASCII characters.
"""
post_data = {
"name": "Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": "6",
"chapter_set-INITIAL_FORMS": "3",
"chapter_set-MAX_NUM_FORMS": "0",
"chapter_set-0-id": "1",
"chapter_set-0-title": "Norske bostaver æøå skaper problemer",
"chapter_set-0-content": "<p>Svært frustrerende med UnicodeDecodeError</p>",
"chapter_set-1-id": "2",
"chapter_set-1-title": "Kjærlighet.",
"chapter_set-1-content": "<p>La kjærligheten til de lidende seire.</p>",
"chapter_set-2-id": "3",
"chapter_set-2-title": "Need a title.",
"chapter_set-2-content": "<p>Newest content</p>",
"chapter_set-3-id": "",
"chapter_set-3-title": "",
"chapter_set-3-content": "",
"chapter_set-4-id": "",
"chapter_set-4-title": "",
"chapter_set-4-content": "",
"chapter_set-5-id": "",
"chapter_set-5-title": "",
"chapter_set-5-content": "",
}
response = self.client.post('/test_admin/admin/admin_views/book/1/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_unicode_delete(self):
"""
Ensure that the delete_view handles non-ASCII characters
"""
delete_dict = {'post': 'yes'}
response = self.client.get('/test_admin/admin/admin_views/book/1/delete/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/test_admin/admin/admin_views/book/1/delete/', delete_dict)
self.assertRedirects(response, '/test_admin/admin/admin_views/book/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewListEditable(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-person.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_inheritance(self):
Podcast.objects.create(name="This Week in Django",
release_date=datetime.date.today())
response = self.client.get('/test_admin/admin/admin_views/podcast/')
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get('/test_admin/admin/admin_views/vodcast/')
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso='en', name='English', english_name='English')
response = self.client.get('/test_admin/admin/admin_views/language/')
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get('/test_admin/admin/admin_views/person/')
# 2 inputs per object (the field and the hidden id field) = 6
# 4 management hidden fields = 4
# 4 action inputs (3 regular checkboxes, 1 checkbox to select all) = 4
# main form submit button = 1
# search field and search submit button = 2
# CSRF field = 1
# field to track 'select all' across paginated views = 1
# 6 + 4 + 4 + 1 + 2 + 1 + 1 = 19 inputs
self.assertContains(response, "<input", count=19)
# 1 select per object (3) + 1 select for the action dropdown = 4 selects
self.assertContains(response, "<select", count=4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
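# The form-* keys below mirror the changelist formset's ManagementForm fields
# (TOTAL_FORMS, INITIAL_FORMS, MAX_NUM_FORMS) plus one group of fields per rendered row.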
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/',
data, follow=True)
self.assertEqual(len(response.context['messages']), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "1",
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": "3",
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/?gender__exact=1', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "1",
"form-0-gender": "1",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/?q=john', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
def test_non_field_errors(self):
''' Ensure that non-field errors are displayed for each of the
forms in the changelist's formset. Refs #13126.
'''
fd1 = FoodDelivery.objects.create(reference='123', driver='bill', restaurant='thai')
fd2 = FoodDelivery.objects.create(reference='456', driver='bill', restaurant='india')
fd3 = FoodDelivery.objects.create(reference='789', driver='bill', restaurant='pizza')
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "pizza",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/fooddelivery/', data)
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery with this Driver and Restaurant already exists.</li></ul></td></tr>', 1, html=True)
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-id": str(fd1.id),
"form-0-reference": "123",
"form-0-driver": "bill",
"form-0-restaurant": "thai",
# Same data as above: Forbidden because of unique_together!
"form-1-id": str(fd2.id),
"form-1-reference": "456",
"form-1-driver": "bill",
"form-1-restaurant": "thai",
# Same data also.
"form-2-id": str(fd3.id),
"form-2-reference": "789",
"form-2-driver": "bill",
"form-2-restaurant": "thai",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/fooddelivery/', data)
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist nonfield"><li>Food delivery with this Driver and Restaurant already exists.</li></ul></td></tr>', 2, html=True)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "2",
"form-0-alive": "1",
"form-0-gender": "2",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "2",
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/', data)
non_form_errors = response.context['cl'].formset.non_form_errors()
self.assertIsInstance(non_form_errors, ErrorList)
self.assertEqual(str(non_form_errors), str(ErrorList(["Grace is not a Zombie"])))
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/category/', data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_pagination(self):
"""
Ensure that pagination works for list_editable items.
Refs #16819.
"""
UnorderedObject.objects.create(id=1, name='Unordered object #1')
UnorderedObject.objects.create(id=2, name='Unordered object #2')
UnorderedObject.objects.create(id=3, name='Unordered object #3')
response = self.client.get('/test_admin/admin/admin_views/unorderedobject/')
self.assertContains(response, 'Unordered object #3')
self.assertContains(response, 'Unordered object #2')
self.assertNotContains(response, 'Unordered object #1')
response = self.client.get('/test_admin/admin/admin_views/unorderedobject/?p=1')
self.assertNotContains(response, 'Unordered object #3')
self.assertNotContains(response, 'Unordered object #2')
self.assertContains(response, 'Unordered object #1')
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
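# A POST containing 'index' (the action form's "Go" button) but no '_save' is
# treated by the changelist as an action submission rather than a formset save.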
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": ['3'],
"action": ['', 'delete_selected'],
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
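# When '_save' is present the list_editable formset is saved and the selected
# action is ignored, even though 'action' and '_selected_action' are posted.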
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
"_selected_action": ['1'],
"action": ['', 'delete_selected'],
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
def test_list_editable_popup(self):
"""
Fields should not be list-editable in popups.
"""
response = self.client.get('/test_admin/admin/admin_views/person/')
self.assertNotEqual(response.context['cl'].list_editable, ())
response = self.client.get('/test_admin/admin/admin_views/person/?%s' % IS_POPUP_VAR)
self.assertEqual(response.context['cl'].list_editable, ())
def test_pk_hidden_fields(self):
""" Ensure that hidden pk fields aren't displayed in the table body and
that their corresponding human-readable value is displayed instead.
Note that the hidden pk fields are in fact displayed, but
separately (not in the table), and only once.
Refs #12475.
"""
story1 = Story.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = Story.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
response = self.client.get('/test_admin/admin/admin_views/story/')
self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id), html=True)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
""" Similarly as test_pk_hidden_fields, but when the hidden pk fields are
referenced in list_display_links.
Refs #12475.
"""
story1 = OtherStory.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = OtherStory.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
link1 = reverse('admin:admin_views_otherstory_change', args=(story1.pk,))
link2 = reverse('admin:admin_views_otherstory_change', args=(story2.pk,))
response = self.client.get('/test_admin/admin/admin_views/otherstory/')
self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id), html=True)
self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link1, story1.id), 1)
self.assertContains(response, '<th class="field-id"><a href="%s">%d</a></th>' % (link2, story2.id), 1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminSearchTest(TestCase):
fixtures = ['admin-views-users', 'multiple-child-classes',
'admin-views-person']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_search_on_sibling_models(self):
"Check that a search that mentions sibling models"
response = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
def test_with_fk_to_field(self):
"""Ensure that the to_field GET parameter is preserved when a search
is performed. Refs #10918.
"""
response = self.client.get('/test_admin/admin/auth/user/?q=joe&%s=id' % TO_FIELD_VAR)
self.assertContains(response, "\n1 user\n")
self.assertContains(response, '<input type="hidden" name="%s" value="id"/>' % TO_FIELD_VAR, html=True)
def test_exact_matches(self):
response = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
# confirm the search returned one object
self.assertContains(response, "\n1 recommendation\n")
response = self.client.get('/test_admin/admin/admin_views/recommendation/?q=ba')
# confirm the search returned zero objects
self.assertContains(response, "\n0 recommendations\n")
def test_beginning_matches(self):
response = self.client.get('/test_admin/admin/admin_views/person/?q=Gui')
# confirm the search returned one object
self.assertContains(response, "\n1 person\n")
self.assertContains(response, "Guido")
response = self.client.get('/test_admin/admin/admin_views/person/?q=uido')
# confirm the search returned zero objects
self.assertContains(response, "\n0 persons\n")
self.assertNotContains(response, "Guido")
def test_pluggable_search(self):
PluggableSearchPerson.objects.create(name="Bob", age=10)
PluggableSearchPerson.objects.create(name="Amy", age=20)
response = self.client.get('/test_admin/admin/admin_views/pluggablesearchperson/?q=Bob')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Bob")
response = self.client.get('/test_admin/admin/admin_views/pluggablesearchperson/?q=20')
# confirm the search returned one object
self.assertContains(response, "\n1 pluggable search person\n")
self.assertContains(response, "Amy")
def test_reset_link(self):
"""
Test presence of reset link in search bar ("1 result (_x total_)").
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
# + 1 for total count
with self.assertNumQueries(5):
response = self.client.get('/test_admin/admin/admin_views/person/?q=Gui')
self.assertContains(response,
"""<span class="small quiet">1 result (<a href="?">3 total</a>)</span>""",
html=True)
def test_no_total_count(self):
"""
#8408 -- "Show all" should be displayed instead of the total count if
ModelAdmin.show_full_result_count is False.
"""
# 1 query for session + 1 for fetching user
# + 1 for filtered result + 1 for filtered count
with self.assertNumQueries(4):
response = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
self.assertContains(response,
"""<span class="small quiet">1 result (<a href="?">Show all</a>)</span>""",
html=True)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminInheritedInlinesTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_inline(self):
"Ensure that inline models which inherit from a common parent are correctly handled by admin."
foo_user = "foo username"
bar_user = "bar username"
name_re = re.compile(b'name="(.*?)"')
# test the add case
response = self.client.get('/test_admin/admin/admin_views/persona/add/')
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
# test the add case
post_data = {
"name": "Test Name",
# inline data
"accounts-TOTAL_FORMS": "1",
"accounts-INITIAL_FORMS": "0",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": "1",
"accounts-2-INITIAL_FORMS": "0",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": bar_user,
}
response = self.client.post('/test_admin/admin/admin_views/persona/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
persona_id = Persona.objects.all()[0].id
foo_id = FooAccount.objects.all()[0].id
bar_id = BarAccount.objects.all()[0].id
# test the edit case
response = self.client.get('/test_admin/admin/admin_views/persona/%d/' % persona_id)
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": "Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": "1",
"accounts-MAX_NUM_FORMS": "0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": str(foo_id),
"accounts-0-persona": str(persona_id),
"accounts-2-TOTAL_FORMS": "2",
"accounts-2-INITIAL_FORMS": "1",
"accounts-2-MAX_NUM_FORMS": "0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": str(bar_id),
"accounts-2-0-persona": str(persona_id),
}
response = self.client.post('/test_admin/admin/admin_views/persona/%d/' % persona_id, post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminActionsTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_model_admin_custom_action(self):
"Tests a custom action defined in a ModelAdmin method"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'mail_admin',
'index': 0,
}
self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a ModelAdmin action')
def test_model_admin_default_delete_action(self):
"Tests the default delete action defined as a ModelAdmin method"
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': 'delete_selected',
'index': 0,
}
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': 'delete_selected',
'post': 'yes',
}
confirmation = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
self.assertIsInstance(confirmation, TemplateResponse)
self.assertContains(confirmation, "Are you sure you want to delete the selected subscribers?")
self.assertContains(confirmation, "<h2>Summary</h2>")
self.assertContains(confirmation, "<li>Subscribers: 3</li>")
self.assertContains(confirmation, "<li>External subscribers: 1</li>")
self.assertContains(confirmation, ACTION_CHECKBOX_NAME, count=2)
self.client.post('/test_admin/admin/admin_views/subscriber/', delete_confirmation_data)
self.assertEqual(Subscriber.objects.count(), 0)
@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
def test_non_localized_pk(self):
"""If USE_THOUSAND_SEPARATOR is set, make sure that the ids for
the objects selected for deletion are rendered without separators.
Refs #14895.
"""
subscriber = Subscriber.objects.get(id=1)
subscriber.id = 9999
subscriber.save()
action_data = {
ACTION_CHECKBOX_NAME: [9999, 2],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
self.assertTemplateUsed(response, 'admin/delete_selected_confirmation.html')
self.assertContains(response, 'value="9999"') # Instead of 9,999
self.assertContains(response, 'value="2"')
def test_model_admin_default_delete_action_protected(self):
"""
Tests the default delete action defined as a ModelAdmin method in the
case where some related objects are protected from deletion.
"""
q1 = Question.objects.create(question="Why?")
a1 = Answer.objects.create(question=q1, answer="Because.")
a2 = Answer.objects.create(question=q1, answer="Yes.")
q2 = Question.objects.create(question="Wherefore?")
action_data = {
ACTION_CHECKBOX_NAME: [q1.pk, q2.pk],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post("/test_admin/admin/admin_views/question/", action_data)
self.assertContains(response, "would require deleting the following protected related objects")
self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Because.</a></li>' % a1.pk, html=True)
self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Yes.</a></li>' % a2.pk, html=True)
def test_model_admin_default_delete_action_no_change_url(self):
"""
Default delete action shouldn't break if a user's ModelAdmin removes the url for change_view.
Regression test for #20640
"""
obj = UnchangeableObject.objects.create()
action_data = {
ACTION_CHECKBOX_NAME: obj.pk,
"action": "delete_selected",
"index": "0",
}
response = self.client.post('/test_admin/admin/admin_views/unchangeableobject/', action_data)
# No 500 caused by NoReverseMatch
self.assertEqual(response.status_code, 200)
# The page shouldn't display a link to the nonexistent change page
self.assertContains(response, "<li>Unchangeable object: UnchangeableObject object</li>", 1, html=True)
def test_custom_function_mail_action(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'external_mail',
'index': 0,
}
self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')
def test_custom_function_action_with_redirect(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'redirect_to',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
self.assertEqual(response.status_code, 302)
def test_default_redirect(self):
"""
Test that actions which don't return an HttpResponse are redirected to
the same page, retaining the querystring (which may contain changelist
information).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'external_mail',
'index': 0,
}
url = '/test_admin/admin/admin_views/externalsubscriber/?o=1'
response = self.client.post(url, action_data)
self.assertRedirects(response, url)
def test_custom_function_action_streaming_response(self):
"""Tests a custom action that returns a StreamingHttpResponse."""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'download',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
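# streaming_content is an iterator of byte chunks; join them to inspect the full body.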
content = b''.join(response.streaming_content)
self.assertEqual(content, b'This is the content of the file')
self.assertEqual(response.status_code, 200)
def test_custom_function_action_no_perm_response(self):
"""Tests a custom action that returns an HttpResponse with 403 code."""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'no_perm',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.content, b'No permission to perform this action')
def test_actions_ordering(self):
"""
Ensure that actions are ordered as expected.
Refs #15964.
"""
response = self.client.get('/test_admin/admin/admin_views/externalsubscriber/')
self.assertContains(response, '''<label>Action: <select name="action">
<option value="" selected="selected">---------</option>
<option value="delete_selected">Delete selected external
subscribers</option>
<option value="redirect_to">Redirect to (Awesome action)</option>
<option value="external_mail">External mail (Another awesome
action)</option>
<option value="download">Download subscription</option>
<option value="no_perm">No permission to run</option>
</select>''', html=True)
def test_model_without_action(self):
"Tests a ModelAdmin without any action"
response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
self.assertEqual(response.context["action_form"], None)
self.assertNotContains(response, '<input type="checkbox" class="action-select"',
msg_prefix="Found an unexpected action toggle checkboxbox in response")
self.assertNotContains(response, '<input type="checkbox" class="action-select"')
def test_model_without_action_still_has_jquery(self):
"Tests that a ModelAdmin without any actions still gets jQuery included in page"
response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
self.assertEqual(response.context["action_form"], None)
self.assertContains(response, 'jquery.min.js',
msg_prefix="jQuery missing from admin pages for model with no admin actions")
def test_action_column_class(self):
"Tests that the checkbox column class is present in the response"
response = self.client.get('/test_admin/admin/admin_views/subscriber/')
self.assertNotEqual(response.context["action_form"], None)
self.assertContains(response, 'action-checkbox-column')
def test_multiple_actions_form(self):
"""
Test that actions come from the form whose submit button was pressed (#10618).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
# Two different actions selected on the two forms...
'action': ['external_mail', 'delete_selected'],
# ...but we clicked "go" on the top form.
'index': 0
}
self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
# Send mail, don't delete.
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')
def test_user_message_on_none_selected(self):
"""
User should see a warning when 'Go' is pressed and no items are selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
msg = """Items must be selected in order to perform actions on them. No items have been changed."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_user_message_on_no_action(self):
"""
User should see a warning when 'Go' is pressed and no action is selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action': '',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
msg = """No action selected."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_selection_counter(self):
"""
Check if the selection counter is there.
"""
response = self.client.get('/test_admin/admin/admin_views/subscriber/')
self.assertContains(response, '0 of 2 selected')
def test_popup_actions(self):
""" Actions should not be shown in popups. """
response = self.client.get('/test_admin/admin/admin_views/subscriber/')
self.assertNotEqual(response.context["action_form"], None)
response = self.client.get(
'/test_admin/admin/admin_views/subscriber/?%s' % IS_POPUP_VAR)
self.assertEqual(response.context["action_form"], None)
def test_popup_template_response(self):
"""
A successful popup submission should be rendered from a template so that
it can easily be customized.
"""
response = self.client.post(
'/test_admin/admin/admin_views/actor/add/?%s=1' % IS_POPUP_VAR,
{'name': 'Troy McClure', 'age': '55', IS_POPUP_VAR: '1'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.template_name, 'admin/popup_response.html')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class TestCustomChangeList(TestCase):
fixtures = ['admin-views-users.xml']
urlbit = 'admin'
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": "First Gadget"}
response = self.client.post('/test_admin/%s/admin_views/gadget/add/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
# Hit the page once to flush the messages out of the message queue
response = self.client.get('/test_admin/%s/admin_views/gadget/' % self.urlbit)
# Ensure that data is still not visible on the page
response = self.client.get('/test_admin/%s/admin_views/gadget/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'First Gadget')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class TestInlineNotEditable(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_GET_parent_add(self):
"""
The add view for a parent model with non-editable inlines should render successfully.
"""
response = self.client.get('/test_admin/admin/admin_views/parent/add/')
self.assertEqual(response.status_code, 200)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminCustomQuerysetTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
self.pks = [EmptyModel.objects.create().id for i in range(3)]
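# The EmptyModel ModelAdmin is assumed to use a custom get_queryset() that hides the
# lowest-pk object, which is why the assertions below branch on i > 1.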
self.super_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
'username': 'super',
'password': 'secret',
}
def test_changelist_view(self):
response = self.client.get('/test_admin/admin/admin_views/emptymodel/')
for i in self.pks:
if i > 1:
self.assertContains(response, 'Primary key = %s' % i)
else:
self.assertNotContains(response, 'Primary key = %s' % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name='person1', gender=1)
Person.objects.create(name='person2', gender=2)
# 4 queries are expected: 1 for the session, 1 for the user,
# 1 for the count and 1 for the objects on the page
with self.assertNumQueries(4):
resp = self.client.get('/test_admin/admin/admin_views/person/')
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
# here one more count(*) query will run, because filters were applied
with self.assertNumQueries(5):
extra = {'q': 'not_in_name'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of 0 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 0 selected')
with self.assertNumQueries(5):
extra = {'q': 'person'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
with self.assertNumQueries(5):
extra = {'gender__exact': '1'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of 1 selected')
self.assertEqual(resp.context['selection_note_all'], '1 selected')
def test_change_view(self):
for i in self.pks:
response = self.client.get('/test_admin/admin/admin_views/emptymodel/%s/' % i)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 404)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/coverletter/add/',
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The cover letter "Candidate, Best" was added successfully.</li>',
html=True
)
# model has no __unicode__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/shortmessage/add/',
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The short message "ShortMessage object" was added successfully.</li>',
html=True
)
def test_add_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
self.assertEqual(Telegram.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "Urgent telegram",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/telegram/add/',
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The telegram "Urgent telegram" was added successfully.</li>',
html=True
)
# model has no __unicode__ method
self.assertEqual(Paper.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/paper/add/',
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The paper "Paper object" was added successfully.</li>',
html=True
)
def test_edit_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
cl = CoverLetter.objects.create(author="John Doe")
self.assertEqual(CoverLetter.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/coverletter/%s/' % cl.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/coverletter/%s/' % cl.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __unicode__()
self.assertContains(
response,
'<li class="success">The cover letter "John Doe II" was changed successfully.</li>',
html=True
)
# model has no __unicode__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/shortmessage/%s/' % sm.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/shortmessage/%s/' % sm.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by six.text_type()
self.assertContains(
response,
'<li class="success">The short message "ShortMessage_Deferred_timestamp object" was changed successfully.</li>',
html=True
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
t = Telegram.objects.create(title="Frist Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/telegram/%s/' % t.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/telegram/%s/' % t.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Telegram.objects.count(), 1)
# Message should contain non-ugly model verbose name. The instance
# representation is set by model's __unicode__()
self.assertContains(
response,
'<li class="success">The telegram "Telegram without typo" was changed successfully.</li>',
html=True
)
# model has no __unicode__ method
p = Paper.objects.create(title="My Paper Title")
self.assertEqual(Paper.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/paper/%s/' % p.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "My Modified Paper Title",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/paper/%s/' % p.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(Paper.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by six.text_type()
self.assertContains(
response,
'<li class="success">The paper "Paper_Deferred_author object" was changed successfully.</li>',
html=True
)
def test_history_view_custom_qs(self):
"""
Ensure that custom querysets are considered for the admin history view.
Refs #21013.
"""
self.client.post(reverse('admin:login'), self.super_login)
FilteredManager.objects.create(pk=1)
FilteredManager.objects.create(pk=2)
response = self.client.get('/test_admin/admin/admin_views/filteredmanager/')
self.assertContains(response, "PK=1")
self.assertContains(response, "PK=2")
self.assertEqual(self.client.get('/test_admin/admin/admin_views/filteredmanager/1/history/').status_code, 200)
self.assertEqual(self.client.get('/test_admin/admin/admin_views/filteredmanager/2/history/').status_code, 200)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminInlineFileUploadTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']
urlbit = 'admin'
def setUp(self):
self.client.login(username='super', password='secret')
# Set up test Picture and Gallery.
# These must be set up here instead of in fixtures in order to allow Picture
# to use a NamedTemporaryFile.
tdir = tempfile.gettempdir()
file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
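        # 2 ** 21 bytes == 2 MiB of dummy data, so the Picture below points at a real file on disk.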
file1.write(b'a' * (2 ** 21))
filename = file1.name
file1.close()
self.gallery = Gallery(name="Test Gallery")
self.gallery.save()
self.picture = Picture(name="Test Picture", image=filename, gallery=self.gallery)
self.picture.save()
def tearDown(self):
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""
Test that inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
self.assertContains(response, b"Currently")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminInlineTests(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
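        # Baseline POST data covering every inline formset with blank rows; each test
        # fills in only the fields it exercises before posting.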
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": "0",
"doohickey_set-MAX_NUM_FORMS": "0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": "0",
"grommet_set-MAX_NUM_FORMS": "0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": "0",
"whatsit_set-MAX_NUM_FORMS": "0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": "0",
"fancydoodad_set-MAX_NUM_FORMS": "0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.collector = Collector(pk=1, name='John Fowles')
self.collector.save()
def tearDown(self):
self.client.logout()
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data['widget_set-0-name'] = "Widget 1"
collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
widget_id = Widget.objects.all()[0].id
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="widget_set-0-id"')
# Now resave that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = str(widget_id)
self.post_data['widget_set-0-name'] = "Widget 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = str(widget_id)
self.post_data['widget_set-0-name'] = "Widget 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"A model with an explicit autofield primary key can be saved as inlines. Regression for #8093"
# First add a new inline
self.post_data['grommet_set-0-name'] = "Grommet 1"
collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
self.post_data['grommet_set-0-name'] = "Grommet 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
self.post_data['grommet_set-0-name'] = "Grommet 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
doodad_pk = FancyDoodad.objects.all()[0].pk
# Check that the PK link exists on the rendered form
response = self.client.get(collector_url)
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1 Updated"
response = self.client.post(collector_url, self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""Check that an inline with an editable ordering fields is
updated correctly. Regression for #10922"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update({
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
})
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class NeverCacheTests(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-colors.xml', 'admin-views-fabrics.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_admin_index(self):
"Check the never-cache status of the main index"
response = self.client.get('/test_admin/admin/')
self.assertEqual(get_max_age(response), 0)
def test_app_index(self):
"Check the never-cache status of an application index"
response = self.client.get('/test_admin/admin/admin_views/')
self.assertEqual(get_max_age(response), 0)
def test_model_index(self):
"Check the never-cache status of a model index"
response = self.client.get('/test_admin/admin/admin_views/fabric/')
self.assertEqual(get_max_age(response), 0)
def test_model_add(self):
"Check the never-cache status of a model add page"
response = self.client.get('/test_admin/admin/admin_views/fabric/add/')
self.assertEqual(get_max_age(response), 0)
def test_model_view(self):
"Check the never-cache status of a model edit page"
response = self.client.get('/test_admin/admin/admin_views/section/1/')
self.assertEqual(get_max_age(response), 0)
def test_model_history(self):
"Check the never-cache status of a model history page"
response = self.client.get('/test_admin/admin/admin_views/section/1/history/')
self.assertEqual(get_max_age(response), 0)
def test_model_delete(self):
"Check the never-cache status of a model delete page"
response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
self.assertEqual(get_max_age(response), 0)
def test_login(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get('/test_admin/admin/')
self.assertEqual(get_max_age(response), 0)
def test_logout(self):
"Check the never-cache status of logout view"
response = self.client.get('/test_admin/admin/logout/')
self.assertEqual(get_max_age(response), 0)
def test_password_change(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get('/test_admin/password_change/')
self.assertEqual(get_max_age(response), None)
def test_password_change_done(self):
"Check the never-cache status of the password change done view"
response = self.client.get('/test_admin/admin/password_change/done/')
self.assertEqual(get_max_age(response), None)
def test_JS_i18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertEqual(get_max_age(response), None)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class PrePopulatedTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_prepopulated_on(self):
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpost/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "id: '#id_slug',")
self.assertContains(response, "field['dependency_ids'].push('#id_title');")
self.assertContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
def test_prepopulated_off(self):
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpost/1/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, "id: '#id_slug'")
self.assertNotContains(response, "field['dependency_ids'].push('#id_title');")
self.assertNotContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
def test_prepopulated_maxlength_localized(self):
"""
Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure
that maxLength (in the JavaScript) is rendered without separators.
"""
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpostlargeslug/add/')
self.assertContains(response, "maxLength: 1000") # instead of 1,000
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class SeleniumAdminViewsFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_views'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-views-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_prepopulated_fields(self):
"""
Ensure that the JavaScript-automated prepopulated fields work with the
main form and with stacked and tabular inlines.
Refs #13068, #9264, #9983, #9784.
"""
self.admin_login(username='super', password='secret', login_url='/test_admin/admin/')
self.selenium.get('%s%s' % (self.live_server_url,
'/test_admin/admin/admin_views/mainprepopulated/add/'))
# Main form ----------------------------------------------------------
self.selenium.find_element_by_css_selector('#id_pubdate').send_keys('2012-02-18')
self.get_select_option('#id_status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_name').send_keys(' this is the mAin nÀMë and it\'s awεšome')
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
self.assertEqual(slug1, 'main-name-and-its-awesome-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-and-its-awesome')
# Stacked inlines ----------------------------------------------------
# Initial inline
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-pubdate').send_keys('2011-12-17')
self.get_select_option('#id_relatedprepopulated_set-0-status', 'option one').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-name').send_keys(' here is a sŤāÇkeð inline ! ')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-0-slug2').get_attribute('value')
self.assertEqual(slug1, 'here-stacked-inline-2011-12-17')
self.assertEqual(slug2, 'option-one-here-stacked-inline')
# Add an inline
self.selenium.find_elements_by_link_text('Add another Related prepopulated')[0].click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-pubdate').send_keys('1999-01-25')
self.get_select_option('#id_relatedprepopulated_set-1-status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-name').send_keys(' now you haVe anöther sŤāÇkeð inline with a very ... loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog text... ')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-1-slug2').get_attribute('value')
self.assertEqual(slug1, 'now-you-have-another-stacked-inline-very-loooooooo') # 50 characters maximum for slug1 field
self.assertEqual(slug2, 'option-two-now-you-have-another-stacked-inline-very-looooooo') # 60 characters maximum for slug2 field
# Tabular inlines ----------------------------------------------------
# Initial inline
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-pubdate').send_keys('1234-12-07')
self.get_select_option('#id_relatedprepopulated_set-2-0-status', 'option two').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-name').send_keys('And now, with a tÃbűlaŘ inline !!!')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-0-slug2').get_attribute('value')
self.assertEqual(slug1, 'and-now-tabular-inline-1234-12-07')
self.assertEqual(slug2, 'option-two-and-now-tabular-inline')
# Add an inline
self.selenium.find_elements_by_link_text('Add another Related prepopulated')[1].click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-pubdate').send_keys('1981-08-22')
self.get_select_option('#id_relatedprepopulated_set-2-1-status', 'option one').click()
self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-name').send_keys('a tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters')
slug1 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_relatedprepopulated_set-2-1-slug2').get_attribute('value')
self.assertEqual(slug1, 'tabular-inline-ignored-characters-1981-08-22')
self.assertEqual(slug2, 'option-one-tabular-inline-ignored-characters')
# Save and check that everything is properly stored in the database
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.assertEqual(MainPrepopulated.objects.all().count(), 1)
MainPrepopulated.objects.get(
name=' this is the mAin nÀMë and it\'s awεšome',
pubdate='2012-02-18',
status='option two',
slug1='main-name-and-its-awesome-2012-02-18',
slug2='option-two-main-name-and-its-awesome',
)
self.assertEqual(RelatedPrepopulated.objects.all().count(), 4)
RelatedPrepopulated.objects.get(
name=' here is a sŤāÇkeð inline ! ',
pubdate='2011-12-17',
status='option one',
slug1='here-stacked-inline-2011-12-17',
slug2='option-one-here-stacked-inline',
)
RelatedPrepopulated.objects.get(
name=' now you haVe anöther sŤāÇkeð inline with a very ... loooooooooooooooooo', # 75 characters in name field
pubdate='1999-01-25',
status='option two',
slug1='now-you-have-another-stacked-inline-very-loooooooo',
slug2='option-two-now-you-have-another-stacked-inline-very-looooooo',
)
RelatedPrepopulated.objects.get(
name='And now, with a tÃbűlaŘ inline !!!',
pubdate='1234-12-07',
status='option two',
slug1='and-now-tabular-inline-1234-12-07',
slug2='option-two-and-now-tabular-inline',
)
RelatedPrepopulated.objects.get(
name='a tÃbűlaŘ inline with ignored ;"&*^\%$#@-/`~ characters',
pubdate='1981-08-22',
status='option one',
slug1='tabular-inline-ignored-characters-1981-08-22',
slug2='option-one-tabular-inline-ignored-characters',
)
def test_populate_existing_object(self):
"""
Ensure that the prepopulation works for existing objects too, as long
as the original field is empty.
Refs #19082.
"""
# Slugs are empty to start with.
item = MainPrepopulated.objects.create(
name=' this is the mAin nÀMë',
pubdate='2012-02-18',
status='option two',
slug1='',
slug2='',
)
self.admin_login(username='super',
password='secret',
login_url='/test_admin/admin/')
object_url = '%s%s' % (
self.live_server_url,
'/test_admin/admin/admin_views/mainprepopulated/{}/'.format(item.id))
self.selenium.get(object_url)
self.selenium.find_element_by_css_selector('#id_name').send_keys(' the best')
# The slugs got prepopulated since they were originally empty
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
self.assertEqual(slug1, 'main-name-best-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-best')
# Save the object
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.selenium.get(object_url)
self.selenium.find_element_by_css_selector('#id_name').send_keys(' hello')
        # The slugs were not updated this time, since they were no longer empty
slug1 = self.selenium.find_element_by_css_selector('#id_slug1').get_attribute('value')
slug2 = self.selenium.find_element_by_css_selector('#id_slug2').get_attribute('value')
self.assertEqual(slug1, 'main-name-best-2012-02-18')
self.assertEqual(slug2, 'option-two-main-name-best')
def test_collapsible_fieldset(self):
"""
        Test that the 'collapse' class in a fieldsets definition allows the
        appropriate field section to be shown and hidden.
"""
self.admin_login(username='super', password='secret', login_url='/test_admin/admin/')
self.selenium.get('%s%s' % (self.live_server_url,
'/test_admin/admin/admin_views/article/add/'))
self.assertFalse(self.selenium.find_element_by_id('id_title').is_displayed())
self.selenium.find_elements_by_link_text('Show')[0].click()
self.assertTrue(self.selenium.find_element_by_id('id_title').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('fieldsetcollapser0').text,
"Hide"
)
def test_first_field_focus(self):
"""JavaScript-assisted auto-focus on first usable form field."""
# First form field has a single widget
self.admin_login(username='super', password='secret', login_url='/test_admin/admin/')
self.selenium.get('%s%s' % (self.live_server_url,
'/test_admin/admin/admin_views/picture/add/'))
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element_by_id('id_name')
)
# First form field has a MultiWidget
self.selenium.get('%s%s' % (self.live_server_url,
'/test_admin/admin/admin_views/reservation/add/'))
self.assertEqual(
self.selenium.switch_to.active_element,
self.selenium.find_element_by_id('id_start_date_0')
)
def test_cancel_delete_confirmation(self):
"Cancelling the deletion of an object takes the user back one page."
pizza = Pizza.objects.create(name="Panucci's Double Cheese")
url = reverse('admin:admin_views_pizza_change', args=(pizza.id,))
full_url = '%s%s' % (self.live_server_url, url)
self.admin_login(username='super', password='secret', login_url='/test_admin/admin/')
self.selenium.get(full_url)
self.selenium.find_element_by_class_name('deletelink').click()
self.selenium.find_element_by_class_name('cancel-link').click()
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
def test_cancel_delete_related_confirmation(self):
"""
Cancelling the deletion of an object with relations takes the user back
one page.
"""
pizza = Pizza.objects.create(name="Panucci's Double Cheese")
topping1 = Topping.objects.create(name="Cheddar")
topping2 = Topping.objects.create(name="Mozzarella")
pizza.toppings.add(topping1, topping2)
url = reverse('admin:admin_views_pizza_change', args=(pizza.id,))
full_url = '%s%s' % (self.live_server_url, url)
self.admin_login(username='super', password='secret', login_url='/test_admin/admin/')
self.selenium.get(full_url)
self.selenium.find_element_by_class_name('deletelink').click()
self.selenium.find_element_by_class_name('cancel-link').click()
self.assertEqual(self.selenium.current_url, full_url)
self.assertEqual(Pizza.objects.count(), 1)
self.assertEqual(Topping.objects.count(), 2)
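# The classes below re-run the same Selenium test suite against the Chrome and IE webdrivers.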
class SeleniumAdminViewsChromeTests(SeleniumAdminViewsFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumAdminViewsIETests(SeleniumAdminViewsFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class ReadonlyTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_readonly_get(self):
response = self.client.get('/test_admin/admin/admin_views/post/add/')
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="posted"')
# 3 fields + 2 submit buttons + 5 inline management form fields, + 2
        # hidden fields for inlines + 1 field for the inline + 2 empty inline forms
self.assertContains(response, "<input", count=15)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response,
"<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unknown coolness.")
self.assertContains(response, "foo")
# Checks that multiline text in a readonly field gets <br /> tags
self.assertContains(response, "Multiline<br />test<br />string")
self.assertContains(response, "<p>Multiline<br />html<br />content</p>", html=True)
self.assertContains(response, "InlineMultiline<br />test<br />string")
self.assertContains(response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7)))
self.assertContains(response, '<div class="form-row field-coolness">')
self.assertContains(response, '<div class="form-row field-awesomeness_level">')
self.assertContains(response, '<div class="form-row field-posted">')
self.assertContains(response, '<div class="form-row field-value">')
self.assertContains(response, '<div class="form-row">')
self.assertContains(response, '<p class="help">', 3)
self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>', html=True)
self.assertContains(response, '<p class="help">Some help text for the content (with unicode ŠĐĆŽćžšđ)</p>', html=True)
self.assertContains(response, '<p class="help">Some help text for the date (with unicode ŠĐĆŽćžšđ)</p>', html=True)
p = Post.objects.create(title="I worked on readonly_fields", content="Its good stuff")
response = self.client.get('/test_admin/admin/admin_views/post/%d/' % p.pk)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_post(self):
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by('-id')[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get('/test_admin/admin/admin_views/pizza/add/')
self.assertEqual(response.status_code, 200)
def test_user_password_change_limited_queryset(self):
su = User.objects.filter(is_superuser=True)[0]
response = self.client.get('/test_admin/admin2/auth/user/%s/password/' % su.pk)
self.assertEqual(response.status_code, 404)
def test_change_form_renders_correct_null_choice_value(self):
"""
Regression test for #17911.
"""
choice = Choice.objects.create(choice=None)
response = self.client.get('/test_admin/admin/admin_views/choice/%s/' % choice.pk)
self.assertContains(response, '<p>No opinion</p>', html=True)
self.assertNotContains(response, '<p>(None)</p>')
def test_readonly_backwards_ref(self):
"""
Regression test for #16433 - backwards references for related objects
broke if the related field is read-only due to the help_text attribute
"""
topping = Topping.objects.create(name='Salami')
pizza = Pizza.objects.create(name='Americano')
pizza.toppings.add(topping)
response = self.client.get('/test_admin/admin/admin_views/topping/add/')
self.assertEqual(response.status_code, 200)
def test_readonly_field_overrides(self):
"""
Regression test for #22087 - ModelForm Meta overrides are ignored by
AdminReadonlyField
"""
p = FieldOverridePost.objects.create(title="Test Post", content="Test Content")
response = self.client.get('/test_admin/admin/admin_views/fieldoverridepost/%d/' % p.pk)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<p class="help">Overridden help text for the date</p>')
self.assertContains(response, '<label for="id_public">Overridden public label:</label>', html=True)
self.assertNotContains(response, "Some help text for the date (with unicode ŠĐĆŽćžšđ)")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class LimitChoicesToInAdminTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_limit_choices_to_as_callable(self):
"""Test for ticket 2445 changes to admin."""
threepwood = Character.objects.create(
username='threepwood',
last_action=datetime.datetime.today() + datetime.timedelta(days=1),
)
marley = Character.objects.create(
username='marley',
last_action=datetime.datetime.today() - datetime.timedelta(days=1),
)
response = self.client.get('/test_admin/admin/admin_views/stumpjoke/add/')
# The allowed option should appear twice; the limited option should not appear.
self.assertContains(response, threepwood.username, count=2)
self.assertNotContains(response, marley.username)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class RawIdFieldsTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_limit_choices_to(self):
"""Regression test for 14880"""
actor = Actor.objects.create(name="Palin", age=27)
Inquisition.objects.create(expected=True,
leader=actor,
country="England")
Inquisition.objects.create(expected=False,
leader=actor,
country="Spain")
response = self.client.get('/test_admin/admin/admin_views/sketch/add/')
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content)
self.assertTrue(m) # Got a match
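        # The rendered href is HTML-escaped, so turn &amp; back into & before requesting it.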
        popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step also tests integers, strings and booleans in the
# lookup query string; in model we define inquisition field to have a
# limit_choices_to option that includes a filter on a string field
# (inquisition__actor__name), a filter on an integer field
# (inquisition__actor__age), and a filter on a boolean field
# (inquisition__expected).
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
def test_limit_choices_to_isnull_false(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get('/test_admin/admin/admin_views/sketch/add/')
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_defendant0"', response.content)
self.assertTrue(m) # Got a match
        popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=0 gets parsed correctly from the
# lookup query string; in model we define defendant0 field to have a
# limit_choices_to option that includes "actor__title__isnull=False".
response2 = self.client.get(popup_url)
self.assertContains(response2, "Kilbraken")
self.assertNotContains(response2, "Palin")
def test_limit_choices_to_isnull_true(self):
"""Regression test for 20182"""
Actor.objects.create(name="Palin", age=27)
Actor.objects.create(name="Kilbraken", age=50, title="Judge")
response = self.client.get('/test_admin/admin/admin_views/sketch/add/')
# Find the link
m = re.search(br'<a href="([^"]*)"[^>]* id="lookup_id_defendant1"', response.content)
self.assertTrue(m) # Got a match
        popup_url = m.groups()[0].decode().replace("&amp;", "&")
# Handle relative links
popup_url = urljoin(response.request['PATH_INFO'], popup_url)
# Get the popup and verify the correct objects show up in the resulting
# page. This step tests field__isnull=1 gets parsed correctly from the
# lookup query string; in model we define defendant1 field to have a
# limit_choices_to option that includes "actor__title__isnull=True".
response2 = self.client.get(popup_url)
self.assertNotContains(response2, "Kilbraken")
self.assertContains(response2, "Palin")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_save_button(self):
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_continue': '1',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_password_mismatch(self):
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'mismatch',
})
self.assertEqual(response.status_code, 200)
adminform = response.context['adminform']
self.assertNotIn('password', adminform.form.errors)
self.assertEqual(adminform.form.errors['password2'],
["The two password fields didn't match."])
def test_user_fk_popup(self):
"""Quick user addition in a FK popup shouldn't invoke view for further user customization"""
response = self.client.get('/test_admin/admin/admin_views/album/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/test_admin/admin/auth/user/add')
self.assertContains(response, 'class="add-another" id="add_id_owner"')
response = self.client.get('/test_admin/admin/auth/user/add/?_popup=1')
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
data = {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_popup': '1',
'_save': '1',
}
response = self.client.post('/test_admin/admin/auth/user/add/?_popup=1', data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddAnotherPopup')
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_addanother': '1',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/add/')
self.assertEqual(User.objects.count(), user_count + 1)
self.assertTrue(new_user.has_usable_password())
def test_user_permission_performance(self):
u = User.objects.all()[0]
# Don't depend on a warm cache, see #17377.
ContentType.objects.clear_cache()
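        # The user change page should render with a constant number of queries.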
with self.assertNumQueries(10):
response = self.client.get('/test_admin/admin/auth/user/%s/' % u.pk)
self.assertEqual(response.status_code, 200)
def test_form_url_present_in_context(self):
u = User.objects.all()[0]
response = self.client.get('/test_admin/admin3/auth/user/%s/password/' % u.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['form_url'], 'pony')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class GroupAdminTest(TestCase):
"""
Tests group CRUD functionality.
"""
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_save_button(self):
group_count = Group.objects.count()
response = self.client.post('/test_admin/admin/auth/group/add/', {
'name': 'newgroup',
})
Group.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/group/')
self.assertEqual(Group.objects.count(), group_count + 1)
def test_group_permission_performance(self):
g = Group.objects.create(name="test_group")
# Ensure no queries are skipped due to cached content type for Group.
ContentType.objects.clear_cache()
with self.assertNumQueries(8):
response = self.client.get('/test_admin/admin/auth/group/%s/' % g.pk)
self.assertEqual(response.status_code, 200)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class CSSTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_field_prefix_css_classes(self):
"""
Ensure that fields have a CSS class name with a 'field-' prefix.
Refs #16371.
"""
response = self.client.get('/test_admin/admin/admin_views/post/add/')
# The main form
self.assertContains(response, 'class="form-row field-title"')
self.assertContains(response, 'class="form-row field-content"')
self.assertContains(response, 'class="form-row field-public"')
self.assertContains(response, 'class="form-row field-awesomeness_level"')
self.assertContains(response, 'class="form-row field-coolness"')
self.assertContains(response, 'class="form-row field-value"')
self.assertContains(response, 'class="form-row"') # The lambda function
# The tabular inline
self.assertContains(response, '<td class="field-url">')
self.assertContains(response, '<td class="field-posted">')
def test_index_css_classes(self):
"""
Ensure that CSS class names are used for each app and model on the
admin index pages.
Refs #17050.
"""
# General index page
response = self.client.get("/test_admin/admin/")
self.assertContains(response, '<div class="app-admin_views module">')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
# App index page
response = self.client.get("/test_admin/admin/admin_views/")
self.assertContains(response, '<div class="app-admin_views module">')
self.assertContains(response, '<tr class="model-actor">')
self.assertContains(response, '<tr class="model-album">')
def test_app_model_in_form_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_form template
"""
response = self.client.get('/test_admin/admin/admin_views/section/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_list_body_class(self):
"""
        Ensure the app and model tags are correctly read by the change_list template
"""
response = self.client.get('/test_admin/admin/admin_views/section/')
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_delete_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the delete_confirmation
        template
"""
response = self.client.get(
'/test_admin/admin/admin_views/section/1/delete/')
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_app_model_in_app_index_body_class(self):
"""
        Ensure the app and model tags are correctly read by the app_index template
"""
response = self.client.get('/test_admin/admin/admin_views/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<body class=" dashboard app-admin_views')
def test_app_model_in_delete_selected_confirmation_body_class(self):
"""
        Ensure the app and model tags are correctly read by the
        delete_selected_confirmation template
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'delete_selected',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/section/',
action_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response,
'<body class=" app-admin_views model-section ')
def test_changelist_field_classes(self):
"""
Cells of the change list table should contain the field name in their class attribute
Refs #11195.
"""
Podcast.objects.create(name="Django Dose",
release_date=datetime.date.today())
response = self.client.get('/test_admin/admin/admin_views/podcast/')
self.assertContains(
response, '<th class="field-name">')
self.assertContains(
response, '<td class="field-release_date nowrap">')
self.assertContains(
response, '<td class="action-checkbox">')
try:
import docutils
except ImportError:
docutils = None
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.admindocs', 'django.contrib.flatpages']})
class AdminDocsTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_tags(self):
response = self.client.get('/test_admin/admin/doc/tags/')
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2, html=True)
# A builtin tag exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-autoescape">autoescape</h3>', html=True)
self.assertContains(response, '<li><a href="#built_in-autoescape">autoescape</a></li>', html=True)
# An app tag exists in both the index and detail
self.assertContains(response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>', html=True)
self.assertContains(response, '<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>', html=True)
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2, html=True)
# An admin list tag exists in both the index and detail
self.assertContains(response, '<h3 id="admin_list-admin_actions">admin_actions</h3>', html=True)
self.assertContains(response, '<li><a href="#admin_list-admin_actions">admin_actions</a></li>', html=True)
def test_filters(self):
response = self.client.get('/test_admin/admin/doc/filters/')
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2, html=True)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>', html=True)
self.assertContains(response, '<li><a href="#built_in-add">add</a></li>', html=True)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class ValidXHTMLTests(TestCase):
fixtures = ['admin-views-users.xml']
urlbit = 'admin'
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
@override_settings(
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda t: t != 'django.core.context_processors.i18n',
global_settings.TEMPLATE_CONTEXT_PROCESSORS),
USE_I18N=False,
)
def test_lang_name_present(self):
response = self.client.get('/test_admin/%s/admin_views/' % self.urlbit)
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls",
USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
class DateHierarchyTests(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
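        # Reset the cached locale formats so the USE_L10N / USE_THOUSAND_SEPARATOR
        # overrides don't leak into other tests.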
formats.reset_format_cache()
def assert_non_localized_year(self, response, year):
"""Ensure that the year is not localized with
USE_THOUSAND_SEPARATOR. Refs #15234.
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % (date.year,))
def assert_contains_month_link(self, response, date):
self.assertContains(
            response, '?release_date__month=%d&amp;release_date__year=%d"' % (
date.month, date.year))
def assert_contains_day_link(self, response, date):
self.assertContains(
            response, '?release_date__day=%d&amp;'
            'release_date__month=%d&amp;release_date__year=%d"' % (
date.day, date.month, date.year))
def test_empty(self):
"""
Ensure that no date hierarchy links display with empty changelist.
"""
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
self.assertNotContains(response, 'release_date__year=')
self.assertNotContains(response, 'release_date__month=')
self.assertNotContains(response, 'release_date__day=')
def test_single(self):
"""
Ensure that single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
self.assert_contains_day_link(response, DATE)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
Ensure that day-level links appear for changelist within single month.
"""
DATES = (datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
for date in DATES:
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
Ensure that month-level links appear for changelist within single year.
"""
DATES = (datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, 'release_date__day=')
for date in DATES:
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
Ensure that year-level links appear for year-spanning changelist.
"""
DATES = (datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
# no day/month-level links
self.assertNotContains(response, 'release_date__day=')
self.assertNotContains(response, 'release_date__month=')
for date in DATES:
self.assert_contains_year_link(response, date)
# and make sure GET parameters still behave correctly
for date in DATES:
url = '%s?release_date__year=%d' % (
reverse('admin:admin_views_podcast_changelist'),
date.year)
response = self.client.get(url)
self.assert_contains_month_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
url = '%s?release_date__year=%d&release_date__month=%d' % (
reverse('admin:admin_views_podcast_changelist'),
date.year, date.month)
response = self.client.get(url)
self.assert_contains_day_link(response, date)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminCustomSaveRelatedTests(TestCase):
"""
Ensure that one can easily customize the way related objects are saved.
Refs #16115.
"""
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def test_should_be_able_to_edit_related_objects_on_add_view(self):
post = {
'child_set-TOTAL_FORMS': '3',
'child_set-INITIAL_FORMS': '0',
'name': 'Josh Stone',
'child_set-0-name': 'Paul',
'child_set-1-name': 'Catherine',
}
self.client.post('/test_admin/admin/admin_views/parent/add/', post)
self.assertEqual(1, Parent.objects.count())
self.assertEqual(2, Child.objects.count())
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
def test_should_be_able_to_edit_related_objects_on_change_view(self):
parent = Parent.objects.create(name='Josh Stone')
paul = Child.objects.create(parent=parent, name='Paul')
catherine = Child.objects.create(parent=parent, name='Catherine')
post = {
'child_set-TOTAL_FORMS': '5',
'child_set-INITIAL_FORMS': '2',
'name': 'Josh Stone',
'child_set-0-name': 'Paul',
'child_set-0-id': paul.id,
'child_set-1-name': 'Catherine',
'child_set-1-id': catherine.id,
}
self.client.post('/test_admin/admin/admin_views/parent/%s/' % parent.id, post)
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
def test_should_be_able_to_edit_related_objects_on_changelist_view(self):
parent = Parent.objects.create(name='Josh Rock')
Child.objects.create(parent=parent, name='Paul')
Child.objects.create(parent=parent, name='Catherine')
post = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': parent.id,
'form-0-name': 'Josh Stone',
'_save': 'Save'
}
self.client.post('/test_admin/admin/admin_views/parent/', post)
children_names = list(Child.objects.order_by('name').values_list('name', flat=True))
self.assertEqual('Josh Stone', Parent.objects.latest('id').name)
self.assertEqual(['Catherine Stone', 'Paul Stone'], children_names)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewLogoutTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_client_logout_url_can_be_used_to_login(self):
response = self.client.get('/test_admin/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/logged_out.html')
self.assertEqual(response.request['PATH_INFO'], '/test_admin/admin/logout/')
# we are now logged out
response = self.client.get('/test_admin/admin/logout/')
self.assertEqual(response.status_code, 302) # we should be redirected to the login page.
# follow the redirect and test results.
response = self.client.get('/test_admin/admin/logout/', follow=True)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(response.request['PATH_INFO'], '/test_admin/admin/login/')
self.assertContains(response, '<input type="hidden" name="next" value="/test_admin/admin/" />')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminUserMessageTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def send_message(self, level):
"""
Helper that sends a post to the dummy test methods and asserts that a
message with the level has appeared in the response.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'message_%s' % level,
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/usermessenger/',
action_data, follow=True)
self.assertContains(response,
'<li class="%s">Test %s</li>' % (level, level),
html=True)
@override_settings(MESSAGE_LEVEL=10) # Set to DEBUG for this request
def test_message_debug(self):
self.send_message('debug')
def test_message_info(self):
self.send_message('info')
def test_message_success(self):
self.send_message('success')
def test_message_warning(self):
self.send_message('warning')
def test_message_error(self):
self.send_message('error')
def test_message_extra_tags(self):
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action': 'message_extra_tags',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/usermessenger/',
action_data, follow=True)
self.assertContains(response,
'<li class="extra_tag info">Test tags</li>',
html=True)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminKeepChangeListFiltersTests(TestCase):
fixtures = ['admin-views-users.xml']
admin_site = site
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def assertURLEqual(self, url1, url2):
"""
Assert that two URLs are equal despite the ordering
of their querystring. Refs #22360.
"""
parsed_url1 = urlparse(url1)
path1 = parsed_url1.path
parsed_qs1 = dict(parse_qsl(parsed_url1.query))
parsed_url2 = urlparse(url2)
path2 = parsed_url2.path
parsed_qs2 = dict(parse_qsl(parsed_url2.query))
for parsed_qs in [parsed_qs1, parsed_qs2]:
if '_changelist_filters' in parsed_qs:
changelist_filters = parsed_qs['_changelist_filters']
parsed_filters = dict(parse_qsl(changelist_filters))
parsed_qs['_changelist_filters'] = parsed_filters
self.assertEqual(path1, path2)
self.assertEqual(parsed_qs1, parsed_qs2)
def test_assert_url_equal(self):
# Test equality.
self.assertURLEqual(
'http://testserver/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0',
'http://testserver/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'
)
# Test inequality.
with self.assertRaises(AssertionError):
self.assertURLEqual(
'http://testserver/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0',
'http://testserver/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D1%26is_superuser__exact%3D1'
)
# Ignore scheme and host.
self.assertURLEqual(
'http://testserver/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0',
'/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0'
)
# Ignore ordering of querystring.
self.assertURLEqual(
'/test_admin/admin/auth/user/?is_staff__exact=0&is_superuser__exact=0',
'/test_admin/admin/auth/user/?is_superuser__exact=0&is_staff__exact=0'
)
# Ignore ordering of _changelist_filters.
self.assertURLEqual(
'/test_admin/admin/auth/user/105/?_changelist_filters=is_staff__exact%3D0%26is_superuser__exact%3D0',
'/test_admin/admin/auth/user/105/?_changelist_filters=is_superuser__exact%3D0%26is_staff__exact%3D0'
)
def get_changelist_filters(self):
return {
'is_superuser__exact': 0,
'is_staff__exact': 0,
}
def get_changelist_filters_querystring(self):
return urlencode(self.get_changelist_filters())
def get_preserved_filters_querystring(self):
return urlencode({
'_changelist_filters': self.get_changelist_filters_querystring()
})
def get_sample_user_id(self):
return 104
def get_changelist_url(self):
return '%s?%s' % (
reverse('admin:auth_user_changelist',
current_app=self.admin_site.name),
self.get_changelist_filters_querystring(),
)
def get_add_url(self):
return '%s?%s' % (
reverse('admin:auth_user_add',
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_change_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_change', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_history_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_history', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def get_delete_url(self, user_id=None):
if user_id is None:
user_id = self.get_sample_user_id()
return "%s?%s" % (
reverse('admin:auth_user_delete', args=(user_id,),
current_app=self.admin_site.name),
self.get_preserved_filters_querystring(),
)
def test_changelist_view(self):
response = self.client.get(self.get_changelist_url())
self.assertEqual(response.status_code, 200)
# Check the `change_view` link has the correct querystring.
detail_link = re.search(
'<a href="(.*?)">joepublic</a>',
force_text(response.content)
)
self.assertURLEqual(detail_link.group(1), self.get_change_url())
def test_change_view(self):
# Get the `change_view`.
response = self.client.get(self.get_change_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form enctype="multipart/form-data" action="(.*?)" method="post" id="user_form".*?>',
force_text(response.content)
)
self.assertURLEqual(form_action.group(1), '?%s' % self.get_preserved_filters_querystring())
# Check the history link.
history_link = re.search(
'<a href="(.*?)" class="historylink">History</a>',
force_text(response.content)
)
self.assertURLEqual(history_link.group(1), self.get_history_url())
# Check the delete link.
delete_link = re.search(
'<a href="(.*?)" class="deletelink">Delete</a>',
force_text(response.content)
)
self.assertURLEqual(delete_link.group(1), self.get_delete_url())
# Test redirect on "Save".
post_data = {
'username': 'joepublic',
'last_login_0': '2007-05-30',
'last_login_1': '13:20:10',
'date_joined_0': '2007-05-30',
'date_joined_1': '13:20:10',
}
post_data['_save'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_changelist_url()
)
post_data.pop('_save')
# Test redirect on "Save and continue".
post_data['_continue'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url()
)
post_data.pop('_continue')
# Test redirect on "Save and add new".
post_data['_addanother'] = 1
response = self.client.post(self.get_change_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_add_url()
)
post_data.pop('_addanother')
def test_add_view(self):
# Get the `add_view`.
response = self.client.get(self.get_add_url())
self.assertEqual(response.status_code, 200)
# Check the form action.
form_action = re.search(
'<form enctype="multipart/form-data" action="(.*?)" method="post" id="user_form".*?>',
force_text(response.content)
)
self.assertURLEqual(form_action.group(1), '?%s' % self.get_preserved_filters_querystring())
post_data = {
'username': 'dummy',
'password1': 'test',
'password2': 'test',
}
# Test redirect on "Save".
post_data['_save'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url(User.objects.latest('pk').pk)
)
post_data.pop('_save')
# Test redirect on "Save and continue".
post_data['username'] = 'dummy2'
post_data['_continue'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_change_url(User.objects.latest('pk').pk)
)
post_data.pop('_continue')
# Test redirect on "Save and add new".
post_data['username'] = 'dummy3'
post_data['_addanother'] = 1
response = self.client.post(self.get_add_url(), data=post_data)
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_add_url()
)
post_data.pop('_addanother')
def test_delete_view(self):
# Test redirect on "Delete".
response = self.client.post(self.get_delete_url(), {'post': 'yes'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.url,
self.get_changelist_url()
)
def test_url_prefix(self):
context = {
'preserved_filters': self.get_preserved_filters_querystring(),
'opts': User._meta,
}
url = reverse('admin:auth_user_changelist', current_app=self.admin_site.name)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
original_prefix = get_script_prefix()
try:
set_script_prefix('/prefix/')
url = reverse('admin:auth_user_changelist', current_app=self.admin_site.name)
self.assertURLEqual(
self.get_changelist_url(),
add_preserved_filters(context, url),
)
finally:
set_script_prefix(original_prefix)
class NamespacedAdminKeepChangeListFiltersTests(AdminKeepChangeListFiltersTests):
admin_site = site2
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class TestLabelVisibility(TestCase):
""" #11277 -Labels of hidden fields in admin were not hidden. """
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def test_all_fields_visible(self):
response = self.client.get('/test_admin/admin/admin_views/emptymodelvisible/add/')
self.assert_fieldline_visible(response)
self.assert_field_visible(response, 'first')
self.assert_field_visible(response, 'second')
def test_all_fields_hidden(self):
response = self.client.get('/test_admin/admin/admin_views/emptymodelhidden/add/')
self.assert_fieldline_hidden(response)
self.assert_field_hidden(response, 'first')
self.assert_field_hidden(response, 'second')
def test_mixin(self):
response = self.client.get('/test_admin/admin/admin_views/emptymodelmixin/add/')
self.assert_fieldline_visible(response)
self.assert_field_hidden(response, 'first')
self.assert_field_visible(response, 'second')
def assert_field_visible(self, response, field_name):
self.assertContains(response, '<div class="field-box field-%s">' % field_name)
def assert_field_hidden(self, response, field_name):
self.assertContains(response, '<div class="field-box field-%s hidden">' % field_name)
def assert_fieldline_visible(self, response):
self.assertContains(response, '<div class="form-row field-first field-second">')
def assert_fieldline_hidden(self, response):
self.assertContains(response, '<div class="form-row hidden')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class AdminViewOnSiteTests(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-restaurants.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_add_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {"family_name": "Test1",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": "",
"dependentchild_set-0-family_name": "Test2"}
response = self.client.post('/test_admin/admin/admin_views/parentwithdependentchildren/add/',
post_data)
# just verifying the parent form failed validation, as expected --
# this isn't the regression test
self.assertIn('some_required_info', response.context['adminform'].form.errors)
# actual regression test
for error_set in response.context['inline_admin_formset'].formset.errors:
self.assertEqual(['Children must share a family name with their parents in this contrived test case'],
error_set.get('__all__'))
def test_change_view_form_and_formsets_run_validation(self):
"""
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data
"""
pwdc = ParentWithDependentChildren.objects.create(some_required_info=6,
family_name="Test1")
# The form validation should fail because 'some_required_info' is
# not included on the parent form, and the family_name of the parent
# does not match that of the child
post_data = {"family_name": "Test2",
"dependentchild_set-TOTAL_FORMS": "1",
"dependentchild_set-INITIAL_FORMS": "0",
"dependentchild_set-MAX_NUM_FORMS": "1",
"dependentchild_set-0-id": "",
"dependentchild_set-0-parent": str(pwdc.id),
"dependentchild_set-0-family_name": "Test1"}
response = self.client.post('/test_admin/admin/admin_views/parentwithdependentchildren/%d/'
% pwdc.id, post_data)
# just verifying the parent form failed validation, as expected --
# this isn't the regression test
self.assertIn('some_required_info', response.context['adminform'].form.errors)
# actual regression test
for error_set in response.context['inline_admin_formset'].formset.errors:
self.assertEqual(['Children must share a family name with their parents in this contrived test case'],
error_set.get('__all__'))
def test_check(self):
"Ensure that the view_on_site value is either a boolean or a callable"
try:
CityAdmin.view_on_site = True
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = False
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = lambda obj: obj.get_absolute_url()
self.assertEqual(CityAdmin.check(City), [])
CityAdmin.view_on_site = []
self.assertEqual(CityAdmin.check(City), [
Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=CityAdmin,
id='admin.E025',
),
])
finally:
# Restore the original values for the benefit of other tests.
CityAdmin.view_on_site = True
def test_false(self):
"Ensure that the 'View on site' button is not displayed if view_on_site is False"
response = self.client.get('/test_admin/admin/admin_views/restaurant/1/')
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertNotContains(response,
'"/test_admin/admin/r/%s/1/"' % content_type_pk,
)
def test_true(self):
"Ensure that the default behavior is followed if view_on_site is True"
response = self.client.get('/test_admin/admin/admin_views/city/1/')
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertContains(response,
'"/test_admin/admin/r/%s/1/"' % content_type_pk,
)
def test_callable(self):
"Ensure that the right link is displayed if view_on_site is a callable"
response = self.client.get('/test_admin/admin/admin_views/worker/1/')
worker = Worker.objects.get(pk=1)
self.assertContains(response,
'"/worker/%s/%s/"' % (worker.surname, worker.name),
)
def test_missing_get_absolute_url(self):
"Ensure None is returned if model doesn't have get_absolute_url"
model_admin = ModelAdmin(Worker, None)
self.assertIsNone(model_admin.get_view_on_site_url(Worker()))
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF="admin_views.urls")
class InlineAdminViewOnSiteTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-restaurants.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_false(self):
"Ensure that the 'View on site' button is not displayed if view_on_site is False"
response = self.client.get('/test_admin/admin/admin_views/state/1/')
content_type_pk = ContentType.objects.get_for_model(City).pk
self.assertNotContains(response,
'/test_admin/admin/r/%s/1/' % content_type_pk,
)
def test_true(self):
"Ensure that the 'View on site' button is displayed if view_on_site is True"
response = self.client.get('/test_admin/admin/admin_views/city/1/')
content_type_pk = ContentType.objects.get_for_model(Restaurant).pk
self.assertContains(response,
'/test_admin/admin/r/%s/1/' % content_type_pk,
)
def test_callable(self):
"Ensure that the right link is displayed if view_on_site is a callable"
response = self.client.get('/test_admin/admin/admin_views/restaurant/1/')
worker = Worker.objects.get(pk=1)
self.assertContains(response,
'"/worker_inline/%s/%s/"' % (worker.surname, worker.name),
)
class AdminGenericRelationTests(TestCase):
def test_generic_relation_fk_list_filter(self):
"""
Validates that a model with a generic relation to a model with
a foreign key can specify the generic+fk relationship
path as a list_filter. See trac #21428.
"""
class GenericFKAdmin(ModelAdmin):
list_filter = ('tags__content_type',)
validator = ModelAdminValidator()
try:
validator.validate_list_filter(GenericFKAdmin, Plot)
except ImproperlyConfigured:
self.fail("Couldn't validate a GenericRelation -> FK path in ModelAdmin.list_filter")
@override_settings(ROOT_URLCONF="admin_views.urls")
class TestEtagWithAdminView(TestCase):
# See https://code.djangoproject.com/ticket/16003
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
| 45.74357
| 251
| 0.64894
|
07c7cacc89e134a0fd7bfe47314b33e0f926a649
| 3,292
|
py
|
Python
|
py/selenium/webdriver/common/desired_capabilities.py
|
weilandia/selenium
|
949f05eb9a0dad3fddbf1fb34c3b0164deba1db7
|
[
"Apache-2.0"
] | 3
|
2019-04-21T14:21:16.000Z
|
2019-04-27T03:24:01.000Z
|
py/selenium/webdriver/common/desired_capabilities.py
|
weilandia/selenium
|
949f05eb9a0dad3fddbf1fb34c3b0164deba1db7
|
[
"Apache-2.0"
] | 6
|
2019-09-26T23:37:05.000Z
|
2019-10-11T20:57:06.000Z
|
py/selenium/webdriver/common/desired_capabilities.py
|
lucianodgs/selenium
|
e2b2b97de0d9a6cc86563c866a9361237519159f
|
[
"Apache-2.0"
] | 2
|
2019-06-27T19:40:39.000Z
|
2019-11-25T23:36:37.000Z
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
"""
Set of default supported desired capabilities.
Use this as a starting point for creating a desired capabilities object when
requesting remote webdrivers to connect to a Selenium server or Selenium Grid.
Usage Example::
from selenium import webdriver
selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
# Create a desired capabilities object as a starting point.
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['platform'] = "WINDOWS"
capabilities['version'] = "10"
# Instantiate an instance of Remote WebDriver with the desired capabilities.
driver = webdriver.Remote(desired_capabilities=capabilities,
command_executor=selenium_grid_url)
Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
effects of altering the Global class instance.
"""
FIREFOX = {
"browserName": "firefox",
"acceptInsecureCerts": True,
}
INTERNETEXPLORER = {
"browserName": "internet explorer",
"version": "",
"platform": "WINDOWS",
}
EDGE = {
"browserName": "MicrosoftEdge",
"version": "",
"platform": "WINDOWS"
}
CHROME = {
"browserName": "chrome",
"version": "",
"platform": "ANY",
}
OPERA = {
"browserName": "opera",
"version": "",
"platform": "ANY",
}
SAFARI = {
"browserName": "safari",
"version": "",
"platform": "MAC",
}
HTMLUNIT = {
"browserName": "htmlunit",
"version": "",
"platform": "ANY",
}
HTMLUNITWITHJS = {
"browserName": "htmlunit",
"version": "firefox",
"platform": "ANY",
"javascriptEnabled": True,
}
IPHONE = {
"browserName": "iPhone",
"version": "",
"platform": "MAC",
}
IPAD = {
"browserName": "iPad",
"version": "",
"platform": "MAC",
}
ANDROID = {
"browserName": "android",
"version": "",
"platform": "ANDROID",
}
PHANTOMJS = {
"browserName": "phantomjs",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
WEBKITGTK = {
"browserName": "MiniBrowser",
"version": "",
"platform": "ANY",
}
| 25.71875
| 84
| 0.595383
|
3c9ba4d2fa93151744e77256710e5f58b2f35110
| 20,137
|
py
|
Python
|
bot/exts/evergreen/help.py
|
blankRiot96/sir-lancebot
|
25175a0d33886902576333756c84bd195af563ea
|
[
"MIT"
] | 1
|
2022-01-06T01:23:13.000Z
|
2022-01-06T01:23:13.000Z
|
bot/exts/evergreen/help.py
|
blankRiot96/sir-lancebot
|
25175a0d33886902576333756c84bd195af563ea
|
[
"MIT"
] | null | null | null |
bot/exts/evergreen/help.py
|
blankRiot96/sir-lancebot
|
25175a0d33886902576333756c84bd195af563ea
|
[
"MIT"
] | null | null | null |
# Help command from the Python bot. Any commands added there in the future should also be added here.
import asyncio
import itertools
import logging
from contextlib import suppress
from typing import List, NamedTuple, Union
from discord import Colour, Embed, HTTPException, Message, Reaction, User
from discord.ext import commands
from discord.ext.commands import CheckFailure, Cog as DiscordCog, Command, Context
from fuzzywuzzy import fuzz, process
from bot import constants
from bot.bot import Bot
from bot.constants import Emojis
from bot.utils.pagination import (
FIRST_EMOJI, LAST_EMOJI,
LEFT_EMOJI, LinePaginator, RIGHT_EMOJI,
)
DELETE_EMOJI = Emojis.trashcan
REACTIONS = {
FIRST_EMOJI: "first",
LEFT_EMOJI: "back",
RIGHT_EMOJI: "next",
LAST_EMOJI: "end",
DELETE_EMOJI: "stop",
}
class Cog(NamedTuple):
"""Show information about a Cog's name, description and commands."""
name: str
description: str
commands: List[Command]
log = logging.getLogger(__name__)
class HelpQueryNotFound(ValueError):
"""
Raised when a HelpSession Query doesn't match a command or cog.
Contains the custom attribute of ``possible_matches``.
Instances of this object contain a dictionary of any command(s) that were close to matching the
query, where keys are the possible matched command names and values are the likeness match scores.
"""
def __init__(self, arg: str, possible_matches: dict = None):
super().__init__(arg)
self.possible_matches = possible_matches
class HelpSession:
"""
An interactive session for bot and command help output.
Expected attributes include:
* title: str
The title of the help message.
* query: Union[discord.ext.commands.Bot, discord.ext.commands.Command]
The bot, cog or command that the help output describes.
* description: str
The description of the query.
* pages: list[str]
A list of the help content split into manageable pages.
* message: `discord.Message`
The message object that's showing the help contents.
* destination: `discord.abc.Messageable`
Where the help message is to be sent to.
Cogs can be grouped into custom categories. All cogs with the same category will be displayed
under a single category name in the help output. Custom categories are defined inside the cogs
as a class attribute named `category`. A description can also be specified with the attribute
`category_description`. If a description is not found in at least one cog, the default will be
the regular description (class docstring) of the first cog found in the category.
"""
def __init__(
self,
ctx: Context,
*command,
cleanup: bool = False,
only_can_run: bool = True,
show_hidden: bool = False,
max_lines: int = 15
):
"""Creates an instance of the HelpSession class."""
self._ctx = ctx
self._bot = ctx.bot
self.title = "Command Help"
# set the query details for the session
if command:
query_str = " ".join(command)
self.query = self._get_query(query_str)
self.description = self.query.description or self.query.help
else:
self.query = ctx.bot
self.description = self.query.description
self.author = ctx.author
self.destination = ctx.channel
# set the config for the session
self._cleanup = cleanup
self._only_can_run = only_can_run
self._show_hidden = show_hidden
self._max_lines = max_lines
# init session states
self._pages = None
self._current_page = 0
self.message = None
self._timeout_task = None
self.reset_timeout()
def _get_query(self, query: str) -> Union[Command, Cog]:
"""Attempts to match the provided query with a valid command or cog."""
command = self._bot.get_command(query)
if command:
return command
# Find all cog categories that match.
cog_matches = []
description = None
for cog in self._bot.cogs.values():
if hasattr(cog, "category") and cog.category == query:
cog_matches.append(cog)
if hasattr(cog, "category_description"):
description = cog.category_description
# Try to search by cog name if no categories match.
if not cog_matches:
cog = self._bot.cogs.get(query)
# Don't consider it a match if the cog has a category.
if cog and not hasattr(cog, "category"):
cog_matches = [cog]
if cog_matches:
cog = cog_matches[0]
cmds = (cog.get_commands() for cog in cog_matches) # Commands of all cogs
return Cog(
name=cog.category if hasattr(cog, "category") else cog.qualified_name,
description=description or cog.description,
commands=tuple(itertools.chain.from_iterable(cmds)) # Flatten the list
)
self._handle_not_found(query)
def _handle_not_found(self, query: str) -> None:
"""
Handles when a query does not match a valid command or cog.
Will pass on possible close matches along with the `HelpQueryNotFound` exception.
"""
# Combine command and cog names
choices = list(self._bot.all_commands) + list(self._bot.cogs)
result = process.extractBests(query, choices, scorer=fuzz.ratio, score_cutoff=90)
raise HelpQueryNotFound(f'Query "{query}" not found.', dict(result))
async def timeout(self, seconds: int = 30) -> None:
"""Waits for a set number of seconds, then stops the help session."""
await asyncio.sleep(seconds)
await self.stop()
def reset_timeout(self) -> None:
"""Cancels the original timeout task and sets it again from the start."""
# cancel original if it exists
if self._timeout_task:
if not self._timeout_task.cancelled():
self._timeout_task.cancel()
# recreate the timeout task
self._timeout_task = self._bot.loop.create_task(self.timeout())
async def on_reaction_add(self, reaction: Reaction, user: User) -> None:
"""Event handler for when reactions are added on the help message."""
# ensure it was the relevant session message
if reaction.message.id != self.message.id:
return
# ensure it was the session author who reacted
if user.id != self.author.id:
return
emoji = str(reaction.emoji)
# check if valid action
if emoji not in REACTIONS:
return
self.reset_timeout()
# Run relevant action method
action = getattr(self, f"do_{REACTIONS[emoji]}", None)
if action:
await action()
# remove the added reaction to prep for re-use
with suppress(HTTPException):
await self.message.remove_reaction(reaction, user)
async def on_message_delete(self, message: Message) -> None:
"""Closes the help session when the help message is deleted."""
if message.id == self.message.id:
await self.stop()
async def prepare(self) -> None:
"""Sets up the help session pages, events, message and reactions."""
await self.build_pages()
self._bot.add_listener(self.on_reaction_add)
self._bot.add_listener(self.on_message_delete)
await self.update_page()
self.add_reactions()
def add_reactions(self) -> None:
"""Adds the relevant reactions to the help message based on if pagination is required."""
# if paginating
if len(self._pages) > 1:
for reaction in REACTIONS:
self._bot.loop.create_task(self.message.add_reaction(reaction))
# if single-page
else:
self._bot.loop.create_task(self.message.add_reaction(DELETE_EMOJI))
def _category_key(self, cmd: Command) -> str:
"""
Returns the cog name of a given command for use as a key for `sorted` and `groupby`.
A zero width space is used as a prefix for results with no cogs to force them last in ordering.
"""
if cmd.cog:
try:
if cmd.cog.category:
return f"**{cmd.cog.category}**"
except AttributeError:
pass
return f"**{cmd.cog_name}**"
else:
return "**\u200bNo Category:**"
def _get_command_params(self, cmd: Command) -> str:
"""
Returns the command usage signature.
This is a custom implementation of `command.signature` in order to format the command
signature without aliases.
"""
results = []
for name, param in cmd.clean_params.items():
# if argument has a default value
if param.default is not param.empty:
if isinstance(param.default, str):
show_default = param.default
else:
show_default = param.default is not None
# if default is not an empty string or None
if show_default:
results.append(f"[{name}={param.default}]")
else:
results.append(f"[{name}]")
# if variable length argument
elif param.kind == param.VAR_POSITIONAL:
results.append(f"[{name}...]")
# if required
else:
results.append(f"<{name}>")
return f"{cmd.name} {' '.join(results)}"
async def build_pages(self) -> None:
"""Builds the list of content pages to be paginated through in the help message, as a list of str."""
# Use LinePaginator to restrict embed line height
paginator = LinePaginator(prefix="", suffix="", max_lines=self._max_lines)
# show signature if query is a command
if isinstance(self.query, commands.Command):
await self._add_command_signature(paginator)
if isinstance(self.query, Cog):
paginator.add_line(f"**{self.query.name}**")
if self.description:
paginator.add_line(f"*{self.description}*")
# list all children commands of the queried object
if isinstance(self.query, (commands.GroupMixin, Cog)):
await self._list_child_commands(paginator)
self._pages = paginator.pages
async def _add_command_signature(self, paginator: LinePaginator) -> None:
prefix = constants.Client.prefix
signature = self._get_command_params(self.query)
parent = self.query.full_parent_name + " " if self.query.parent else ""
paginator.add_line(f"**```{prefix}{parent}{signature}```**")
aliases = [f"`{alias}`" if not parent else f"`{parent} {alias}`" for alias in self.query.aliases]
aliases += [f"`{alias}`" for alias in getattr(self.query, "root_aliases", ())]
aliases = ", ".join(sorted(aliases))
if aliases:
paginator.add_line(f"**Can also use:** {aliases}\n")
if not await self.query.can_run(self._ctx):
paginator.add_line("***You cannot run this command.***\n")
async def _list_child_commands(self, paginator: LinePaginator) -> None:
# remove hidden commands unless the session is set to show them
if not self._show_hidden:
filtered = [c for c in self.query.commands if not c.hidden]
else:
filtered = self.query.commands
# if after filter there are no commands, finish up
if not filtered:
self._pages = paginator.pages
return
if isinstance(self.query, Cog):
grouped = (("**Commands:**", self.query.commands),)
elif isinstance(self.query, commands.Command):
grouped = (("**Subcommands:**", self.query.commands),)
# otherwise sort and organise all commands into categories
else:
cat_sort = sorted(filtered, key=self._category_key)
grouped = itertools.groupby(cat_sort, key=self._category_key)
for category, cmds in grouped:
await self._format_command_category(paginator, category, list(cmds))
async def _format_command_category(self, paginator: LinePaginator, category: str, cmds: List[Command]) -> None:
cmds = sorted(cmds, key=lambda c: c.name)
cat_cmds = []
for command in cmds:
cat_cmds += await self._format_command(command)
# state variable tracking whether the category title still needs to be printed
print_cat = 1
new_page = True
for details in cat_cmds:
# keep details together, paginating early if it won't fit
lines_adding = len(details.split("\n")) + print_cat
if paginator._linecount + lines_adding > self._max_lines:
paginator._linecount = 0
new_page = True
paginator.close_page()
# new page so print category title again
print_cat = 1
if print_cat:
if new_page:
paginator.add_line("")
paginator.add_line(category)
print_cat = 0
paginator.add_line(details)
async def _format_command(self, command: Command) -> List[str]:
# skip the command if it is hidden and the session hides hidden commands
if command.hidden and not self._show_hidden:
return []
# Patch to make the !help command work outside of #bot-commands again
# This probably needs a proper rewrite, but this will make it work in
# the meantime.
try:
can_run = await command.can_run(self._ctx)
except CheckFailure:
can_run = False
# see if the user can run the command
strikeout = ""
if not can_run:
# skip if we don't show commands they can't run
if self._only_can_run:
return []
strikeout = "~~"
if isinstance(self.query, commands.Command):
prefix = ""
else:
prefix = constants.Client.prefix
signature = self._get_command_params(command)
info = f"{strikeout}**`{prefix}{signature}`**{strikeout}"
# handle if the command has no docstring
short_doc = command.short_doc or "No details provided"
return [f"{info}\n*{short_doc}*"]
def embed_page(self, page_number: int = 0) -> Embed:
"""Returns an Embed with the requested page formatted within."""
embed = Embed()
if isinstance(self.query, (commands.Command, Cog)) and page_number > 0:
title = f'Command Help | "{self.query.name}"'
else:
title = self.title
embed.set_author(name=title, icon_url=constants.Icons.questionmark)
embed.description = self._pages[page_number]
page_count = len(self._pages)
if page_count > 1:
embed.set_footer(text=f"Page {self._current_page+1} / {page_count}")
return embed
async def update_page(self, page_number: int = 0) -> None:
"""Sends the intial message, or changes the existing one to the given page number."""
self._current_page = page_number
embed_page = self.embed_page(page_number)
if not self.message:
self.message = await self.destination.send(embed=embed_page)
else:
await self.message.edit(embed=embed_page)
@classmethod
async def start(cls, ctx: Context, *command, **options) -> "HelpSession":
"""
Create and begin a help session based on the given command context.
Available options kwargs:
* cleanup: Optional[bool]
Set to `True` to have the message deleted on session end. Defaults to `False`.
* only_can_run: Optional[bool]
Set to `True` to hide commands the user can't run. Defaults to `True`.
* show_hidden: Optional[bool]
Set to `True` to include hidden commands. Defaults to `False`.
* max_lines: Optional[int]
Sets the max number of lines the paginator will add to a single page. Defaults to `15`.
"""
session = cls(ctx, *command, **options)
await session.prepare()
return session
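    # Usage sketch (hypothetical query and options):
    #
    #     session = await HelpSession.start(ctx, "tags", show_hidden=True, max_lines=10)
    #
    # The positional strings are joined into a single query and the keyword
    # arguments map onto the constructor options listed above.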
async def stop(self) -> None:
"""Stops the help session, removes event listeners and attempts to delete the help message."""
self._bot.remove_listener(self.on_reaction_add)
self._bot.remove_listener(self.on_message_delete)
# ignore if permission issue, or the message doesn't exist
with suppress(HTTPException, AttributeError):
if self._cleanup:
await self.message.delete()
else:
await self.message.clear_reactions()
@property
def is_first_page(self) -> bool:
"""Check if session is currently showing the first page."""
return self._current_page == 0
@property
def is_last_page(self) -> bool:
"""Check if the session is currently showing the last page."""
return self._current_page == (len(self._pages)-1)
async def do_first(self) -> None:
"""Event that is called when the user requests the first page."""
if not self.is_first_page:
await self.update_page(0)
async def do_back(self) -> None:
"""Event that is called when the user requests the previous page."""
if not self.is_first_page:
await self.update_page(self._current_page-1)
async def do_next(self) -> None:
"""Event that is called when the user requests the next page."""
if not self.is_last_page:
await self.update_page(self._current_page+1)
async def do_end(self) -> None:
"""Event that is called when the user requests the last page."""
if not self.is_last_page:
await self.update_page(len(self._pages)-1)
async def do_stop(self) -> None:
"""Event that is called when the user requests to stop the help session."""
await self.message.delete()
class Help(DiscordCog):
"""Custom Embed Pagination Help feature."""
@commands.command("help")
async def new_help(self, ctx: Context, *commands) -> None:
"""Shows Command Help."""
try:
await HelpSession.start(ctx, *commands)
except HelpQueryNotFound as error:
embed = Embed()
embed.colour = Colour.red()
embed.title = str(error)
if error.possible_matches:
matches = "\n".join(error.possible_matches.keys())
embed.description = f"**Did you mean:**\n`{matches}`"
await ctx.send(embed=embed)
def unload(bot: Bot) -> None:
"""
Reinstates the original help command.
This is run if the cog raises an exception on load, or if the extension is unloaded.
"""
bot.remove_command("help")
bot.add_command(bot._old_help)
def setup(bot: Bot) -> None:
"""
The setup for the help extension.
This is called automatically on `bot.load_extension` being run.
Stores the original help command instance on the `bot._old_help` attribute for later
reinstatement, before removing it from the command registry so the new help command can be
loaded successfully.
If an exception is raised during the loading of the cog, `unload` will be called in order to
reinstate the original help command.
"""
bot._old_help = bot.get_command("help")
bot.remove_command("help")
try:
bot.add_cog(Help())
except Exception:
unload(bot)
raise
def teardown(bot: Bot) -> None:
"""
The teardown for the help extension.
This is called automatically on `bot.unload_extension` being run.
Calls `unload` in order to reinstate the original help command.
"""
unload(bot)
| 35.767318
| 115
| 0.619059
|
4c63689b56d0efdbfeb93cd21dfc503f89e9bf35
| 6,123
|
py
|
Python
|
ingest_results/score.py
|
bzinberg/MCS
|
495562a0d4d0b1e3679b78f0f6fb568a1b9a9ae2
|
[
"Apache-2.0"
] | 1
|
2020-03-19T18:44:01.000Z
|
2020-03-19T18:44:01.000Z
|
ingest_results/score.py
|
bzinberg/MCS
|
495562a0d4d0b1e3679b78f0f6fb568a1b9a9ae2
|
[
"Apache-2.0"
] | null | null | null |
ingest_results/score.py
|
bzinberg/MCS
|
495562a0d4d0b1e3679b78f0f6fb568a1b9a9ae2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# author: Mathieu Bernard <mathieu.a.bernard@inria.fr>
"""Evaluation script for the Intuitive Physics Challenge
Execute the script as follow::
./score.py input_dir output_dir
The `input_dir` MUST HAVE the two subdirectories `res` and `ref` and
the files `res/answer.txt` and `ref/answer.txt`.
The `output_dir` MUST BE an existing directory. The file
`output_dir/scores.txt` will be created (or overwritten if it already exists).
"""
import argparse
import collections
import os
import numpy as np
from sklearn.metrics import roc_auc_score
def _score_relative(submitted, reference):
"""Computes the relative error rate
Equation 1 of https://arxiv.org/pdf/1803.07616.pdf
"""
N = len(submitted)
score = 0
for scene in reference.keys():
sub, ref = submitted[scene], reference[scene]
pos, imp = 0, 0
for k in ('1', '2', '3', '4'):
if ref[k] == 1: # possible movie
pos += sub[k]
else: # impossible movie
imp += sub[k]
if pos < imp: # increment the relative error score
score += 1
# cast to float in case we are running python2
return float(score) / float(N)
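# Worked example (hypothetical scene): with reference {'1': 1, '2': 1, '3': 0, '4': 0}
# and submitted plausibilities {'1': 0.2, '2': 0.3, '3': 0.6, '4': 0.7}, the possible
# movies sum to 0.5 and the impossible ones to 1.3, so pos < imp and the scene counts
# as one relative error; the final score is that count divided by the number of scenes.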
def _score_absolute(submitted, reference):
"""Computes the absolute error rate
Equation 2 of https://arxiv.org/pdf/1803.07616.pdf
"""
y_true = np.asarray([
y for k, v in sorted(reference.items())
for _, y in sorted(v.items())], dtype=np.float32)
y_score = np.asarray([
y for k, v in sorted(submitted.items())
for _, y in sorted(v.items())], dtype=np.float32)
return 1.0 - roc_auc_score(y_true, y_score)
def score_per_block(submitted, reference):
sub_occluded = {k: v for k, v in submitted.items() if 'occluded' in k}
sub_visible = {k: v for k, v in submitted.items() if 'visible' in k}
ref_occluded = {k: v for k, v in reference.items() if 'occluded' in k}
ref_visible = {k: v for k, v in reference.items() if 'visible' in k}
return {
'visible': {
'relative': _score_relative(sub_visible, ref_visible),
'absolute': _score_absolute(sub_visible, ref_visible),
},
'occluded': {
'relative': _score_relative(sub_occluded, ref_occluded),
'absolute': _score_absolute(sub_occluded, ref_occluded),
},
'all': {
'relative': _score_relative(submitted, reference),
'absolute': _score_absolute(submitted, reference),
}}
def score(submitted, reference):
"""Computes the evaluation scores
The scores are computed for all scenes, visible scenes and
occluded scenes. For each category the absolute and relative error
rate are evaluated and returned as a dictionary.
"""
assert sorted(submitted.keys()) == sorted(reference.keys())
return {block: score_per_block(
{k: v for k, v in submitted.items() if block in k},
{k: v for k, v in reference.items() if block in k})
for block in ('O1', 'O2', 'O3')}
def load_answer(path):
"""Returns the content of `path`/answer.txt as a dict
Returns
-------
answer : dict
The output dict is structured as follows::
{scene: {1: p_1, 2: p_2, 3: p_3, 4: p_4}}
where p_i is the plausibility score for the associated movie.
Raises
------
ValueError
If `path`/answer.txt does not exist
AssertionError
If the answers file is badly formatted
"""
if not os.path.isdir(path):
raise ValueError('{} does not exist'.format(path))
answer_file = os.path.join(path, 'answer.txt')
if not os.path.isfile(answer_file):
raise ValueError('{} does not exist'.format(answer_file))
answer = collections.defaultdict(dict)
for line in open(answer_file, 'r'):
split_line = line.split()
assert len(split_line) == 2
plausibility = float(split_line[1])
assert 0 <= plausibility <= 1
header = split_line[0].split('/')
scene_id = '/'.join(header[:-1])
movie_id = header[-1]
assert movie_id in ('1', '2', '3', '4')
answer[scene_id][movie_id] = plausibility
for v in answer.values():
assert sorted(v.keys()) == ['1', '2', '3', '4']
return answer
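# Example answer.txt content (hypothetical scene identifiers); each line holds a
# "<scene path>/<movie id>" key and a plausibility score in [0, 1]:
#
#     O1/test/visible/0001/1 0.91
#     O1/test/visible/0001/2 0.87
#     O1/test/visible/0001/3 0.10
#     O1/test/visible/0001/4 0.05
#
# load_answer() turns these four lines into
# {'O1/test/visible/0001': {'1': 0.91, '2': 0.87, '3': 0.10, '4': 0.05}}.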
def build_html(submitted):
header = '<!DOCTYPE html>\n<html>\n<body>\n\n<p>\n'
footer = '</p></body></html>\n'
html = ''
for k, v in sorted(submitted.items()):
for w, x in sorted(v.items()):
html += '{}_{}: {}<br>\n'.format(k, w, x)
return header + html + footer
def parse_arguments():
"""Parses command-line arguments"""
parser = argparse.ArgumentParser(
description='scoring program for the IntPhys challenge')
parser.add_argument(
'input_dir',
help='directory containing reference and submission data')
parser.add_argument(
'output_dir',
help='where the scores.txt file is written by the scoring program')
return parser.parse_args()
def main():
"""Entry point of the IntPhys evaluation program"""
args = parse_arguments()
# load the submitted and reference data
input_dir = args.input_dir
submitted = load_answer(os.path.join(input_dir, 'res'))
reference = load_answer(os.path.join(input_dir, 'ref'))
output_dir = args.output_dir
if not os.path.isdir(output_dir):
raise ValueError('{} does not exist'.format(output_dir))
# build the html page with detailed results
html_file = os.path.join(output_dir, 'scores.html')
with open(html_file, 'w') as fout:
fout.write(build_html(submitted))
# compute the scores
scores = score(submitted, reference)
# write the final scores.txt file
scores_file = os.path.join(output_dir, 'scores.txt')
with open(scores_file, 'w') as fout:
for k, v in sorted(scores.items()):
for w, x in v.items():
for y, z in x.items():
fout.write('{}_{}_{}: {}\n'.format(k, w, y, z))
if __name__ == '__main__':
main()
| 28.882075
| 75
| 0.617671
|
772aa2ebea13466404604afafa4bb20aaf5b53b1
| 55,202
|
py
|
Python
|
run_project_tests.py
|
jonaslb/meson
|
8133a7b9a4b8f0686fbc479aa2d64e41c85a979b
|
[
"Apache-2.0"
] | null | null | null |
run_project_tests.py
|
jonaslb/meson
|
8133a7b9a4b8f0686fbc479aa2d64e41c85a979b
|
[
"Apache-2.0"
] | null | null | null |
run_project_tests.py
|
jonaslb/meson
|
8133a7b9a4b8f0686fbc479aa2d64e41c85a979b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent.futures import ProcessPoolExecutor, CancelledError
from enum import Enum
from io import StringIO
from pathlib import Path, PurePath
import argparse
import functools
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import typing as T
import xml.etree.ElementTree as ET
from mesonbuild import build
from mesonbuild import environment
from mesonbuild import compilers
from mesonbuild import mesonlib
from mesonbuild import mlog
from mesonbuild import mtest
from mesonbuild.build import ConfigurationData
from mesonbuild.mesonlib import MachineChoice, Popen_safe, TemporaryDirectoryWinProof
from mesonbuild.coredata import backendlist, version as meson_version
from run_tests import get_fake_options, run_configure, get_meson_script
from run_tests import get_backend_commands, get_backend_args_for_dir, Backend
from run_tests import ensure_backend_detects_changes
from run_tests import guess_backend
ALL_TESTS = ['cmake', 'common', 'native', 'warning-meson', 'failing-meson', 'failing-build', 'failing-test',
'keyval', 'platform-osx', 'platform-windows', 'platform-linux',
'java', 'C#', 'vala', 'rust', 'd', 'objective c', 'objective c++',
'fortran', 'swift', 'cuda', 'python3', 'python', 'fpga', 'frameworks', 'nasm', 'wasm'
]
class BuildStep(Enum):
configure = 1
build = 2
test = 3
install = 4
clean = 5
validate = 6
class TestResult(BaseException):
def __init__(self, cicmds):
self.msg = '' # empty msg indicates test success
self.stdo = ''
self.stde = ''
self.mlog = ''
self.cicmds = cicmds
self.conftime = 0
self.buildtime = 0
self.testtime = 0
def add_step(self, step, stdo, stde, mlog='', time=0):
self.step = step
self.stdo += stdo
self.stde += stde
self.mlog += mlog
if step == BuildStep.configure:
self.conftime = time
elif step == BuildStep.build:
self.buildtime = time
elif step == BuildStep.test:
self.testtime = time
def fail(self, msg):
self.msg = msg
class InstalledFile:
def __init__(self, raw: T.Dict[str, str]):
self.path = raw['file']
self.typ = raw['type']
self.platform = raw.get('platform', None)
self.language = raw.get('language', 'c') # type: str
version = raw.get('version', '') # type: str
if version:
self.version = version.split('.') # type: T.List[str]
else:
# split on '' will return [''], we want an empty list though
self.version = []
def get_path(self, compiler: str, env: environment.Environment) -> T.Optional[Path]:
p = Path(self.path)
canonical_compiler = compiler
if ((compiler in ['clang-cl', 'intel-cl']) or
(env.machines.host.is_windows() and compiler in {'pgi', 'dmd', 'ldc'})):
canonical_compiler = 'msvc'
has_pdb = False
if self.language in {'c', 'cpp'}:
has_pdb = canonical_compiler == 'msvc'
elif self.language == 'd':
# dmd's optlink does not generate pdb files
has_pdb = env.coredata.compilers.host['d'].linker.id in {'link', 'lld-link'}
# Abort if the platform does not match
matches = {
'msvc': canonical_compiler == 'msvc',
'gcc': canonical_compiler != 'msvc',
'cygwin': env.machines.host.is_cygwin(),
'!cygwin': not env.machines.host.is_cygwin(),
}.get(self.platform or '', True)
if not matches:
return None
# Handle the different types
if self.typ in ['file', 'dir']:
return p
elif self.typ == 'shared_lib':
if env.machines.host.is_windows() or env.machines.host.is_cygwin():
# Windows only has foo.dll and foo-X.dll
if len(self.version) > 1:
return None
if self.version:
p = p.with_name('{}-{}'.format(p.name, self.version[0]))
return p.with_suffix('.dll')
p = p.with_name('lib{}'.format(p.name))
if env.machines.host.is_darwin():
# MacOS only has libfoo.dylib and libfoo.X.dylib
if len(self.version) > 1:
return None
# pathlib.Path.with_suffix replaces, not appends
suffix = '.dylib'
if self.version:
suffix = '.{}{}'.format(self.version[0], suffix)
else:
# pathlib.Path.with_suffix replaces, not appends
suffix = '.so'
if self.version:
suffix = '{}.{}'.format(suffix, '.'.join(self.version))
return p.with_suffix(suffix)
elif self.typ == 'exe':
if env.machines.host.is_windows() or env.machines.host.is_cygwin():
return p.with_suffix('.exe')
elif self.typ == 'pdb':
if self.version:
p = p.with_name('{}-{}'.format(p.name, self.version[0]))
return p.with_suffix('.pdb') if has_pdb else None
elif self.typ == 'implib' or self.typ == 'implibempty':
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
# only MSVC doesn't generate empty implibs
if self.typ == 'implibempty' and compiler == 'msvc':
return None
return p.parent / (re.sub(r'^lib', '', p.name) + '.lib')
elif env.machines.host.is_windows() or env.machines.host.is_cygwin():
return p.with_suffix('.dll.a')
else:
return None
elif self.typ == 'expr':
return Path(platform_fix_name(p.as_posix(), canonical_compiler, env))
else:
raise RuntimeError('Invalid installed file type {}'.format(self.typ))
return p
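    # Illustrative results under assumed hosts: a 'shared_lib' entry for 'usr/lib/foo'
    # with version ['1', '2', '3'] resolves to 'usr/lib/libfoo.so.1.2.3' on Linux but
    # to None on Windows (more than one version component is not representable there),
    # while the same entry with version ['2'] resolves to 'usr/lib/foo-2.dll' on
    # Windows and to 'usr/lib/libfoo.2.dylib' on macOS.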
def get_paths(self, compiler: str, env: environment.Environment, installdir: Path) -> T.List[Path]:
p = self.get_path(compiler, env)
if not p:
return []
if self.typ == 'dir':
abs_p = installdir / p
if not abs_p.exists():
raise RuntimeError('{} does not exist'.format(p))
if not abs_p.is_dir():
raise RuntimeError('{} is not a directory'.format(p))
return [x.relative_to(installdir) for x in abs_p.rglob('*') if x.is_file() or x.is_symlink()]
else:
return [p]
@functools.total_ordering
class TestDef:
def __init__(self, path: Path, name: T.Optional[str], args: T.List[str], skip: bool = False):
self.path = path
self.name = name
self.args = args
self.skip = skip
self.env = os.environ.copy()
self.installed_files = [] # type: T.List[InstalledFile]
self.do_not_set_opts = [] # type: T.List[str]
self.stdout = [] # type: T.List[T.Dict[str, str]]
def __repr__(self) -> str:
return '<{}: {:<48} [{}: {}] -- {}>'.format(type(self).__name__, str(self.path), self.name, self.args, self.skip)
def display_name(self) -> str:
if self.name:
return '{} ({})'.format(self.path.as_posix(), self.name)
return self.path.as_posix()
def __lt__(self, other: object) -> bool:
if isinstance(other, TestDef):
# None is not sortable, so replace it with an empty string
s_id = int(self.path.name.split(' ')[0])
o_id = int(other.path.name.split(' ')[0])
return (s_id, self.path, self.name or '') < (o_id, other.path, other.name or '')
return NotImplemented
failing_logs = []
print_debug = 'MESON_PRINT_TEST_OUTPUT' in os.environ
under_ci = 'CI' in os.environ
skip_scientific = under_ci and ('SKIP_SCIENTIFIC' in os.environ)
do_debug = under_ci or print_debug
no_meson_log_msg = 'No meson-log.txt found.'
host_c_compiler = None
compiler_id_map = {} # type: T.Dict[str, str]
tool_vers_map = {} # type: T.Dict[str, str]
class StopException(Exception):
def __init__(self):
super().__init__('Stopped by user')
stop = False
def stop_handler(signal, frame):
global stop
stop = True
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
def setup_commands(optbackend):
global do_debug, backend, backend_flags
global compile_commands, clean_commands, test_commands, install_commands, uninstall_commands
backend, backend_flags = guess_backend(optbackend, shutil.which('msbuild'))
compile_commands, clean_commands, test_commands, install_commands, \
uninstall_commands = get_backend_commands(backend, do_debug)
# TODO try to eliminate or at least reduce this function
def platform_fix_name(fname: str, canonical_compiler: str, env: environment.Environment) -> str:
if '?lib' in fname:
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/\1.', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
elif env.machines.host.is_windows():
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/lib\1.', fname)
fname = re.sub(r'\?lib(.*)\.dll$', r'lib\1.dll', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
elif env.machines.host.is_cygwin():
fname = re.sub(r'lib/\?lib(.*)\.so$', r'bin/cyg\1.dll', fname)
fname = re.sub(r'lib/\?lib(.*)\.', r'bin/cyg\1.', fname)
fname = re.sub(r'\?lib(.*)\.dll$', r'cyg\1.dll', fname)
fname = re.sub(r'/\?lib/', r'/bin/', fname)
else:
fname = re.sub(r'\?lib', 'lib', fname)
if fname.endswith('?so'):
if env.machines.host.is_windows() and canonical_compiler == 'msvc':
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/(?:lib|)([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_windows():
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_cygwin():
fname = re.sub(r'lib/([^/]*)\?so$', r'bin/\1.dll', fname)
fname = re.sub(r'/lib([^/]*?)\?so$', r'/cyg\1.dll', fname)
fname = re.sub(r'/([^/]*?)\?so$', r'/\1.dll', fname)
return fname
elif env.machines.host.is_darwin():
return fname[:-3] + '.dylib'
else:
return fname[:-3] + '.so'
return fname
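# Illustrative substitutions (hypothetical file names): on a plain Linux host
# '?libbar?so' becomes 'libbar.so' and on macOS it becomes 'libbar.dylib', while
# under MSVC on Windows 'lib/?libbar.pdb' is rewritten to 'bin/bar.pdb'.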
def validate_install(test: TestDef, installdir: Path, compiler: str, env: environment.Environment) -> str:
ret_msg = ''
expected_raw = [] # type: T.List[Path]
for i in test.installed_files:
try:
expected_raw += i.get_paths(compiler, env, installdir)
except RuntimeError as err:
ret_msg += 'Expected path error: {}\n'.format(err)
expected = {x: False for x in expected_raw}
found = [x.relative_to(installdir) for x in installdir.rglob('*') if x.is_file() or x.is_symlink()]
# Mark all found files as found and detect unexpected files
for fname in found:
if fname not in expected:
ret_msg += 'Extra file {} found.\n'.format(fname)
continue
expected[fname] = True
# Check if expected files were found
for p, f in expected.items():
if not f:
ret_msg += 'Expected file {} missing.\n'.format(p)
# List dir content on error
if ret_msg != '':
ret_msg += '\nInstall dir contents:\n'
for i in found:
ret_msg += ' - {}\n'.format(i)
return ret_msg
def log_text_file(logfile, testdir, stdo, stde):
global stop, executor, futures
logfile.write('%s\nstdout\n\n---\n' % testdir.as_posix())
logfile.write(stdo)
logfile.write('\n\n---\n\nstderr\n\n---\n')
logfile.write(stde)
logfile.write('\n\n---\n\n')
if print_debug:
try:
print(stdo)
except UnicodeError:
sanitized_out = stdo.encode('ascii', errors='replace').decode()
print(sanitized_out)
try:
print(stde, file=sys.stderr)
except UnicodeError:
sanitized_err = stde.encode('ascii', errors='replace').decode()
print(sanitized_err, file=sys.stderr)
if stop:
print("Aborting..")
for f in futures:
f[2].cancel()
executor.shutdown()
raise StopException()
def bold(text):
return mlog.bold(text).get_text(mlog.colorize_console())
def green(text):
return mlog.green(text).get_text(mlog.colorize_console())
def red(text):
return mlog.red(text).get_text(mlog.colorize_console())
def yellow(text):
return mlog.yellow(text).get_text(mlog.colorize_console())
def _run_ci_include(args: T.List[str]) -> str:
if not args:
return 'At least one parameter required'
try:
data = Path(args[0]).read_text(errors='ignore', encoding='utf-8')
return 'Included file {}:\n{}\n'.format(args[0], data)
except Exception:
return 'Failed to open {}'.format(args[0])
ci_commands = {
'ci_include': _run_ci_include
}
def run_ci_commands(raw_log: str) -> T.List[str]:
res = []
for l in raw_log.splitlines():
if not l.startswith('!meson_ci!/'):
continue
cmd = shlex.split(l[11:])
if not cmd or cmd[0] not in ci_commands:
continue
res += ['CI COMMAND {}:\n{}\n'.format(cmd[0], ci_commands[cmd[0]](cmd[1:]))]
return res
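# A hedged illustration of the log-line protocol parsed by run_ci_commands() above: any line a
# build prints that starts with '!meson_ci!/' is split into a command name plus arguments, and
# only commands registered in ci_commands (currently just 'ci_include') are executed. The file
# path used as the default below is hypothetical.
def _example_ci_include_line(path: str = 'meson-logs/extra-debug.txt') -> str:
    # Printing this line from a test makes the runner embed the file's contents in the CI log.
    return '!meson_ci!/ci_include {}'.format(path)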
def _compare_output(expected: T.List[T.Dict[str, str]], output: str, desc: str) -> str:
if expected:
i = iter(expected)
def next_expected(i):
# Get the next expected line
item = next(i)
how = item.get('match', 'literal')
expected = item.get('line')
# Simple heuristic to automatically convert path separators for
# Windows:
#
# Any '/' appearing before 'WARNING' or 'ERROR' (i.e. a path in a
# filename part of a location) is replaced with '\' (in a re: '\\'
# which matches a literal '\')
#
# (There should probably be a way to turn this off for more complex
# cases which don't fit this)
if mesonlib.is_windows():
if how != "re":
sub = r'\\'
else:
sub = r'\\\\'
expected = re.sub(r'/(?=.*(WARNING|ERROR))', sub, expected)
return how, expected
try:
how, expected = next_expected(i)
for actual in output.splitlines():
if how == "re":
match = bool(re.match(expected, actual))
else:
match = (expected == actual)
if match:
how, expected = next_expected(i)
# reached the end of output without finding expected
return 'expected "{}" not found in {}'.format(expected, desc)
except StopIteration:
# matched all expected lines
pass
return ''
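# Illustrative only: each entry consumed by _compare_output() is a dict with a 'line' key and
# an optional 'match' key ('literal' by default, or 're' for a regular expression). The two
# sample entries below are hypothetical, but they mirror the keys read by next_expected().
_EXAMPLE_EXPECTED_STDOUT = [
    {'line': 'Program xyz found: NO'},
    {'match': 're', 'line': r'.*/meson\.build:3:0: ERROR: .*'},
]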
def validate_output(test: TestDef, stdo: str, stde: str) -> str:
return _compare_output(test.stdout, stdo, 'stdout')
# There are some class variables and such that cache
# information. Clear all of these. The better solution
# would be to change the code so that no state is persisted
# but that would be a lot of work given that Meson was originally
# coded to run as a batch process.
def clear_internal_caches():
import mesonbuild.interpreterbase
from mesonbuild.dependencies import CMakeDependency
from mesonbuild.mesonlib import PerMachine
mesonbuild.interpreterbase.FeatureNew.feature_registry = {}
CMakeDependency.class_cmakeinfo = PerMachine(None, None)
def run_test_inprocess(testdir):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
old_cwd = os.getcwd()
os.chdir(testdir)
test_log_fname = Path('meson-logs', 'testlog.txt')
try:
returncode_test = mtest.run_with_args(['--no-rebuild'])
if test_log_fname.exists():
test_log = test_log_fname.open(errors='ignore').read()
else:
test_log = ''
returncode_benchmark = mtest.run_with_args(['--no-rebuild', '--benchmark', '--logbase', 'benchmarklog'])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
os.chdir(old_cwd)
return max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue(), test_log
# Build directory name must be the same so Ccache works over
# consecutive invocations.
def create_deterministic_builddir(test: TestDef, use_tmpdir: bool) -> str:
import hashlib
src_dir = test.path.as_posix()
if test.name:
src_dir += test.name
rel_dirname = 'b ' + hashlib.sha256(src_dir.encode(errors='ignore')).hexdigest()[0:10]
abs_pathname = os.path.join(tempfile.gettempdir() if use_tmpdir else os.getcwd(), rel_dirname)
os.mkdir(abs_pathname)
return abs_pathname
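# A small sketch of the naming scheme used above, with an illustrative default path: the build
# directory name is 'b ' plus the first ten hex digits of a SHA-256 of the test's source path,
# so a rerun of the same test lands in the same directory and Ccache keeps hitting its cache.
def _example_builddir_name(src_dir: str = 'test cases/common/1 trivial') -> str:
    import hashlib
    return 'b ' + hashlib.sha256(src_dir.encode(errors='ignore')).hexdigest()[0:10]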
def format_parameter_file(file_basename: str, test: TestDef, test_build_dir: str) -> Path:
confdata = ConfigurationData()
confdata.values = {'MESON_TEST_ROOT': (str(test.path.absolute()), 'base directory of current test')}
template = test.path / (file_basename + '.in')
destination = Path(test_build_dir) / file_basename
mesonlib.do_conf_file(str(template), str(destination), confdata, 'meson')
return destination
def detect_parameter_files(test: TestDef, test_build_dir: str) -> (Path, Path):
nativefile = test.path / 'nativefile.ini'
crossfile = test.path / 'crossfile.ini'
if os.path.exists(str(test.path / 'nativefile.ini.in')):
nativefile = format_parameter_file('nativefile.ini', test, test_build_dir)
if os.path.exists(str(test.path / 'crossfile.ini.in')):
crossfile = format_parameter_file('crossfile.ini', test, test_build_dir)
return nativefile, crossfile
def run_test(test: TestDef, extra_args, compiler, backend, flags, commands, should_fail, use_tmp: bool):
if test.skip:
return None
build_dir = create_deterministic_builddir(test, use_tmp)
try:
with TemporaryDirectoryWinProof(prefix='i ', dir=None if use_tmp else os.getcwd()) as install_dir:
try:
return _run_test(test, build_dir, install_dir, extra_args, compiler, backend, flags, commands, should_fail)
except TestResult as r:
return r
finally:
mlog.shutdown() # Close the log file because otherwise Windows wets itself.
finally:
mesonlib.windows_proof_rmtree(build_dir)
def _run_test(test: TestDef, test_build_dir: str, install_dir: str, extra_args, compiler, backend, flags, commands, should_fail):
compile_commands, clean_commands, install_commands, uninstall_commands = commands
gen_start = time.time()
# Configure in-process
gen_args = [] # type: T.List[str]
if 'prefix' not in test.do_not_set_opts:
gen_args += ['--prefix', 'x:/usr'] if mesonlib.is_windows() else ['--prefix', '/usr']
if 'libdir' not in test.do_not_set_opts:
gen_args += ['--libdir', 'lib']
gen_args += [test.path.as_posix(), test_build_dir] + flags + extra_args
nativefile, crossfile = detect_parameter_files(test, test_build_dir)
if nativefile.exists():
gen_args.extend(['--native-file', nativefile.as_posix()])
if crossfile.exists():
gen_args.extend(['--cross-file', crossfile.as_posix()])
(returncode, stdo, stde) = run_configure(gen_args, env=test.env)
try:
logfile = Path(test_build_dir, 'meson-logs', 'meson-log.txt')
mesonlog = logfile.open(errors='ignore', encoding='utf-8').read()
except Exception:
mesonlog = no_meson_log_msg
cicmds = run_ci_commands(mesonlog)
testresult = TestResult(cicmds)
testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)
output_msg = validate_output(test, stdo, stde)
testresult.mlog += output_msg
if output_msg:
testresult.fail('Unexpected output while configuring.')
return testresult
if should_fail == 'meson':
if returncode == 1:
return testresult
elif returncode != 0:
testresult.fail('Test exited with unexpected status {}.'.format(returncode))
return testresult
else:
testresult.fail('Test that should have failed succeeded.')
return testresult
if returncode != 0:
testresult.fail('Generating the build system failed.')
return testresult
builddata = build.load(test_build_dir)
dir_args = get_backend_args_for_dir(backend, test_build_dir)
# Build with subprocess
def build_step():
build_start = time.time()
pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)
testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)
if should_fail == 'build':
if pc.returncode != 0:
raise testresult
testresult.fail('Test that should have failed to build succeeded.')
raise testresult
if pc.returncode != 0:
testresult.fail('Compiling source code failed.')
raise testresult
# Touch the meson.build file to force a regenerate
def force_regenerate():
ensure_backend_detects_changes(backend)
os.utime(str(test.path / 'meson.build'))
# just test building
build_step()
# test that regeneration works for build step
force_regenerate()
build_step() # TBD: assert nothing gets built after the regenerate?
# test that regeneration works for test step
force_regenerate()
# Test in-process
clear_internal_caches()
test_start = time.time()
(returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)
testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)
if should_fail == 'test':
if returncode != 0:
return testresult
testresult.fail('Test that should have failed to run unit tests succeeded.')
return testresult
if returncode != 0:
testresult.fail('Running unit tests failed.')
return testresult
# Do installation, if the backend supports it
if install_commands:
env = test.env.copy()
env['DESTDIR'] = install_dir
# Install with subprocess
pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)
testresult.add_step(BuildStep.install, o, e)
if pi.returncode != 0:
testresult.fail('Running install failed.')
return testresult
# Clean with subprocess
env = test.env.copy()
pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)
testresult.add_step(BuildStep.clean, o, e)
if pi.returncode != 0:
testresult.fail('Running clean failed.')
return testresult
# Validate installed files
testresult.add_step(BuildStep.install, '', '')
if not install_commands:
return testresult
install_msg = validate_install(test, Path(install_dir), compiler, builddata.environment)
if install_msg:
testresult.fail('\n' + install_msg)
return testresult
return testresult
def gather_tests(testdir: Path, stdout_mandatory: bool) -> T.List[TestDef]:
tests = [t.name for t in testdir.iterdir() if t.is_dir()]
    tests = [t for t in tests if not t.startswith('.')]  # Filter non-test files (dot files, etc.)
test_defs = [TestDef(testdir / t, None, []) for t in tests]
all_tests = [] # type: T.List[TestDef]
for t in test_defs:
test_def = {}
test_def_file = t.path / 'test.json'
if test_def_file.is_file():
test_def = json.loads(test_def_file.read_text())
# Handle additional environment variables
env = {} # type: T.Dict[str, str]
if 'env' in test_def:
assert isinstance(test_def['env'], dict)
env = test_def['env']
for key, val in env.items():
val = val.replace('@ROOT@', t.path.resolve().as_posix())
val = val.replace('@PATH@', t.env.get('PATH', ''))
env[key] = val
# Handle installed files
installed = [] # type: T.List[InstalledFile]
if 'installed' in test_def:
installed = [InstalledFile(x) for x in test_def['installed']]
# Handle expected output
stdout = test_def.get('stdout', [])
if stdout_mandatory and not stdout:
raise RuntimeError("{} must contain a non-empty stdout key".format(test_def_file))
# Handle the do_not_set_opts list
do_not_set_opts = test_def.get('do_not_set_opts', []) # type: T.List[str]
# Skip tests if the tool requirements are not met
if 'tools' in test_def:
assert isinstance(test_def['tools'], dict)
for tool, vers_req in test_def['tools'].items():
if tool not in tool_vers_map:
t.skip = True
elif not mesonlib.version_compare(tool_vers_map[tool], vers_req):
t.skip = True
# Skip the matrix code and just update the existing test
if 'matrix' not in test_def:
t.env.update(env)
t.installed_files = installed
t.do_not_set_opts = do_not_set_opts
t.stdout = stdout
all_tests += [t]
continue
        # 'matrix' entry is present, so build multiple tests from the matrix definition
opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
matrix = test_def['matrix']
assert "options" in matrix
for key, val in matrix["options"].items():
assert isinstance(val, list)
tmp_opts = [] # type: T.List[T.Tuple[str, bool]]
for i in val:
assert isinstance(i, dict)
assert "val" in i
skip = False
# Skip the matrix entry if environment variable is present
if 'skip_on_env' in i:
for skip_env_var in i['skip_on_env']:
if skip_env_var in os.environ:
skip = True
                # Only run the test if all compiler IDs match
if 'compilers' in i:
for lang, id_list in i['compilers'].items():
if lang not in compiler_id_map or compiler_id_map[lang] not in id_list:
skip = True
break
# Add an empty matrix entry
if i['val'] is None:
tmp_opts += [(None, skip)]
continue
tmp_opts += [('{}={}'.format(key, i['val']), skip)]
if opt_list:
new_opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
for i in opt_list:
for j in tmp_opts:
new_opt_list += [[*i, j]]
opt_list = new_opt_list
else:
opt_list = [[x] for x in tmp_opts]
# Exclude specific configurations
if 'exclude' in matrix:
assert isinstance(matrix['exclude'], list)
new_opt_list = [] # type: T.List[T.List[T.Tuple[str, bool]]]
for i in opt_list:
exclude = False
opt_names = [x[0] for x in i]
for j in matrix['exclude']:
ex_list = ['{}={}'.format(k, v) for k, v in j.items()]
if all([x in opt_names for x in ex_list]):
exclude = True
break
if not exclude:
new_opt_list += [i]
opt_list = new_opt_list
for i in opt_list:
name = ' '.join([x[0] for x in i if x[0] is not None])
opts = ['-D' + x[0] for x in i if x[0] is not None]
skip = any([x[1] for x in i])
test = TestDef(t.path, name, opts, skip or t.skip)
test.env.update(env)
test.installed_files = installed
test.do_not_set_opts = do_not_set_opts
test.stdout = stdout
all_tests += [test]
return sorted(all_tests)
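# Illustrative-only sketch of a test.json as parsed by gather_tests() above; every concrete
# name and value here is hypothetical, but the key structure mirrors the parsing code:
# 'env', 'installed', 'stdout', 'do_not_set_opts', 'tools', and an optional 'matrix' whose
# per-option value lists may carry 'skip_on_env'/'compilers' and whose 'exclude' list drops
# specific option combinations.
_EXAMPLE_TEST_JSON = {
    'env': {'TEST_DATA_DIR': '@ROOT@/data'},
    'installed': [{'type': 'exe', 'file': 'usr/bin/prog'}],
    'stdout': [{'line': 'Project name: example'}],
    'do_not_set_opts': ['libdir'],
    'tools': {'cmake': '>=3.11'},
    'matrix': {
        'options': {
            'static': [
                {'val': 'true'},
                {'val': 'false', 'skip_on_env': ['SKIP_STATIC_BUILDS']},
            ],
        },
        'exclude': [{'static': 'true'}],
    },
}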
def have_d_compiler():
if shutil.which("ldc2"):
return True
elif shutil.which("ldc"):
return True
elif shutil.which("gdc"):
return True
elif shutil.which("dmd"):
# The Windows installer sometimes produces a DMD install
# that exists but segfaults every time the compiler is run.
# Don't know why. Don't know how to fix. Skip in this case.
cp = subprocess.run(['dmd', '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if cp.stdout == b'':
return False
return True
return False
def have_objc_compiler(use_tmp: bool) -> bool:
with TemporaryDirectoryWinProof(prefix='b ', dir=None if use_tmp else '.') as build_dir:
env = environment.Environment(None, build_dir, get_fake_options('/'))
try:
objc_comp = env.detect_objc_compiler(MachineChoice.HOST)
except mesonlib.MesonException:
return False
if not objc_comp:
return False
env.coredata.process_new_compiler('objc', objc_comp, env)
try:
objc_comp.sanity_check(env.get_scratch_dir(), env)
except mesonlib.MesonException:
return False
return True
def have_objcpp_compiler(use_tmp: bool) -> bool:
with TemporaryDirectoryWinProof(prefix='b ', dir=None if use_tmp else '.') as build_dir:
env = environment.Environment(None, build_dir, get_fake_options('/'))
try:
objcpp_comp = env.detect_objcpp_compiler(MachineChoice.HOST)
except mesonlib.MesonException:
return False
if not objcpp_comp:
return False
env.coredata.process_new_compiler('objcpp', objcpp_comp, env)
try:
objcpp_comp.sanity_check(env.get_scratch_dir(), env)
except mesonlib.MesonException:
return False
return True
def have_java():
if shutil.which('javac') and shutil.which('java'):
return True
return False
def skippable(suite, test):
# Everything is optional when not running on CI
if not under_ci:
return True
if not suite.endswith('frameworks'):
return True
    # this test's assumptions aren't valid for Windows paths
if test.endswith('38 libdir must be inside prefix'):
return True
# gtk-doc test may be skipped, pending upstream fixes for spaces in
# filenames landing in the distro used for CI
if test.endswith('10 gtk-doc'):
return True
# NetCDF is not in the CI Docker image
if test.endswith('netcdf'):
return True
# MSVC doesn't link with GFortran
if test.endswith('14 fortran links c'):
return True
# Blocks are not supported on all compilers
if test.endswith('29 blocks'):
return True
# Scientific libraries are skippable on certain systems
# See the discussion here: https://github.com/mesonbuild/meson/pull/6562
if any([x in test for x in ['17 mpi', '25 hdf5', '30 scalapack']]) and skip_scientific:
return True
# These create OS specific tests, and need to be skippable
if any([x in test for x in ['16 sdl', '17 mpi']]):
return True
# We test cmake, and llvm-config. Some linux spins don't provide cmake or
# don't provide either the static or shared llvm libraries (fedora and
# opensuse only have the dynamic ones, for example).
if test.endswith('15 llvm'):
return True
# This test breaks with gobject-introspection <= 1.58.1
if test.endswith('34 gir static lib'):
return True
# No frameworks test should be skipped on linux CI, as we expect all
# prerequisites to be installed
if mesonlib.is_linux():
return False
# Boost test should only be skipped for windows CI build matrix entries
# which don't define BOOST_ROOT
if test.endswith('1 boost'):
if mesonlib.is_windows():
return 'BOOST_ROOT' not in os.environ
return False
# Qt is provided on macOS by Homebrew
if test.endswith('4 qt') and mesonlib.is_osx():
return False
# Other framework tests are allowed to be skipped on other platforms
return True
def skip_csharp(backend) -> bool:
if backend is not Backend.ninja:
return True
if not shutil.which('resgen'):
return True
if shutil.which('mcs'):
return False
if shutil.which('csc'):
# Only support VS2017 for now. Earlier versions fail
# under CI in mysterious ways.
try:
stdo = subprocess.check_output(['csc', '/version'])
except subprocess.CalledProcessError:
return True
# Having incrementing version numbers would be too easy.
# Microsoft reset the versioning back to 1.0 (from 4.x)
# when they got the Roslyn based compiler. Thus there
# is NO WAY to reliably do version number comparisons.
# Only support the version that ships with VS2017.
return not stdo.startswith(b'2.')
return True
# In Azure some setups have a broken rustc that will error out
# on all compilation attempts.
def has_broken_rustc() -> bool:
dirname = 'brokenrusttest'
if os.path.exists(dirname):
mesonlib.windows_proof_rmtree(dirname)
os.mkdir(dirname)
open(dirname + '/sanity.rs', 'w').write('''fn main() {
}
''')
pc = subprocess.run(['rustc', '-o', 'sanity.exe', 'sanity.rs'],
cwd=dirname,
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL)
mesonlib.windows_proof_rmtree(dirname)
return pc.returncode != 0
def should_skip_rust(backend: Backend) -> bool:
if not shutil.which('rustc'):
return True
if backend is not Backend.ninja:
return True
if mesonlib.is_windows() and has_broken_rustc():
return True
return False
def detect_tests_to_run(only: T.List[str], use_tmp: bool) -> T.List[T.Tuple[str, T.List[TestDef], bool]]:
"""
Parameters
----------
only: list of str, optional
specify names of tests to run
Returns
-------
gathered_tests: list of tuple of str, list of TestDef, bool
tests to run
"""
skip_fortran = not(shutil.which('gfortran') or
shutil.which('flang') or
shutil.which('pgfortran') or
shutil.which('ifort'))
class TestCategory:
def __init__(self, category: str, subdir: str, skip: bool = False, stdout_mandatory: bool = False):
self.category = category # category name
self.subdir = subdir # subdirectory
self.skip = skip # skip condition
            self.stdout_mandatory = stdout_mandatory  # expected stdout is mandatory for tests in this category
all_tests = [
TestCategory('cmake', 'cmake', not shutil.which('cmake') or (os.environ.get('compiler') == 'msvc2015' and under_ci)),
TestCategory('common', 'common'),
TestCategory('native', 'native'),
TestCategory('warning-meson', 'warning', stdout_mandatory=True),
TestCategory('failing-meson', 'failing', stdout_mandatory=True),
TestCategory('failing-build', 'failing build'),
TestCategory('failing-test', 'failing test'),
TestCategory('keyval', 'keyval'),
TestCategory('platform-osx', 'osx', not mesonlib.is_osx()),
TestCategory('platform-windows', 'windows', not mesonlib.is_windows() and not mesonlib.is_cygwin()),
TestCategory('platform-linux', 'linuxlike', mesonlib.is_osx() or mesonlib.is_windows()),
TestCategory('java', 'java', backend is not Backend.ninja or mesonlib.is_osx() or not have_java()),
TestCategory('C#', 'csharp', skip_csharp(backend)),
TestCategory('vala', 'vala', backend is not Backend.ninja or not shutil.which(os.environ.get('VALAC', 'valac'))),
TestCategory('rust', 'rust', should_skip_rust(backend)),
TestCategory('d', 'd', backend is not Backend.ninja or not have_d_compiler()),
TestCategory('objective c', 'objc', backend not in (Backend.ninja, Backend.xcode) or not have_objc_compiler(options.use_tmpdir)),
TestCategory('objective c++', 'objcpp', backend not in (Backend.ninja, Backend.xcode) or not have_objcpp_compiler(options.use_tmpdir)),
TestCategory('fortran', 'fortran', skip_fortran or backend != Backend.ninja),
TestCategory('swift', 'swift', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('swiftc')),
# CUDA tests on Windows: use Ninja backend: python run_project_tests.py --only cuda --backend ninja
TestCategory('cuda', 'cuda', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('nvcc')),
TestCategory('python3', 'python3', backend is not Backend.ninja),
TestCategory('python', 'python'),
TestCategory('fpga', 'fpga', shutil.which('yosys') is None),
TestCategory('frameworks', 'frameworks'),
TestCategory('nasm', 'nasm'),
TestCategory('wasm', 'wasm', shutil.which('emcc') is None or backend is not Backend.ninja),
]
categories = [t.category for t in all_tests]
    assert categories == ALL_TESTS, 'argparse("--only", choices=ALL_TESTS) needs to be updated to match all_tests categories'
if only:
all_tests = [t for t in all_tests if t.category in only]
gathered_tests = [(t.category, gather_tests(Path('test cases', t.subdir), t.stdout_mandatory), t.skip) for t in all_tests]
return gathered_tests
def run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
log_name_base: str, failfast: bool,
extra_args: T.List[str], use_tmp: bool) -> T.Tuple[int, int, int]:
global logfile
txtname = log_name_base + '.txt'
with open(txtname, 'w', encoding='utf-8', errors='ignore') as lf:
logfile = lf
return _run_tests(all_tests, log_name_base, failfast, extra_args, use_tmp)
def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
log_name_base: str, failfast: bool,
extra_args: T.List[str], use_tmp: bool) -> T.Tuple[int, int, int]:
global stop, executor, futures, host_c_compiler
xmlname = log_name_base + '.xml'
junit_root = ET.Element('testsuites')
conf_time = 0
build_time = 0
test_time = 0
passing_tests = 0
failing_tests = 0
skipped_tests = 0
commands = (compile_commands, clean_commands, install_commands, uninstall_commands)
try:
# This fails in some CI environments for unknown reasons.
num_workers = multiprocessing.cpu_count()
except Exception as e:
print('Could not determine number of CPUs due to the following reason:' + str(e))
print('Defaulting to using only one process')
num_workers = 1
# Due to Ninja deficiency, almost 50% of build time
# is spent waiting. Do something useful instead.
#
# Remove this once the following issue has been resolved:
# https://github.com/mesonbuild/meson/pull/2082
if not mesonlib.is_windows(): # twice as fast on Windows by *not* multiplying by 2.
num_workers *= 2
executor = ProcessPoolExecutor(max_workers=num_workers)
for name, test_cases, skipped in all_tests:
current_suite = ET.SubElement(junit_root, 'testsuite', {'name': name, 'tests': str(len(test_cases))})
print()
if skipped:
print(bold('Not running %s tests.' % name))
else:
print(bold('Running %s tests.' % name))
print()
futures = []
for t in test_cases:
# Jenkins screws us over by automatically sorting test cases by name
# and getting it wrong by not doing logical number sorting.
(testnum, testbase) = t.path.name.split(' ', 1)
testname = '%.3d %s' % (int(testnum), testbase)
if t.name:
testname += ' ({})'.format(t.name)
should_fail = False
suite_args = []
if name.startswith('failing'):
should_fail = name.split('failing-')[1]
if name.startswith('warning'):
suite_args = ['--fatal-meson-warnings']
should_fail = name.split('warning-')[1]
t.skip = skipped or t.skip
result = executor.submit(run_test, t, extra_args + suite_args + t.args,
host_c_compiler, backend, backend_flags, commands, should_fail, use_tmp)
futures.append((testname, t, result))
for (testname, t, result) in futures:
sys.stdout.flush()
try:
result = result.result()
except CancelledError:
continue
if (result is None) or (('MESON_SKIP_TEST' in result.stdo) and (skippable(name, t.path.as_posix()))):
print(yellow('Skipping:'), t.display_name())
current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
'classname': name})
ET.SubElement(current_test, 'skipped', {})
skipped_tests += 1
else:
without_install = "" if len(install_commands) > 0 else " (without install)"
if result.msg != '':
print(red('Failed test{} during {}: {!r}'.format(without_install, result.step.name, t.display_name())))
print('Reason:', result.msg)
failing_tests += 1
if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:
# For configure failures, instead of printing stdout,
# print the meson log if available since it's a superset
# of stdout and often has very useful information.
failing_logs.append(result.mlog)
elif under_ci:
# Always print the complete meson log when running in
# a CI. This helps debugging issues that only occur in
# a hard to reproduce environment
failing_logs.append(result.mlog)
failing_logs.append(result.stdo)
else:
failing_logs.append(result.stdo)
for cmd_res in result.cicmds:
failing_logs.append(cmd_res)
failing_logs.append(result.stde)
if failfast:
print("Cancelling the rest of the tests")
for (_, _, res) in futures:
res.cancel()
else:
print('Succeeded test%s: %s' % (without_install, t.display_name()))
passing_tests += 1
conf_time += result.conftime
build_time += result.buildtime
test_time += result.testtime
total_time = conf_time + build_time + test_time
log_text_file(logfile, t.path, result.stdo, result.stde)
current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
'classname': name,
'time': '%.3f' % total_time})
if result.msg != '':
ET.SubElement(current_test, 'failure', {'message': result.msg})
stdoel = ET.SubElement(current_test, 'system-out')
stdoel.text = result.stdo
stdeel = ET.SubElement(current_test, 'system-err')
stdeel.text = result.stde
if failfast and failing_tests > 0:
break
print("\nTotal configuration time: %.2fs" % conf_time)
print("Total build time: %.2fs" % build_time)
print("Total test time: %.2fs" % test_time)
ET.ElementTree(element=junit_root).write(xmlname, xml_declaration=True, encoding='UTF-8')
return passing_tests, failing_tests, skipped_tests
def check_file(file: Path):
lines = file.read_bytes().split(b'\n')
tabdetector = re.compile(br' *\t')
for i, line in enumerate(lines):
if re.match(tabdetector, line):
raise SystemExit("File {} contains a tab indent on line {:d}. Only spaces are permitted.".format(file, i + 1))
if line.endswith(b'\r'):
raise SystemExit("File {} contains DOS line ending on line {:d}. Only unix-style line endings are permitted.".format(file, i + 1))
def check_format():
check_suffixes = {'.c',
'.cpp',
'.cxx',
'.cc',
'.rs',
'.f90',
'.vala',
'.d',
'.s',
'.m',
'.mm',
'.asm',
'.java',
'.txt',
'.py',
'.swift',
'.build',
'.md',
}
skip_dirs = {
'.dub', # external deps are here
'.pytest_cache',
'meson-logs', 'meson-private',
'work area',
'.eggs', '_cache', # e.g. .mypy_cache
'venv', # virtualenvs have DOS line endings
}
for (root, _, filenames) in os.walk('.'):
if any([x in root for x in skip_dirs]):
continue
for fname in filenames:
file = Path(fname)
if file.suffix.lower() in check_suffixes:
if file.name in ('sitemap.txt', 'meson-test-run.txt'):
continue
check_file(root / file)
def check_meson_commands_work(options):
global backend, compile_commands, test_commands, install_commands
testdir = PurePath('test cases', 'common', '1 trivial').as_posix()
meson_commands = mesonlib.python_command + [get_meson_script()]
with TemporaryDirectoryWinProof(prefix='b ', dir=None if options.use_tmpdir else '.') as build_dir:
print('Checking that configuring works...')
gen_cmd = meson_commands + [testdir, build_dir] + backend_flags + options.extra_args
pc, o, e = Popen_safe(gen_cmd)
if pc.returncode != 0:
raise RuntimeError('Failed to configure {!r}:\n{}\n{}'.format(testdir, e, o))
print('Checking that introspect works...')
pc, o, e = Popen_safe(meson_commands + ['introspect', '--targets'], cwd=build_dir)
json.loads(o)
if pc.returncode != 0:
raise RuntimeError('Failed to introspect --targets {!r}:\n{}\n{}'.format(testdir, e, o))
print('Checking that building works...')
dir_args = get_backend_args_for_dir(backend, build_dir)
pc, o, e = Popen_safe(compile_commands + dir_args, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to build {!r}:\n{}\n{}'.format(testdir, e, o))
print('Checking that testing works...')
pc, o, e = Popen_safe(test_commands, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to test {!r}:\n{}\n{}'.format(testdir, e, o))
if install_commands:
print('Checking that installing works...')
pc, o, e = Popen_safe(install_commands, cwd=build_dir)
if pc.returncode != 0:
raise RuntimeError('Failed to install {!r}:\n{}\n{}'.format(testdir, e, o))
def detect_system_compiler(options):
global host_c_compiler, compiler_id_map
with TemporaryDirectoryWinProof(prefix='b ', dir=None if options.use_tmpdir else '.') as build_dir:
fake_opts = get_fake_options('/')
if options.cross_file:
fake_opts.cross_file = [options.cross_file]
if options.native_file:
fake_opts.native_file = [options.native_file]
env = environment.Environment(None, build_dir, fake_opts)
print_compilers(env, MachineChoice.HOST)
if options.cross_file:
print_compilers(env, MachineChoice.BUILD)
for lang in sorted(compilers.all_languages):
try:
comp = env.compiler_from_language(lang, MachineChoice.HOST)
# note compiler id for later use with test.json matrix
compiler_id_map[lang] = comp.get_id()
except mesonlib.MesonException:
comp = None
# note C compiler for later use by platform_fix_name()
if lang == 'c':
if comp:
host_c_compiler = comp.get_id()
else:
raise RuntimeError("Could not find C compiler.")
def print_compilers(env, machine):
print()
print('{} machine compilers'.format(machine.get_lower_case_name()))
print()
for lang in sorted(compilers.all_languages):
try:
comp = env.compiler_from_language(lang, machine)
details = '{:<10} {} {}'.format('[' + comp.get_id() + ']', ' '.join(comp.get_exelist()), comp.get_version_string())
except mesonlib.MesonException:
details = '[not found]'
print('%-7s: %s' % (lang, details))
def print_tool_versions():
tools = [
{
'tool': 'ninja',
'args': ['--version'],
'regex': re.compile(r'^([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
{
'tool': 'cmake',
'args': ['--version'],
'regex': re.compile(r'^cmake version ([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
{
'tool': 'hotdoc',
'args': ['--version'],
'regex': re.compile(r'^([0-9]+(\.[0-9]+)*(-[a-z0-9]+)?)$'),
'match_group': 1,
},
]
def get_version(t: dict) -> str:
exe = shutil.which(t['tool'])
if not exe:
return 'not found'
args = [t['tool']] + t['args']
pc, o, e = Popen_safe(args)
if pc.returncode != 0:
return '{} (invalid {} executable)'.format(exe, t['tool'])
for i in o.split('\n'):
i = i.strip('\n\r\t ')
m = t['regex'].match(i)
if m is not None:
tool_vers_map[t['tool']] = m.group(t['match_group'])
return '{} ({})'.format(exe, m.group(t['match_group']))
return '{} (unknown)'.format(exe)
print()
print('tools')
print()
max_width = max([len(x['tool']) for x in tools] + [7])
for tool in tools:
print('{0:<{2}}: {1}'.format(tool['tool'], get_version(tool), max_width))
print()
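# A hypothetical example of extending the tool table above: each entry names an executable,
# the arguments that print its version, a regex whose 'match_group' captures the version
# string, and the captured value ends up in tool_vers_map for test.json 'tools' checks.
# The 'pkg-config' entry is an assumption for illustration and is not registered anywhere.
_EXAMPLE_TOOL_ENTRY = {
    'tool': 'pkg-config',
    'args': ['--version'],
    'regex': re.compile(r'^([0-9]+(\.[0-9]+)*)$'),
    'match_group': 1,
}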
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run the test suite of Meson.")
parser.add_argument('extra_args', nargs='*',
help='arguments that are passed directly to Meson (remember to have -- before these).')
parser.add_argument('--backend', dest='backend', choices=backendlist)
parser.add_argument('--failfast', action='store_true',
help='Stop running if test case fails')
parser.add_argument('--no-unittests', action='store_true',
help='Not used, only here to simplify run_tests.py')
parser.add_argument('--only', help='name of test(s) to run', nargs='+', choices=ALL_TESTS)
parser.add_argument('--cross-file', action='store', help='File describing cross compilation environment.')
parser.add_argument('--native-file', action='store', help='File describing native compilation environment.')
parser.add_argument('--use-tmpdir', action='store_true', help='Use tmp directory for temporary files.')
options = parser.parse_args()
if options.cross_file:
options.extra_args += ['--cross-file', options.cross_file]
if options.native_file:
options.extra_args += ['--native-file', options.native_file]
print('Meson build system', meson_version, 'Project Tests')
print('Using python', sys.version.split('\n')[0])
setup_commands(options.backend)
detect_system_compiler(options)
print_tool_versions()
script_dir = os.path.split(__file__)[0]
if script_dir != '':
os.chdir(script_dir)
check_format()
check_meson_commands_work(options)
try:
all_tests = detect_tests_to_run(options.only, options.use_tmpdir)
(passing_tests, failing_tests, skipped_tests) = run_tests(all_tests, 'meson-test-run', options.failfast, options.extra_args, options.use_tmpdir)
except StopException:
pass
print('\nTotal passed tests:', green(str(passing_tests)))
print('Total failed tests:', red(str(failing_tests)))
print('Total skipped tests:', yellow(str(skipped_tests)))
if failing_tests > 0:
print('\nMesonlogs of failing tests\n')
for l in failing_logs:
try:
print(l, '\n')
except UnicodeError:
print(l.encode('ascii', errors='replace').decode(), '\n')
for name, dirs, _ in all_tests:
dir_names = list(set(x.path.name for x in dirs))
for k, g in itertools.groupby(dir_names, key=lambda x: x.split()[0]):
tests = list(g)
if len(tests) != 1:
print('WARNING: The %s suite contains duplicate "%s" tests: "%s"' % (name, k, '", "'.join(tests)))
raise SystemExit(failing_tests)
| 40.739483
| 152
| 0.591319
|
e8395b49c160bb3bcad92a8958f271f30d647dd7
| 5,364
|
py
|
Python
|
src/datadog_api_client/v1/model/change_widget_definition_type.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 32
|
2021-01-07T15:09:56.000Z
|
2022-01-30T05:49:23.000Z
|
src/datadog_api_client/v1/model/change_widget_definition_type.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 228
|
2020-09-03T14:03:54.000Z
|
2022-03-31T20:16:12.000Z
|
src/datadog_api_client/v1/model/change_widget_definition_type.py
|
DataDog/datadog-api-client-python
|
de2fc57dbde9acf4b8c8eef94ac29911227a62a2
|
[
"Apache-2.0"
] | 12
|
2020-09-15T21:36:03.000Z
|
2022-03-31T17:13:17.000Z
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
class ChangeWidgetDefinitionType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("value",): {
"CHANGE": "change",
},
}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"value": (str,),
}
discriminator = None
attribute_map = {}
_composed_schemas = None
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""ChangeWidgetDefinitionType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Type of the change widget.. if omitted defaults to "change", must be one of ["change", ] # noqa: E501
Keyword Args:
value (str): Type of the change widget.. if omitted defaults to "change", must be one of ["change", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
super().__init__(kwargs)
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
value = "change"
self._check_pos_args(args)
self.value = value
self._check_kw_args(kwargs)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""Helper creating a new instance from a response."""
return cls(*args, **kwargs)
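# Minimal usage sketch (not part of the generated client): the enum accepts only the single
# allowed value "change", which is also the default when no value is passed.
def _example_change_widget_definition_type() -> "ChangeWidgetDefinitionType":
    return ChangeWidgetDefinitionType("change")  # equivalent to ChangeWidgetDefinitionType()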
| 39.441176
| 129
| 0.576808
|
afcd06ebed3b824ea437b6dfcdf7823232577d08
| 5,266
|
py
|
Python
|
tests/test_dsl_dsbs.py
|
lelis-research/PyGames-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 1
|
2021-06-03T15:54:16.000Z
|
2021-06-03T15:54:16.000Z
|
tests/test_dsl_dsbs.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 3
|
2021-07-26T19:58:31.000Z
|
2021-07-27T17:35:51.000Z
|
tests/test_dsl_dsbs.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | null | null | null |
import unittest
from src.dsl import LessThan, GreaterThan, EqualTo
from unittest.mock import Mock
def mockOperand(value):
op = Mock()
op.get_size.return_value = 1
op.interpret.return_value = value
return op
class TestLessThan(unittest.TestCase):
def test_size_three(self):
lt = LessThan.new(mockOperand(5), mockOperand(3))
self.assertEqual(lt.get_size(), 3, 'LessThan object should have size 3')
def test_size_change_lessThan_size(self):
# Manually change lt.size to 100
lt = LessThan.new(mockOperand(5), mockOperand(5))
lt.size = 100
self.assertEqual(lt.get_size(), 100, 'LessThan object should have size 100')
def test_size_change_args_size(self):
# Manually change the Operand sizes
left = mockOperand(5)
right = mockOperand(14)
lt = LessThan.new(left, right)
left.get_size.return_value = 10
right.get_size.return_value = 10
self.assertEqual(lt.get_size(), 3, 'LessThan object should have size 3')
def test_interpret_true(self):
lt = LessThan.new(mockOperand(3), mockOperand(10))
self.assertEqual(type(lt.interpret({})).__name__, 'bool', 'interpret method of LessThan object should return a \'bool\'')
self.assertEqual(lt.interpret({}), True, 'interpret method of LessThan object should return True')
def test_interpret_false(self):
lt = LessThan.new(mockOperand(88), mockOperand(10))
self.assertEqual(type(lt.interpret({})).__name__, 'bool', 'interpret method of LessThan object should return a \'bool\'')
self.assertEqual(lt.interpret({}), False, 'interpret method of LessThan object should return False')
lt = LessThan.new(mockOperand(77), mockOperand(77))
self.assertEqual(lt.interpret({}), False, 'interpret method of LessThan object should return False')
class TestGreaterThan(unittest.TestCase):
def test_size_three(self):
gt = GreaterThan.new(mockOperand(5), mockOperand(3))
self.assertEqual(gt.get_size(), 3, 'GreaterThan object should have size 3')
    def test_size_change_greaterThan_size(self):
# Manually change gt.size to 100
gt = GreaterThan.new(mockOperand(5), mockOperand(5))
gt.size = 100
self.assertEqual(gt.get_size(), 100, 'GreaterThan object should have size 100')
def test_size_change_args_size(self):
# Manually change the Operand sizes
left = mockOperand(5)
right = mockOperand(14)
gt = GreaterThan.new(left, right)
left.get_size.return_value = 10
right.get_size.return_value = 14
self.assertEqual(gt.get_size(), 3, 'GreaterThan object should have size 3')
def test_interpret_true(self):
gt = GreaterThan.new(mockOperand(10), mockOperand(3))
self.assertEqual(type(gt.interpret({})).__name__, 'bool', 'interpret method of GreaterThan object should return a \'bool\'')
self.assertEqual(gt.interpret({}), True, 'interpret method of GreaterThan object should return True')
def test_interpret_false(self):
gt = GreaterThan.new(mockOperand(10), mockOperand(88))
self.assertEqual(type(gt.interpret({})).__name__, 'bool', 'interpret method of GreaterThan object should return a \'bool\'')
self.assertEqual(gt.interpret({}), False, 'interpret method of GreaterThan object should return False')
gt = GreaterThan.new(mockOperand(77), mockOperand(77))
self.assertEqual(gt.interpret({}), False, 'interpret method of GreaterThan object should return False')
class TestEqualTo(unittest.TestCase):
def test_size_three(self):
eq = EqualTo.new(mockOperand(5), mockOperand(3))
self.assertEqual(eq.get_size(), 3, 'EqualTo object should have size 3')
    def test_size_change_equalTo_size(self):
# Manually change eq.size to 100
eq = EqualTo.new(mockOperand(5), mockOperand(5))
eq.size = 100
self.assertEqual(eq.get_size(), 100, 'EqualTo object should have size 100')
def test_size_change_args_size(self):
# Manually change the mockOperand sizes
left = mockOperand(5)
right = mockOperand(14)
eq = EqualTo.new(left, right)
left.get_size.return_value = 50
right.get_size.return_value = 10
self.assertEqual(eq.get_size(), 3, 'EqualTo object should have size 3')
def test_interpret_true(self):
eq = EqualTo.new(mockOperand(60), mockOperand(60))
self.assertEqual(type(eq.interpret({})).__name__, 'bool', 'interpret method of EqualTo object should return a \'bool\'')
self.assertEqual(eq.interpret({}), True, 'interpret method of EqualTo object should return True')
def test_interpret_false(self):
eq = EqualTo.new(mockOperand(88), mockOperand(10))
self.assertEqual(type(eq.interpret({})).__name__, 'bool', 'interpret method of EqualTo object should return a \'bool\'')
self.assertEqual(eq.interpret({}), False, 'interpret method of EqualTo object should return False')
eq = EqualTo.new(mockOperand(45), mockOperand(92))
self.assertEqual(eq.interpret({}), False, 'interpret method of EqualTo object should return False')
if __name__ == '__main__':
unittest.main()
| 44.627119
| 132
| 0.684201
|
bdb4b1b105c7eded0df4982fc2f73e6d7eb38fed
| 2,627
|
py
|
Python
|
app.py
|
ma76/searchApp-v2
|
965348b64f870b891ac114a5bb62798107f02bcc
|
[
"MIT"
] | null | null | null |
app.py
|
ma76/searchApp-v2
|
965348b64f870b891ac114a5bb62798107f02bcc
|
[
"MIT"
] | 2
|
2021-04-05T14:30:13.000Z
|
2021-04-05T14:35:37.000Z
|
app.py
|
ma76/searche
|
965348b64f870b891ac114a5bb62798107f02bcc
|
[
"MIT"
] | null | null | null |
# import sys
import os
# import glob
import shutil
import pandas as pd
# from openpyxl.workbook import Workbook
import easygui
# CONST
OUTPUT_DIR = os.getcwd()
FORMAT_DIRECTORIES_MAP = {
'text': 'notes',
'video': 'videos',
'music': 'music',
'image': 'pictures',
'zip': 'compressed',
'doc': 'documents',
'unknown': 'other'
}
FORMATS_EXT_MAP = {
'zip': ['rar', 'zip'],
'text': ['txt', 'md', 'csv'],
'video': ['mp4', 'mkv', 'avi'],
'music': ['mp3'],
'image': ['jpg', 'jpeg', 'png', 'gif'],
'doc': ['pdf', 'djvu', 'docx', 'xlsx']
}
# helper // check smell
# sift + f6 // Key Promoter X
def message_box(title, errorMsg):
easygui.msgbox(errorMsg, title, ok_button="Ok")
def get_output_path(path):
return os.path.join(OUTPUT_DIR, path)
def move_file(src, dist):
try:
shutil.move(src, dist)
except:
pass
def get_ext(path):
return path.split(".").pop()
def get_format(path):
ext = get_ext(path)
    assert ext, 'Extension must be available'
    for format, extensions in FORMATS_EXT_MAP.items():
        if ext in extensions:
return format
return 'unknown'
def get_dist(format, output_dir=None):
ext_dir = FORMAT_DIRECTORIES_MAP[format]
ext_dir = ext_dir[0].upper() + ext_dir[1:]
if output_dir is None:
return get_output_path(ext_dir)
return os.path.join(output_dir, ext_dir)
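# A small, illustrative helper (the file name is hypothetical) tying the two lookups together:
# get_format('holiday.jpeg') yields 'image', and get_dist('image', 'sorted') yields
# 'sorted/Pictures' on POSIX systems, so the file would be moved into a capitalized
# 'Pictures' directory under the chosen output root.
def example_destination(path, output_dir=None):
    return get_dist(get_format(path), output_dir)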
def count_of_files(dir):
for i in range(0,len(dir)-1) :
count = len(os.listdir(dir[i+1]))
print(f"The {dir[i+1]} directory has {count} files")
# why better to have single responsibility functions
def create_report_file(dir_info):
try:
directories = dir_info[1:]
data = pd.DataFrame({
"Directory": directories,
"Count of files": [len(os.listdir(directory)) for directory in directories],
})
data.to_excel("./data.xlsx")
print("'Data-File' is created !")
    except PermissionError:
message_box("Error", "Please close 'Data-File' !!! ")
if __name__ == '__main__':
root_dir = r'test'
output_dir = root_dir # or change if you want
my_files =[]
for (file_path,_,file_names) in os.walk(root_dir):
for path in file_names:
format = get_format(path)
dist = get_dist(format, output_dir)
os.makedirs(dist, exist_ok=True)
move_file(
get_output_path(os.path.join(file_path, path)),
dist
)
my_files.append(file_path)
#print(my_files)
#count_of_files(my_files)
create_report_file(my_files)
| 26.27
| 88
| 0.60906
|
fc059782ca43838406f10e88ef3ab5308837ffcc
| 199
|
py
|
Python
|
mainwindow.py
|
aberrier/ethera
|
caed5058a19db629942c847280204794da063319
|
[
"MIT"
] | 2
|
2018-06-08T09:10:41.000Z
|
2018-06-08T09:10:47.000Z
|
mainwindow.py
|
aberrier/ethera
|
caed5058a19db629942c847280204794da063319
|
[
"MIT"
] | null | null | null |
mainwindow.py
|
aberrier/ethera
|
caed5058a19db629942c847280204794da063319
|
[
"MIT"
] | 1
|
2019-02-06T19:22:53.000Z
|
2019-02-06T19:22:53.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
| 22.111111
| 68
| 0.703518
|
e78e85c3b9c66336828e9b9fae0b23bd15dd8a66
| 37,625
|
py
|
Python
|
venv/Lib/site-packages/ebcli/operations/logsops.py
|
theLambda/DBH-project1
|
9b7b1c9bd9f6629724c53872c60b1171e9ba1fa2
|
[
"MIT"
] | 110
|
2020-01-15T22:58:46.000Z
|
2022-03-27T20:47:33.000Z
|
ebcli/operations/logsops.py
|
QPC-database/aws-elastic-beanstalk-cli
|
87ad9d8bbe5e4e7cb01b1bd4392eda33cb1943f7
|
[
"Apache-2.0"
] | 89
|
2020-01-15T23:18:34.000Z
|
2022-03-31T21:56:05.000Z
|
ebcli/operations/logsops.py
|
QPC-database/aws-elastic-beanstalk-cli
|
87ad9d8bbe5e4e7cb01b1bd4392eda33cb1943f7
|
[
"Apache-2.0"
] | 50
|
2020-01-15T22:58:53.000Z
|
2022-02-11T17:39:28.000Z
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import calendar
from datetime import datetime
import os
import threading
import time
import traceback
from cement.utils.misc import minimal_logger
from six import iteritems
from ebcli.core import fileoperations, io
from ebcli.lib import elasticbeanstalk, utils, cloudwatch
from ebcli.lib.aws import MaxRetriesError
from ebcli.resources.strings import strings, prompts
from ebcli.resources.statics import namespaces, option_names, logs_operations_constants
from ebcli.objects.exceptions import InvalidOptionsError, NotFoundError, ServiceError
from ebcli.operations import commonops
LOG = minimal_logger(__name__)
TAIL_LOG_SIZE = 100
BEANSTALK_LOG_PREFIX = '/aws/elasticbeanstalk'
def beanstalk_log_group_builder(env_name, log_group_name=None):
"""
    Normalizes the `log_group_name` passed in by the customer.
:param env_name: current environment being used
:param log_group_name: One of the following
- None: the method defaults to using '/aws/elasticbeanstalk/<`env_name`>' as
the `log_group_name` in this case
- '/aws/elasticbeanstalk/<`env_name`>/<log_group_name>': the `log_group_name`
is used as is
- '<log_group_name>': '/aws/elasticbeanstalk/<`env_name`>' is prefixed to the
`log_group_name`
:return: a normalized `log_group_name`
"""
log_group_name = log_group_name or deployment_logs_log_group_name(env_name)
if not log_group_name.startswith(cloudwatch_log_group_prefix_for_environment(env_name)):
log_group_name = '{0}/{1}/{2}'.format(
BEANSTALK_LOG_PREFIX, env_name, log_group_name
).replace("//", "/")
return log_group_name
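# A hedged illustration of the prefixing case documented above; the environment name and the
# log suffix are hypothetical. No AWS call is made because a log_group_name is supplied.
def _example_normalized_log_group(env_name='my-env'):
    # returns '/aws/elasticbeanstalk/my-env/var/log/nginx/error.log'
    return beanstalk_log_group_builder(env_name, 'var/log/nginx/error.log')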
def cloudwatch_log_group_prefix_for_environment(env_name):
"""
Generates the CloudWatch logGroup prefix for the environment, `env_name`.
:param env_name: An Elastic Beanstalk environment name
:return: A CloudWatch logGroup name prefix of the form /aws/elasticbeanstalk/<`env_name`>
"""
return '{0}/{1}'.format(BEANSTALK_LOG_PREFIX, env_name)
def cloudwatch_log_group_for_environment_health_streaming(env_name):
"""
Generates the environment-health.log CloudWatch logGroup name for the environment, `env_name`.
:param env_name: An Elastic Beanstalk environment name
    :return: A CloudWatch logGroup name of the form /aws/elasticbeanstalk/<`env_name`>/environment-health.log
"""
return '{0}/{1}'.format(cloudwatch_log_group_prefix_for_environment(env_name), 'environment-health.log')
def cloudwatch_log_stream_names(log_group, log_stream_name_prefix):
"""
Returns all of the logStream names associated with `log_group` with the
prefix, `log_stream_name_prefix` if one is specified
:param log_group: A CloudWatch logGroup whose logStream names to retrieve
:param log_stream_name_prefix: A prefix to filter logStream names by
:return: All of the logStream names associated with `log_group`
with the prefix, `log_stream_name_prefix` if one
is specified
"""
return cloudwatch.get_all_stream_names(
log_group_name=log_group,
log_stream_name_prefix=log_stream_name_prefix
)
def deployment_logs_log_group_name(env_name):
"""
Determines the default deployment logGroup for the environment, `env_name`
:param env_name: An Elastic Beanstalk environment name
    :return: the CloudWatch logGroup name, ending in 'EBDeploy-Log' for Windows platforms,
             'var/log/eb-engine.log' for Amazon Linux 2 platforms, and 'var/log/eb-activity.log' otherwise
"""
environment = elasticbeanstalk.get_environment(env_name=env_name)
if 'windows' in environment.platform.name.lower():
log_group_suffix = 'EBDeploy-Log'
elif 'Amazon Linux 2/' in environment.platform.name:
log_group_suffix = "var/log/eb-engine.log"
else:
log_group_suffix = 'var/log/eb-activity.log'
return cloudwatch_log_group_prefix_for_environment(env_name) + '/' + log_group_suffix
def disable_cloudwatch_logs(app_name, env_name, cloudwatch_log_source):
"""
Disables CloudWatch log-streaming for the given environment if the required streaming of the
specified `cloudwatch_log_source`s is not already disabled
:param app_name: application name
:param env_name: environment name
:param cloudwatch_log_source: the source of logs. Defaults to 'instance' if value is 'None'.
Use
        - 'instance' to disable instance log-streaming
- 'health' to disable health transition log-streaming
- 'all': disable streaming of all CloudWatch log sources
:return None
"""
cloudwatch_log_source = cloudwatch_log_source or logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
configuration_settings = elasticbeanstalk.describe_configuration_settings(app_name, env_name)
option_settings = []
timeout = 5
if cloudwatch_log_source in [
logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE,
logs_operations_constants.LOG_SOURCES.ALL_LOG_SOURCES
]:
if instance_log_streaming_enabled(app_name, env_name, config_settings=configuration_settings):
option_settings.append(_instance_log_streaming_option_setting(disable=True))
io.echo(strings['cloudwatch_instance_log_streaming.disable'])
timeout = 15
else:
io.echo(strings['cloudwatch_instance_log_streaming.already_disabled'])
if cloudwatch_log_source in [
logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE,
logs_operations_constants.LOG_SOURCES.ALL_LOG_SOURCES
]:
if environment_health_streaming_enabled(app_name, env_name, config_settings=configuration_settings):
io.echo(strings['cloudwatch_environment_health_log_streaming.disable'])
option_settings.append(_environment_health_log_streaming_option_setting(disable=True))
else:
io.echo(strings['cloudwatch_environment_health_log_streaming.already_disabled'])
if option_settings:
commonops.update_environment(env_name, changes=option_settings, nohang=False, timeout=timeout)
def enable_cloudwatch_logs(app_name, env_name, cloudwatch_log_source):
"""
Enables CloudWatch log-streaming for the given environment if the required streaming of the
specified `cloudwatch_log_source`s is not already enabled
:param app_name: application name
:param env_name: environment name
:param cloudwatch_log_source: the source of logs. Defaults to 'instance' if value is 'None'.
Use
        - 'instance' to enable instance log-streaming
- 'health' to enable health transition log-streaming
- 'all': enable streaming of all CloudWatch log sources
:return None
"""
cloudwatch_log_source = cloudwatch_log_source or logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
configuration_settings = elasticbeanstalk.describe_configuration_settings(app_name, env_name)
option_settings = []
timeout = 5
if cloudwatch_log_source in [
logs_operations_constants.LOG_SOURCES.ALL_LOG_SOURCES,
logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
]:
if not instance_log_streaming_enabled(app_name, env_name, config_settings=configuration_settings):
timeout = 15
option_settings.append(_instance_log_streaming_option_setting())
io.echo(strings['cloudwatch_instance_log_streaming.enable'])
else:
io.echo(strings['cloudwatch_instance_log_streaming.already_enabled'])
if cloudwatch_log_source in [
logs_operations_constants.LOG_SOURCES.ALL_LOG_SOURCES,
logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE,
]:
_raise_if_environment_is_not_using_enhanced_health(configuration_settings)
if not environment_health_streaming_enabled(
app_name,
env_name,
config_settings=configuration_settings
):
option_settings.append(_environment_health_log_streaming_option_setting())
io.echo(strings['cloudwatch_environment_health_log_streaming.enable'])
else:
io.echo(strings['cloudwatch_environment_health_log_streaming.already_enabled'])
if not option_settings:
return
_echo_link_to_cloudwatch_console(env_name)
commonops.update_environment(env_name, changes=option_settings, nohang=False, timeout=timeout)
def environment_health_streaming_enabled(app_name, env_name, config_settings=None):
"""
Checks if health transition streaming is enabled for the given environment
:param app_name: application name
:param env_name: environment name
:param config_settings: the raw response of a call to describe_configuration_settings
:return: boolean if the given environment has health transition streaming enabled
"""
config_settings = config_settings or elasticbeanstalk.describe_configuration_settings(app_name, env_name)
stream_enabled = elasticbeanstalk.get_specific_configuration(
config_settings,
namespaces.CLOUDWATCH_ENVIRONMENT_HEALTH_LOGS,
option_names.CLOUDWATCH_ENVIRONMENT_HEALTH_LOGS_ENABLED
)
return stream_enabled == 'true'
def get_cloudwatch_log_stream_events(log_group_name, stream_name, num_log_events=None):
"""
Gets log events from CloudWatch and appends them to a single string to output with each line prefixed with
the stream name.
:param log_group_name: cloudwatch logGroup
:param stream_name: cloudwatch stream name
:param num_log_events: number of log events to retrieve; default is
cloudwatch's max: 10k or 1MB of messages
:return: single string with all log events concatenated together
"""
full_log = []
full_log_blob = ''
try:
response = cloudwatch.get_log_events(log_group_name, stream_name, limit=num_log_events)
for event in response.get('events', []):
message = event.get('message')
full_log.append('[{stream_name}] {message}'.format(stream_name=stream_name, message=message))
full_log_blob = os.linesep.join(full_log)
except ServiceError as e:
LOG.debug('Received service error {}'.format(e))
except Exception as e:
LOG.debug('Exception raised: ' + str(e))
LOG.debug(traceback.format_exc())
return full_log_blob
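# Illustrative note (editor's addition): each returned line is prefixed with the stream
# name, so a hypothetical call such as the one below yields lines of the form
# "[i-0123456789abcdef0] <message>" joined by os.linesep.
#
#     blob = get_cloudwatch_log_stream_events(
#         '/aws/elasticbeanstalk/my-env/var/log/eb-activity.log',  # hypothetical log group
#         'i-0123456789abcdef0',                                   # hypothetical stream name
#         num_log_events=100,
#     )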
def get_cloudwatch_messages(
log_group_name,
stream_name,
formatter,
next_token,
start_time,
messages_handler,
sleep_time=10
):
"""
Periodically polls the logStream `stream_name` until interrupted by a
KeyboardInterrupt or an unexpected exception
:param log_group_name: A CloudWatch logGroup in which the logStream `stream_name`
exists
:param stream_name: A CloudWatch logStream to poll
:param formatter: The object that formats the output to be displayed in the terminal
:param next_token: The token for the next set of items to return
:param start_time: The start of the time range, expressed as the number of
milliseconds after Jan 1, 1970 00:00:00 UTC.
Events with a time stamp earlier than this time are not included.
:param messages_handler: a callable invoked with each batch of formatted log messages retrieved
:param sleep_time: Time in seconds to sleep before polling CloudWatch for newer events
:return: None
"""
while True:
try:
messages, next_token, start_time = _get_cloudwatch_messages(
log_group_name,
stream_name,
formatter,
next_token,
start_time
)
if messages:
messages_handler(messages)
else:
break
except ServiceError as e:
io.log_error(e)
break
except Exception as e:
LOG.debug('Exception raised: ' + str(e))
LOG.debug(traceback.format_exc())
except KeyboardInterrupt:
break
_wait_to_poll_cloudwatch(sleep_time)
start_time = _updated_start_time()
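# Sketch of a messages_handler (editor's addition): get_cloudwatch_messages only needs a
# callable that accepts the list of formatted lines; the hypothetical handler below simply
# echoes each polled batch (log_group_name/stream_name stand in for real values).
#
#     def echo_batch(messages):
#         io.echo(os.linesep.join(messages))
#
#     get_cloudwatch_messages(log_group_name, stream_name, formatter=None,
#                             next_token=None, start_time=None,
#                             messages_handler=echo_batch)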
def get_instance_log_url_mappings(env_name, info_type):
"""
Retrieves mappings of Beanstalk instance ids to S3 URLs on which logs are stored.
:param env_name: An Elastic Beanstalk environment name
:param info_type: The type of information to request. Possible values: tail, bundle
:return: mappings of Beanstalk instance ids to S3 URLs
"""
result = elasticbeanstalk.retrieve_environment_info(env_name, info_type)
instance_id_list = dict()
for log in result['EnvironmentInfo']:
instance_id = log['Ec2InstanceId']
url = log['Message']
instance_id_list[instance_id] = url
return instance_id_list
def get_logs(env_name, info_type, do_zip=False, instance_id=None):
"""
Obtains the set of logs for the environment `env_name` from ElasticBeanstalk
(and not CloudWatch) and determines whether to tail them or bundle/zip them.
:param env_name: An Elastic Beanstalk environment name
:param info_type: The type of information to request. Possible values: tail, bundle
:param do_zip: Whether the information retrieved should be zipped; works only with info_type 'bundle'
:param instance_id: The specific EC2 instance associated with `env_name` whose log information to retrieve
:return: None
"""
instance_id_list = get_instance_log_url_mappings(env_name, info_type)
if instance_id:
instance_id_list = _updated_instance_id_list(instance_id_list, instance_id)
if info_type == logs_operations_constants.INFORMATION_FORMAT.BUNDLE:
_handle_bundle_logs(instance_id_list, do_zip)
else:
_handle_tail_logs(instance_id_list)
def instance_log_streaming_enabled(app_name, env_name, config_settings=None):
"""
Checks if log streaming is enabled for the given environment
:param app_name: application name
:param env_name: environment name
:param config_settings: the raw response of a call to describe_configuration_settings
:return: boolean if the given environment has log streaming enabled
"""
config_settings = (
config_settings or
elasticbeanstalk.describe_configuration_settings(app_name, env_name)
)
stream_enabled = elasticbeanstalk.get_specific_configuration(
config_settings,
namespaces.CLOUDWATCH_LOGS,
option_names.STREAM_LOGS
)
return stream_enabled == 'true'
def normalize_log_group_name(env_name, log_group=None, cloudwatch_log_source=None):
"""
Converts the given (potentially None) `log_group` name to a value that can be
consumed by `describe_log_groups`.
:param env_name: An Elastic Beanstalk environment name
:param log_group: A value for the logGroup name specified by the customer, which
is potentially None
:param cloudwatch_log_source: Name of the log source `log_group` belongs to. One among:
instance, environment-health
:return: A normalized logGroup name
"""
if (
not cloudwatch_log_source
or cloudwatch_log_source == logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
):
log_group = beanstalk_log_group_builder(env_name, log_group)
elif cloudwatch_log_source == logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE:
if log_group:
raise InvalidOptionsError(
strings['logs.log_group_and_environment_health_log_source']
)
log_group = beanstalk_log_group_builder(
env_name,
cloudwatch_log_group_for_environment_health_streaming(env_name)
)
else:
raise InvalidOptionsError(
strings[
'logs.cloudwatch_log_source_argumnent_is_invalid_for_retrieval'
].format(cloudwatch_log_source)
)
return log_group
def paginate_cloudwatch_logs(platform_name, version, formatter=None):
"""
Periodically polls CloudWatch get_log_events to retrieve the logs for the
logStream `version` within the logGroup
derived from `platform_name`
:param platform_name: A CloudWatch logGroup in which the logStream `version` exists
:param version: A CloudWatch logStream to poll
:param formatter: The object that formats the output to be displayed in the terminal
:return: None
"""
log_group_name = _get_platform_builder_group_name(platform_name)
next_token = None
start_time = None
messages_handler = (lambda messages: io.echo_with_pager(os.linesep.join(messages)))
get_cloudwatch_messages(
log_group_name,
version,
formatter,
next_token,
start_time,
messages_handler,
sleep_time=4
)
def raise_if_instance_log_streaming_is_not_enabled(app_name, env_name):
"""
Raises if CloudWatch instance log streaming is not enabled for the environment, `env_name`
:param app_name: An Elastic Beanstalk application name
:param env_name: An Elastic Beanstalk environment name contained within `app_name`
:return: None
"""
if not instance_log_streaming_enabled(app_name, env_name):
raise InvalidOptionsError(strings['logs.instance_log_streaming_disabled'].format(env_name))
def raise_if_environment_health_log_streaming_is_not_enabled(app_name, env_name):
"""
Raises if CloudWatch environment-health log streaming is not enabled for the environment, `env_name`
:param app_name: An Elastic Beanstalk application name
:param env_name: An Elastic Beanstalk environment name contained within `app_name`
:return: None
"""
if not environment_health_streaming_enabled(app_name, env_name):
raise InvalidOptionsError(strings['logs.environment_health_log_streaming_disabled'].format(env_name))
def resolve_log_result_type(zip_argument, all_argument):
"""
Determines whether logs should be tailed or bundled.
:param zip_argument: Whether the customer has requested a zipped version of the logs
:param all_argument: Whether the customer has requested the logs for all the instances
:return: The `info_type` which is one among 'bundle' and 'tail'
"""
if zip_argument or all_argument:
return logs_operations_constants.INFORMATION_FORMAT.BUNDLE
else:
return logs_operations_constants.INFORMATION_FORMAT.TAIL
def retrieve_beanstalk_logs(env_name, info_type, do_zip=False, instance_id=None):
"""
Obtains the set of logs from ElasticBeanstalk for the environment `env_name`.
:param env_name: An Elastic Beanstalk environment name
:param info_type: The type of information to request. Possible values: tail, bundle
:param do_zip: Whether the information retrieved should be zipped; works only with info_type 'bundle'
:param instance_id: The specific EC2 instance associated with `env_name` whose log information to retrieve
:return: None
"""
result = elasticbeanstalk.request_environment_info(env_name, info_type)
request_id = result['ResponseMetadata']['RequestId']
io.echo(prompts['logs.retrieving'])
commonops.wait_for_success_events(
request_id,
timeout_in_minutes=2,
sleep_time=1,
stream_events=False,
log_events=True
)
get_logs(env_name, info_type, do_zip=do_zip, instance_id=instance_id)
def retrieve_cloudwatch_instance_logs(
log_group,
info_type,
do_zip=False,
specific_log_stream=None
):
"""
Retrieves CloudWatch logs for all the environment instances for the `log_group`
unless `specific_log_stream` is specified.
:param log_group: CloudWatch logGroup
:param info_type:
'tail': get the last 100 lines and return the result to the terminal
'bundle': get all of the logs and save them to a dir under
.elasticbeanstalk/logs/
:param do_zip: If True, zip the logs for the user
:param specific_log_stream: Get logs for specific stream
"""
retrieve_cloudwatch_logs(
log_group,
info_type,
do_zip,
specific_log_stream=specific_log_stream
)
def retrieve_cloudwatch_environment_health_logs(
log_group,
info_type,
do_zip=False
):
"""
Retrieves the environment health information identified by the `log_group` from CloudWatch
:param log_group: CloudWatch logGroup
:param info_type:
'tail': get the last 100 lines and return the result to the terminal
'bundle': get all of the logs and save them to a dir under .elasticbeanstalk/logs/
:param do_zip: If True, zip the logs for the user
:return:
"""
retrieve_cloudwatch_logs(
log_group,
info_type,
do_zip,
specific_log_stream=None,
cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
)
def retrieve_cloudwatch_logs(
log_group,
info_type,
do_zip=False,
specific_log_stream=None,
cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
):
"""
Retrieves CloudWatch logs for every stream under `log_group` unless `specific_log_stream` is specified.
:param log_group: CloudWatch logGroup
:param info_type:
'tail': get the last 100 lines and return the result to the terminal
'bundle': get all of the logs and save them to a dir under .elasticbeanstalk/logs/
:param do_zip: If True, zip the logs for the user
:param specific_log_stream: Get logs for specific stream
:param cloudwatch_log_source: the cloudwatch-log-source to pull from: instance or environment-health
"""
log_streams = cloudwatch.get_all_stream_names(
log_group_name=log_group,
log_stream_name_prefix=specific_log_stream
)
if info_type == logs_operations_constants.INFORMATION_FORMAT.BUNDLE:
logs_location = _setup_logs_folder(cloudwatch_log_source)
for log_stream in log_streams:
full_logs = get_cloudwatch_log_stream_events(log_group, log_stream)
_write_full_logs_to_file(full_logs, logs_location, log_stream)
if do_zip:
_zip_logs_location(logs_location)
else:
_attempt_update_symlink_to_latest_logs_retrieved(logs_location)
else:
stream_logs_in_terminal(log_group, log_streams)
def stream_environment_health_logs_from_cloudwatch(
sleep_time=10,
log_group=None,
specific_log_stream=None
):
"""
Streams CloudWatch logs to the terminal for the given logGroup. Since the logGroup
may contain multiple logStreams, multiple threads can be spawned, one per stream,
to display all of them on the same terminal.
:param sleep_time: sleep time to refresh the logs from cloudwatch
:param log_group: cloudwatch logGroup
:param specific_log_stream: since all of our log streams are instance ids we require
this if we want a single stream
"""
streamer = io.get_event_streamer()
streamer.prompt = ' -- {0} -- (Ctrl+C to exit)'.format(log_group)
start_time = None
log_stream_names = cloudwatch_log_stream_names(log_group, specific_log_stream)
if not log_stream_names:
return
latest_log_stream_name = log_stream_names[-1]
other_log_stream_names = log_stream_names[:-1]
for log_stream_name in other_log_stream_names:
_create_log_stream_for_log_group(log_group, log_stream_name, streamer, sleep_time, start_time)
_delay_subsequent_stream_creation()
while True:
_create_log_stream_for_log_group(log_group, latest_log_stream_name, streamer, sleep_time, start_time)
_wait_to_poll_cloudwatch()
def stream_instance_logs_from_cloudwatch(
sleep_time=10,
log_group=None,
specific_log_stream=None
):
"""
Streams CloudWatch logs to the terminal for the given logGroup.
Since the logGroup may contain multiple logStreams,
multiple threads can be spawned, one per stream, to display
all of them on the same terminal.
:param sleep_time: sleep time to refresh the logs from cloudwatch
:param log_group: cloudwatch logGroup
:param specific_log_stream: since all of our log streams are instance ids
we require this if we want a single stream
"""
streamer = io.get_event_streamer()
streamer.prompt = ' -- {0} -- (Ctrl+C to exit)'.format(log_group)
start_time = None
while True:
log_group_names = set(cloudwatch_log_stream_names(log_group, specific_log_stream))
for log_group_name in log_group_names:
_create_log_stream_for_log_group(log_group, log_group_name, streamer, sleep_time, start_time)
_delay_subsequent_stream_creation()
_wait_to_poll_cloudwatch()
start_time = _updated_start_time()
def stream_logs_in_terminal(log_group, log_streams):
"""
Prints logs of each of the `log_streams` to terminal using a scroll-able pager as opposed to printing all
available information at once.
:param log_group: name of the CloudWatch log group within which to find `stream_name`
:param log_streams: the list of log streams belonging to the `log_group` whose events to print to terminal
:return: None
"""
all_logs = ''
for log_stream in log_streams:
tail_logs = get_cloudwatch_log_stream_events(
log_group,
log_stream,
num_log_events=TAIL_LOG_SIZE
)
all_logs += '{linesep}{linesep}============= ' \
'{log_stream} - {log_group} ==============' \
'{linesep}{linesep}'.format(
log_stream=str(log_stream),
log_group=log_group,
linesep=os.linesep
)
all_logs += tail_logs
io.echo_with_pager(all_logs)
def stream_platform_logs(platform_name, version, streamer=None, sleep_time=4, log_name=None, formatter=None):
"""
Streams the logs of a custom platform
:param platform_name: A CloudWatch logGroup in which the logStream `version` exists
:param version: A CloudWatch logStream to poll
:param streamer: The object that streams events to the terminal
:param sleep_time: Time in seconds to sleep before polling CloudWatch for newer events
:param log_name: A name used to identify the blob of output in the terminal for the logStream
:param formatter: The object that formats the output to be displayed in the terminal
:return: None
"""
log_group_name = _get_platform_builder_group_name(platform_name)
wait_for_log_group_to_come_into_existence(log_group_name, sleep_time)
streamer = streamer or io.get_event_streamer()
if log_name:
streamer.prompt = ' -- Streaming logs for %s -- (Ctrl+C to exit)' % log_name
stream_single_stream(log_group_name, version, sleep_time, None, formatter)
def stream_single_stream(
log_group_name,
stream_name,
sleep_time=4,
start_time=None,
formatter=None,
):
"""
Method periodically polls CloudWatch get_log_events to retrieve the logs for the `stream_name`
within the logGroup defined by `log_group_name`
:param log_group_name: A CloudWatch logGroup in which `stream_name` exists
:param stream_name: The CloudWatch logStream to get events from
:param sleep_time: Time in seconds to sleep before polling CloudWatch for newer events
:param start_time: The start of the time range, expressed as the number of milliseconds
after Jan 1, 1970 00:00:00 UTC.
Events with a time stamp earlier than this time are not included.
:param formatter: The object that formats the output to be displayed in the terminal
:return: None
"""
def messages_handler(messages):
messages = '{linesep}{linesep}============= ' \
'{log_stream} - {log_group} ==============' \
'{linesep}{linesep}{messages}'.format(
log_stream=stream_name,
log_group=log_group_name,
linesep=os.linesep,
messages=os.linesep.join(messages)
)
io.echo(messages)
get_cloudwatch_messages(log_group_name, stream_name, formatter, None, start_time, messages_handler)
_wait_to_poll_cloudwatch(sleep_time)
def wait_for_log_group_to_come_into_existence(log_group_name, sleep_time=10):
while not cloudwatch.log_group_exists(log_group_name):
_wait_to_poll_cloudwatch(sleep_time)
def _attempt_update_symlink_to_latest_logs_retrieved(logs_location):
if not getattr(os, 'symlink', None):
LOG.debug("Couldn't create symlink to latest logs retrieved")
return
io.echo(strings['logs.location'].replace('{location}', logs_location))
latest_symlink_location = os.path.join(
os.path.dirname(logs_location),
'latest'
)
try:
if os.path.islink(latest_symlink_location):
os.unlink(latest_symlink_location)
os.symlink(logs_location, latest_symlink_location)
io.echo('Updated symlink at', latest_symlink_location)
except OSError:
pass
def _create_log_stream_for_log_group(log_group, stream_name, streamer, sleep_time, start_time=None):
cloudwatch_log_stream = threading.Thread(
target=stream_single_stream,
args=(log_group, stream_name, sleep_time, start_time,),
)
cloudwatch_log_stream.daemon = True
cloudwatch_log_stream.start()
def _delay_subsequent_stream_creation():
time.sleep(0.2)
def _download_logs_for_all_instances(instance_id_list, logs_location):
for instance_id, url in iteritems(instance_id_list):
zip_location = utils.save_file_from_url(
url,
logs_location,
instance_id + '.zip'
)
instance_folder = os.path.join(logs_location, instance_id)
fileoperations.unzip_folder(zip_location, instance_folder)
fileoperations.delete_file(zip_location)
def _echo_link_to_cloudwatch_console(env_name):
region = commonops.get_default_region()
if region in ['cn-north-1', 'cn-northwest-1']:
cw_link_regionalized = strings['cloudwatch-logs.bjslink']
else:
cw_link_regionalized = strings['cloudwatch-logs.link']
io.echo(cw_link_regionalized.replace('{region}', region).replace('{env_name}', env_name))
def _environment_health_log_streaming_option_setting(disable=False):
"""
Returns a dict representation of the environment-health log streaming option setting
:param disable: True if the intention is to disable the option setting
:return: A dict representation of the environment-health log streaming option setting
"""
return elasticbeanstalk.create_option_setting(
namespaces.CLOUDWATCH_ENVIRONMENT_HEALTH_LOGS,
option_names.CLOUDWATCH_ENVIRONMENT_HEALTH_LOGS_ENABLED,
'false' if disable else 'true'
)
def _get_cloudwatch_messages(
log_group_name,
stream_name,
formatter=None,
next_token=None,
start_time=None
):
messages = []
response = None
latest_event_timestamp = start_time
try:
response = cloudwatch.get_log_events(
log_group_name,
stream_name,
next_token=next_token,
start_time=start_time
)
except MaxRetriesError as e:
LOG.debug('Received max retries')
io.echo(e.message())
time.sleep(1)
if response and response.get('events'):
for event in response.get('events'):
message = event.get('message').encode('utf8', 'replace')
if formatter:
timestamp = event.get('timestamp')
if timestamp:
latest_event_timestamp = timestamp
formatted_message = formatter.format(message, stream_name)
else:
formatted_message = '[{1}] {0}'.format(message, stream_name)
messages.append(formatted_message)
next_token = response.get('nextForwardToken')
return messages, next_token, latest_event_timestamp
def __get_full_path_for_instance_logs(logs_location, instance_id):
return '{0}/{1}.log'.format(logs_location, instance_id)
def _get_platform_builder_group_name(platform_name):
"""
Returns the logGroup name associated with the custom platform identified by `platform_name`
:param platform_name: A custom platform whose logs to stream
:return:
"""
return '/aws/elasticbeanstalk/platform/{}'.format(platform_name)
def _handle_bundle_logs(instance_id_list, do_zip):
logs_folder_name = _timestamped_directory_name()
logs_location = fileoperations.get_logs_location(logs_folder_name)
_download_logs_for_all_instances(instance_id_list, logs_location)
fileoperations.set_user_only_permissions(logs_location)
if do_zip:
_handle_log_zipping(logs_location)
else:
io.echo(strings['logs.location'].replace('{location}', logs_location))
_attempt_update_symlink_to_latest_logs_retrieved(logs_location)
def _handle_log_zipping(logs_location):
logs_zip = logs_location + '.zip'
fileoperations.zip_up_folder(logs_location, logs_zip)
fileoperations.delete_directory(logs_location)
fileoperations.set_user_only_permissions(logs_zip)
io.echo(
strings['logs.location'].replace(
'{location}',
logs_zip
)
)
def _handle_tail_logs(instance_id_list):
data = []
for instance_id, url in iteritems(instance_id_list):
data.append('============= ' + str(instance_id) + ' ==============')
log_result = utils.get_data_from_url(url)
data.append(utils.decode_bytes(log_result))
io.echo_with_pager(os.linesep.join(data))
def _instance_log_streaming_option_setting(disable=False):
return elasticbeanstalk.create_option_setting(
namespaces.CLOUDWATCH_LOGS,
option_names.STREAM_LOGS,
'false' if disable else 'true'
)
def _raise_if_environment_is_not_using_enhanced_health(configuration_settings):
option_settings = configuration_settings.get('OptionSettings')
health_type = elasticbeanstalk.get_option_setting(
option_settings,
namespaces.HEALTH_SYSTEM,
option_names.SYSTEM_TYPE
)
if health_type != 'enhanced':
raise InvalidOptionsError(
strings[
'cloudwatch_environment_health_log_streaming.enhanced_health_not_found'
]
)
def _setup_logs_folder(cloudwatch_log_source):
if cloudwatch_log_source == logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE:
logs_folder_name = _timestamped_directory_name()
else:
if not os.path.exists(
fileoperations.get_logs_location(
logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
)
):
os.mkdir(
fileoperations.get_logs_location(
logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE
)
)
logs_folder_name = os.path.join(
logs_operations_constants.LOG_SOURCES.ENVIRONMENT_HEALTH_LOG_SOURCE,
_timestamped_directory_name()
)
os.mkdir(fileoperations.get_logs_location(logs_folder_name))
return fileoperations.get_logs_location(logs_folder_name)
def _timestamped_directory_name():
return datetime.now().strftime("%y%m%d_%H%M%S")
def _updated_start_time():
return calendar.timegm(datetime.utcnow().timetuple()) * 1000
def _updated_instance_id_list(instance_id_list, instance_id):
try:
return {
instance_id: instance_id_list[instance_id]
}
except KeyError:
raise NotFoundError(strings['beanstalk-logs.badinstance'].format(instance_id))
def _wait_to_poll_cloudwatch(sleep_time=10):
time.sleep(sleep_time)
def _write_full_logs_to_file(full_logs, logs_location, instance_id):
full_filepath = __get_full_path_for_instance_logs(logs_location, instance_id)
with open(full_filepath, 'w+') as log_file:
log_file.write(full_logs)
fileoperations.set_user_only_permissions(full_filepath)
def _zip_logs_location(logs_location):
fileoperations.zip_up_folder(logs_location, logs_location + '.zip')
fileoperations.delete_directory(logs_location)
logs_location += '.zip'
fileoperations.set_user_only_permissions(logs_location)
io.echo(strings['logs.location'].replace('{location}', logs_location))
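# --- End-to-end sketch (editor's addition; the environment name is hypothetical) ---
# It only illustrates how the two retrieval paths defined above are typically selected.
#
#     info_type = resolve_log_result_type(zip_argument=True, all_argument=False)  # bundle
#     retrieve_beanstalk_logs('my-env', info_type, do_zip=True)  # logs stored by Beanstalk
#     # or, when CloudWatch log streaming is enabled for the environment:
#     group = normalize_log_group_name(
#         'my-env',
#         cloudwatch_log_source=logs_operations_constants.LOG_SOURCES.INSTANCE_LOG_SOURCE
#     )
#     retrieve_cloudwatch_logs(group, info_type, do_zip=True)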
| 38.353721
| 110
| 0.711389
|
759f0d5c5a8d55736222be260e5e83f26eb32c30
| 4,013
|
py
|
Python
|
src/brython_jinja2/interpolatedstr.py
|
jonathanverner/brython-jinja2
|
cec6e16de1750203a858d0acf590f230fc3bf848
|
[
"BSD-3-Clause"
] | 2
|
2020-09-13T17:51:55.000Z
|
2020-11-25T18:47:12.000Z
|
src/brython_jinja2/interpolatedstr.py
|
jonathanverner/brython-jinja2
|
cec6e16de1750203a858d0acf590f230fc3bf848
|
[
"BSD-3-Clause"
] | 2
|
2020-11-25T19:18:15.000Z
|
2021-06-01T21:48:12.000Z
|
src/brython_jinja2/interpolatedstr.py
|
jonathanverner/brython-jinja2
|
cec6e16de1750203a858d0acf590f230fc3bf848
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This module provides the :class:`InterpolatedStr` class which
can be used to interpolate complex strings with multiple
instances of ``{{ }}``-type circular expressions.
"""
from .utils.events import EventMixin
from .expression import parse_interpolated_str, ConstNode
class InterpolatedStr(EventMixin):
"""
The :class:`InterpolatedStr` manages string interpolations.
Use it as follows:
```
from brython_jinja2.context import Context
from brython_jinja2.interpolatedstr import InterpolatedStr
c = Context()
istr = InterpolatedStr("Hello {{name}}, {{surname}}!")
istr.bind_ctx(c)
assert istr.value == "Hello , !"
c.name = "John"
assert istr.value == "Hello John, !"
c.surname = "Smith"
assert istr.value == "Hello John, Smith!"
```
The class tries to do some clever tricks to only evaluate the
subexpressions which have changed due to a given context change.
(e.g. c.name='Anne' would not affect the second expression in
the above example).
"""
def __init__(self, string, start='{{', end='}}', stop_strs=[]):
super().__init__()
if isinstance(string, InterpolatedStr):
# pylint: disable=protected-access; we are cloning ourselves, we have access to protected variables
self._src = string._src
self.asts = []
for ast in string.asts:
self.asts.append(ast.clone())
else:
self._src, self.asts = parse_interpolated_str(string, start=start, end=end, stop_strs=stop_strs)
for ast_index in range(len(self.asts)):
self.asts[ast_index].bind('change', lambda event, ast_index=ast_index: self._change_chandler(event, ast_index))
self._dirty = True
self._dirty_vals = True
self._cached_vals = []
self._cached_val = ""
self.evaluate()
def is_const(self):
for a in self.asts:
if not a.is_const():
return False
return True
def bind_ctx(self, context):
for ast in self.asts:
ast.bind_ctx(context)
self._dirty = True
self._cached_val = ""
def clone(self):
return InterpolatedStr(self)
def get_ast(self, n, strip_str=True):
if not strip_str:
return self.asts[n]
else:
return self.asts[n]._rarg._children[0]
def _change_chandler(self, event, ast_index):
if not self._dirty_vals:
if 'value' in event.data:
self._cached_vals[ast_index] = event.data['value']
else:
self._dirty_vals = True
if self._dirty:
return
self._dirty = True
self.emit('change', {})
@property
def value(self):
if self._dirty:
if self._dirty_vals:
self.evaluate()
else:
self._cached_val = "".join(self._cached_vals)
return self._cached_val
def evaluate(self):
self._cached_val = ""
self._cached_vals = []
for ast in self.asts:
try:
self._cached_vals.append(ast.eval())
# pylint: disable=bare-except; interpolated str must handle any exceptions when evaluating circular expressions
except:
self._cached_vals.append("")
self._cached_val = "".join(self._cached_vals)
self._dirty = False
def rstrip(self):
ret = self.clone()
if ret.asts:
node = ret.get_ast(-1, strip_str=True)  # strip the clone's node, not the original's
if isinstance(node, ConstNode):
node._cached_val = node._cached_val.rstrip()
return ret
def __str__(self):
if self._dirty:
return "InterpolatedStr("+self._src+")[=dirty:"+self.value+"]"
else:
return "InterpolatedStr("+self._src+")[="+self.value+"]"
| 32.626016
| 127
| 0.573636
|
be6b0356a826dacfccdea774ccd93dabfcb7f8e8
| 2,871
|
gyp
|
Python
|
gyp/network-unittests.gyp
|
nidium/libapenetwork
|
d011cf41402b3936cbaef1c07b5086a1a0eab265
|
[
"MIT"
] | 22
|
2016-06-28T18:12:34.000Z
|
2020-12-10T05:05:54.000Z
|
gyp/network-unittests.gyp
|
nidium/libapenetwork
|
d011cf41402b3936cbaef1c07b5086a1a0eab265
|
[
"MIT"
] | 14
|
2016-10-15T19:45:00.000Z
|
2017-04-24T11:07:01.000Z
|
gyp/network-unittests.gyp
|
nidium/libapenetwork
|
d011cf41402b3936cbaef1c07b5086a1a0eab265
|
[
"MIT"
] | 8
|
2016-12-08T02:40:56.000Z
|
2020-12-11T00:27:14.000Z
|
# Copyright 2016 Nidium Inc. All rights reserved.
# Use of this source code is governed by a MIT license
# that can be found in the LICENSE file.
{
'targets': [
{
'target_name': 'unittests-settings',
'type': 'none',
'direct_dependent_settings': {
'include_dirs': [
'<(third_party_path)/gtest/googletest/include',
],
'conditions': [
['OS=="linux"', {
"link_settings": {
'libraries': [
'-lgtest_main',
'-lgtest',
'-ldl',
'-lpthread',
'-lrt'
]
}
}],
['OS=="mac"', {
"link_settings": {
'libraries': [
'libgtest_main.a',
'libgtest.a',
]
}
}]
],
},
},
{
'target_name': 'network-unittests',
'type': 'executable',
# When built from a different directory the current
# working directory of gyp is different between
# OSX and Linux. Work around that.
'conditions': [
['OS=="mac"', {
'product_dir': '<(libapenetwork_tests_output_path)',
"xcode_settings": {
'OTHER_LDFLAGS!': [
'../build/third-party/libssl.a',
'../build/third-party/libcrypto.a',
],
'OTHER_LDFLAGS': [
'<(DEPTH)/build/third-party/libssl.a',
'<(DEPTH)/build/third-party/libcrypto.a',
],
},
}, {
'product_dir': '../build/tests/',
}]
],
'dependencies': [
'network-unittests.gyp:unittests-settings',
'network.gyp:*',
],
'sources': [
'../tests/unittest_0.cpp',
'../tests/unittest_common.cpp',
'../tests/unittest_base64.cpp',
'../tests/unittest_blowfish.cpp',
'../tests/unittest_sha1.cpp',
'../tests/unittest_buffer.cpp',
'../tests/unittest_pool.cpp',
'../tests/unittest_array.cpp',
'../tests/unittest_hash.cpp',
'../tests/unittest_netlib.cpp',
'../tests/unittest_events.cpp',
'../tests/unittest_timersng.cpp',
'../tests/unittest_socket.cpp',
'../tests/unittest_dns.cpp',
'../tests/unittest_ssl.cpp',
'../tests/unittest_websocket.cpp',
'../tests/unittest_log.cpp',
],
}]
}
| 33.383721
| 68
| 0.40404
|
d5f269812bbe07294bc2865cd94c574bca1f5460
| 10,128
|
py
|
Python
|
doc/integrations/pytorch/parlai/tasks/md_gender/wikipedia.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2020-09-27T05:00:06.000Z
|
2020-09-27T05:00:06.000Z
|
doc/integrations/pytorch/parlai/tasks/md_gender/wikipedia.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-08-04T11:17:39.000Z
|
2021-08-04T11:17:39.000Z
|
doc/integrations/pytorch/parlai/tasks/md_gender/wikipedia.py
|
novium258/cortx-1
|
ce5b939b33b8d24d89b31807ac3bcaa8f24096bc
|
[
"Apache-2.0"
] | 1
|
2021-05-03T13:27:14.000Z
|
2021-05-03T13:27:14.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.message import Message
from parlai.core.teachers import ChunkTeacher
from parlai.tasks.wikipedia.build import build
from parlai.utils.misc import warn_once
import parlai.tasks.md_gender.utils as gend_utils
from copy import deepcopy
import json
import os
import random
import re
import spacy
from typing import List
NLP = spacy.load('en_core_web_sm')
DEBUG = False
def check_if_person(text):
"""
Using spacy, check if the title of the Wikipedia passage is a person.
"""
doc = NLP(text)
is_person = False
ents = [ent for ent in doc.ents]
for ent in ents:
if ent.label_ == 'PERSON':
is_person = True
return is_person
def get_gender(text):
"""
Determine gender by the count of referring pronouns in the biography (he, she,
they).
"""
he_count = len(re.findall(' he ', text.lower()))
she_count = len(re.findall(' she ', text.lower()))
they_count = len(re.findall(' they ', text.lower()))
if he_count == max(he_count, she_count, they_count):
return gend_utils.MASC
elif she_count == max(he_count, she_count, they_count):
return gend_utils.FEM
else:
nonbinary_count = len(re.findall(' non-binary ', text.lower()))
if nonbinary_count > 0:
return gend_utils.NONBINARY
return gend_utils.NEUTRAL
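# Illustrative sketch (editor's addition; the sample sentences are made up): the label is
# chosen by whichever space-padded pronoun occurs most often in the lower-cased text.
#
#     get_gender(" she founded the lab and she led it ")  # -> gend_utils.FEM
#     get_gender(" he wrote the paper ")                  # -> gend_utils.MASC
#     # Note: ties, including the all-zero case, satisfy the first branch and return MASC.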
class WikipediaTeacher(ChunkTeacher):
"""
Wikipedia gender.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt)
parser = gend_utils.add_common_args(parser)
agent = parser.add_argument_group('Wiki gender')
agent.add_argument(
'--class-task',
type=str,
default='all',
choices=['single', 'all'],
help='Rank against all possibilities vs. F/M/N/NB',
)
agent.add_argument(
'--mask-gendered-words',
type='bool',
default=False,
help='Mask explicitly gendered words.',
)
return parser
def __init__(self, opt, shared=None):
if shared is None:
# set map
self.opt = opt
self._set_chunk_idx_to_file(opt)
else:
self.chunk_idx_to_file = shared['chunk_idx_to_file']
self.class_task = opt['class_task']
self.is_train = 'train' in opt['datatype'] and 'evalmode' not in opt['datatype']
self.is_valid = 'valid' in opt['datatype']
self.add_unknown_classes = (
opt['add_unknown_classes'] and self.is_train and self.class_task == 'all'
)
if self.class_task == 'all':
self.label_candidates = gend_utils.ALL_CANDS
elif self.class_task == 'single':
self.label_candidates = {
'about': [
gend_utils.FEM,
gend_utils.MASC,
gend_utils.NEUTRAL,
gend_utils.NONBINARY,
]
}
self.mask_gendered_words = opt['mask_gendered_words']
if self.mask_gendered_words:
male, female = gend_utils.get_explicitly_gendered_words(self.opt)
self.gendered_list = male + female
self.counts = {
gend_utils.FEM: 0,
gend_utils.MASC: 0,
gend_utils.NEUTRAL: 0,
gend_utils.NONBINARY: 0,
}
super().__init__(opt, shared)
def _set_chunk_idx_to_file(self, opt):
# download wikipedia data
wiki_opt = deepcopy(opt)
wiki_opt['task'] = 'wikipedia:full'
build(wiki_opt)
# now divide data into subfolders
data_folder = self._get_data_folder()
self.chunk_idx_to_file = {}
i = 0
for subdir in os.listdir(data_folder):
if subdir != 'README.md':
for fle in os.listdir(os.path.join(data_folder, subdir)):
self.chunk_idx_to_file[i] = os.path.join(data_folder, subdir, fle)
i += 1
def _get_data_folder(self):
return os.path.join(self.opt['datapath'], 'wikipedia/full/wiki_full_extracted')
def get_num_samples(self, opt) -> int:
"""
Return the number of samples given the datatype.
"""
datatype = opt['datatype']
if 'train' in datatype:
return 12774693, 12774693
elif 'valid' in datatype:
return 7410, 7410
else:
return 7441, 7441
def get_fold_chunks(self, opt) -> List[int]: # type: ignore
"""
Return a list of chunk IDs (integer).
Given the datatype (train/test/valid), return the list of chunk IDs that
correspond to that split.
"""
datatype = opt['datatype']
all_chunk_idxs = list(self.chunk_idx_to_file.keys())
if DEBUG:
print(f'Total chunks: {len(all_chunk_idxs)}')
if 'train' in datatype:
return all_chunk_idxs[:-10]
elif 'valid' in datatype:
return all_chunk_idxs[-10:-5]
else:
return all_chunk_idxs[-5:]
def load_from_chunk(self, chunk_idx: int):
"""
[Abstract] Given the chunk index, load examples from that chunk.
Return a list of tuples. The function `_create_message` will take these tuples
to form the Message object that is returned by the teacher.
"""
output = []
chunk_path = self.chunk_idx_to_file[chunk_idx]
extra_data = []
with open(chunk_path) as wf:
for article_json in wf:
article = json.loads(article_json)
title = article['title']
text = article['text']
title = title.split(' (')[0]
is_person = check_if_person(title)
if not is_person:
continue
gender = get_gender(text)
label = f'ABOUT:{gender}'
for par in text.split('\n'):
if par:
output.append((par, title, label, gender, 'about'))
self.counts[gender] += 1
if self.add_unknown_classes:
extra_data.append(
(
par,
title,
f'SELF:{gend_utils.UNKNOWN}',
gender,
'self',
)
)
extra_data.append(
(
par,
title,
f'PARTNER:{gend_utils.NEUTRAL}',
gender,
'partner',
)
)
if len(extra_data) > 0:
# possibly sample unknown classes
sample_rate = self.opt['unknown_temp']
if sample_rate < 1.0:
to_samp = int(sample_rate * len(extra_data))
sampled = random.sample(extra_data, to_samp)
output += sampled
else:
output += extra_data
if DEBUG:
print('\n\nGender count update:')
for k, v in self.counts.items():
print(f'{k}: {v}')
if (self.is_train and self.opt['balance']) or (
self.is_valid and self.opt['balance_valid']
):
exclude_lst = [
f'ABOUT:{gend_utils.NONBINARY}',
f'SELF:{gend_utils.UNKNOWN}',
f'PARTNER:{gend_utils.NEUTRAL}',
] # not enough of each of these examples to balance
output = gend_utils.balance_data(output, key=2, exclude_labels=exclude_lst)
if len(output) == 0:
warn_once(f'CHUNK {chunk_idx} is empty')
return output
def _mask_gendered_words(self, text):
return gend_utils.mask_gendered_words(text, self.gendered_list)
def create_message(self, queue_output, entry_idx=0) -> 'Message':
"""
[Abstract] Given the tuple output of the queue, return an act.
"""
par, title, lbl, gender, class_type = queue_output
if self.class_task == 'all':
if class_type == 'self':
labels = gend_utils.UNKNOWN_LABELS['self'] # Not True neutral
else:
labels = [lbl]
elif self.class_task == 'single':
labels = [gender]
if self.mask_gendered_words:
par = self._mask_gendered_words(par)
return Message(
{
'text': par,
'name': title,
'labels': labels,
'label_candidates': self.label_candidates[class_type],
'episode_done': True,
'id': 'Wikipedia Gender',
}
)
def share(self):
shared = super().share()
shared['chunk_idx_to_file'] = self.chunk_idx_to_file
return shared
def write_gender_to_file(open_file, loaded_data):
prev = None
for _, title, _, gender, _ in loaded_data:
if title != prev:
open_file.write(f'{title}\t{gender}\n')
prev = title
| 33.647841
| 89
| 0.522314
|
27444ec11319d2fb81ef0d6b46492df823d8d263
| 1,330
|
py
|
Python
|
app/core/tests/test_admin.py
|
probal25/recipe-app-api
|
b4e9ca260b5406b60f74cd1e38160c1a2e831827
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
probal25/recipe-app-api
|
b4e9ca260b5406b60f74cd1e38160c1a2e831827
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
probal25/recipe-app-api
|
b4e9ca260b5406b60f74cd1e38160c1a2e831827
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@doit.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@doit.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 32.439024
| 68
| 0.631579
|
01ef39c54462a8976563caec8401e9dfbfa9ef03
| 1,234
|
py
|
Python
|
Python3/1129-Shortest-Path-With-Alternating-Colors/soln.py
|
wyaadarsh/LeetCode-Solutions
|
3719f5cb059eefd66b83eb8ae990652f4b7fd124
|
[
"MIT"
] | 5
|
2020-07-24T17:48:59.000Z
|
2020-12-21T05:56:00.000Z
|
Python3/1129-Shortest-Path-With-Alternating-Colors/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | null | null | null |
Python3/1129-Shortest-Path-With-Alternating-Colors/soln.py
|
zhangyaqi1989/LeetCode-Solutions
|
2655a1ffc8678ad1de6c24295071308a18c5dc6e
|
[
"MIT"
] | 2
|
2020-07-24T17:49:01.000Z
|
2020-08-31T19:57:35.000Z
|
from typing import List
import collections
class Solution:
def shortestAlternatingPaths(self, n: int, red_edges: List[List[int]], blue_edges: List[List[int]]) -> List[int]:
reds = collections.defaultdict(list)
blues = collections.defaultdict(list)
for i, j in red_edges:
reds[i].append(j)
for i, j in blue_edges:
blues[i].append(j)
# print(reds, blues)
frontier = collections.deque([(v, 'r') for v in reds[0]] + [(v, 'b') for v in blues[0]])
# print(frontier)
ans = [-1] * n
ans[0] = 0
step = 0
seen = {(0, 'r'), (0, 'b')}
for node, c in frontier:
seen.add((node, c))
while frontier:
step += 1
sz = len(frontier)
for _ in range(sz):
node, color = frontier.popleft()
if ans[node] == -1:
ans[node] = step
nei_lst = blues if color == 'r' else reds
for nei in nei_lst[node]:
new_color = 'r' if color == 'b' else 'b'
if (nei, new_color) not in seen:
seen.add((nei, new_color))
frontier.append((nei, new_color))
return ans
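# Worked example (editor's addition), matching the first sample of LeetCode 1129:
# with n = 3, red_edges = [[0, 1], [1, 2]] and blue_edges = [], the BFS reaches node 1
# via a red edge at step 1, but node 2 would need a blue edge out of node 1, which does
# not exist, so it stays at -1.
#
#     Solution().shortestAlternatingPaths(3, [[0, 1], [1, 2]], [])  # -> [0, 1, -1]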
| 38.5625
| 117
| 0.471637
|
39869ef2574a3c486d2236c57c19451949ab0613
| 41
|
py
|
Python
|
indra/ontology/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | 136
|
2016-02-11T22:06:37.000Z
|
2022-03-31T17:26:20.000Z
|
indra/ontology/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | 748
|
2016-02-03T16:27:56.000Z
|
2022-03-09T14:27:54.000Z
|
indra/ontology/__init__.py
|
zebulon2/indra
|
7727ddcab52ad8012eb6592635bfa114e904bd48
|
[
"BSD-2-Clause"
] | 56
|
2015-08-28T14:03:44.000Z
|
2022-02-04T06:15:55.000Z
|
from .ontology_graph import IndraOntology
| 41
| 41
| 0.902439
|
137ab07968cb8906a54b25fc454428ca0c378bab
| 5,430
|
py
|
Python
|
openGaussBase/testcase/SQL/DML/set/Opengauss_Function_DML_Set_Case0078.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/set/Opengauss_Function_DML_Set_Case0078.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/set/Opengauss_Function_DML_Set_Case0078.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Functional test
Case Name   : Administrator executes the shutdown command
Description :
    1. Create an administrator user with the shutdown privilege
    2.1. Start a transaction and create a table, without closing the transaction
    2.2. Execute shutdown (default mode as well as fast mode) and run other SQL statements
    2.3. Restart the database and check the table created inside the transaction
    3. Clean up the environment
Expect      :
    1. Creation succeeds
    2.1. Succeeds
    2.2. In default mode, the currently connected database node shuts down;
         in fast mode, the node does not wait for clients to disconnect, rolls back all active transactions, forcibly disconnects clients, and then shuts down
    2.3. Restart succeeds and the table does not exist
    3. Cleanup succeeds
History :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class Function(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.commonsh = CommonSH('dbuser')
self.user = Node('dbuser')
self.pwd = macro.COMMON_PASSWD
self.env = macro.DB_ENV_PATH
self.constant = Constant()
self.log.info(f'-----{os.path.basename(__file__)} start-----')
def test_shutdown(self):
test = '-----step1: create an administrator user with the shutdown privilege expect: success-----'
self.log.info(test)
cmd0 = f'''drop user if exists hong cascade;
create user hong with sysadmin password '{self.pwd}';'''
msg0 = self.commonsh.execut_db_sql(cmd0)
self.log.info(msg0)
self.assertTrue(msg0.find('CREATE ROLE') > -1)
cmd1 = '''select rolname, rolsystemadmin
from pg_authid where rolname = 'hong';'''
msg1 = self.commonsh.execut_db_sql(cmd1)
self.log.info(msg1)
self.assertTrue('hong | t' in msg1, 'step failed: ' + test)
test = '-----shutdown in default mode as well as in fast mode-----'
self.log.info(test)
shutdown_list = ['shutdown;', 'shutdown fast;']
check = 'select sysdate;'
info1 = 'SHUTDOWN'
info2 = 'failed to connect'
info3 = 'Broken pipe'
for i in range(2):
test = '-----step2.1: start a transaction and create a table without closing it expect: success-----'
self.log.info(test)
cmd1 = '''start transaction;
drop table if exists xhy;
create table xhy(id int);
insert into xhy values(89);'''
msg1 = self.commonsh.execut_db_sql(cmd1)
self.log.info(msg1)
self.assertTrue('CREATE' in msg1, 'step failed: ' + test)
test = '-----step2.2: execute shutdown (default mode and fast mode) and run other sql statements ' \
'expect: in default mode the currently connected database node shuts down; ' \
'in fast mode active transactions are rolled back, clients are forcibly disconnected and the node shuts down-----'
self.log.info(test)
cmd2 = f''' source {self.env};
gsql -d {self.user.db_name} -U hong -W {self.pwd} \
-p {self.user.db_port} -c "{shutdown_list[i]}"'''
self.log.info(cmd2)
msg2 = self.user.sh(cmd2).result()
self.log.info(msg2)
self.assertTrue(info1 in msg2)
cmd0 = f''' source {self.env};
gsql -d {self.user.db_name} -U hong -W {self.pwd} \
-p {self.user.db_port} -c "{check}"'''
self.log.info(cmd0)
msg0 = self.user.sh(cmd0).result()
self.log.info(msg0)
self.assertTrue(info2 in msg0 or info3 in msg0, 'step failed: ' + test)
test = '-----step2.3: restart the database and check the table created in the transaction expect: restart succeeds, table does not exist-----'
self.log.info(test)
result = self.commonsh.execute_gsctl('restart', 'server started',
'-M primary')
self.assertTrue(result)
result = self.commonsh.execute_gsctl('query', '', get_detail=True)
self.log.info(result)
self.assertTrue("Degraded" in result or "Normal" in result,
'执行失败:' + test)
cmd3 = f'''source {self.env}
gsql -d {self.user.db_name} -U hong -W {self.pwd} \
-p {self.user.db_port} -c "select * from xhy;"'''
self.log.info(cmd3)
msg3 = self.user.sh(cmd3).result()
self.log.info(msg3)
self.assertTrue('relation "xhy" does not exist' in msg3,
'step failed: ' + test)
def tearDown(self):
self.log.info('-----step3:清理环境-----')
test_1 = '-----检查数据库状态,异常时重启数据库 expect:成功-----'
self.log.info(test_1)
status = self.commonsh.get_db_cluster_status()
if 'Normal' not in status and 'Degraded' not in status:  # the original `'Normal' or ...` was always truthy
status = self.commonsh.restart_db_cluster()
self.log.info(status)
self.assertTrue(status, 'step failed: ' + test_1)
test_2 = '-----drop the user expect: success-----'
self.log.info(test_2)
drop_cmd = f'drop user if exists hong cascade;'
drop_res = self.commonsh.execut_db_sql(drop_cmd)
self.log.info(drop_res)
self.assertIn(self.constant.DROP_ROLE_SUCCESS_MSG, drop_res,
'step failed: ' + test_2)
self.log.info(f'-----{os.path.basename(__file__)} end-----')
| 37.708333
| 84
| 0.576243
|
78d4ef9106901c0d8826b173fe41bdac5257cdb4
| 20,953
|
py
|
Python
|
tests/probability2/empirical_distributions/empirical_distribution_marginal_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
tests/probability2/empirical_distributions/empirical_distribution_marginal_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | 1
|
2020-08-12T06:56:59.000Z
|
2020-08-12T08:57:30.000Z
|
tests/probability2/empirical_distributions/empirical_distribution_marginal_test.py
|
chasing-entropy/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
import pytest
from probability2.empirical_distributions import DiscreteDistribution
from tests.helpers import compare
def test_marginals_names_exception_discrete_distribution():
# Wrong rv name
with pytest.raises(ValueError):
samples = {"a": 3, "b": 4, "c": 5}
disc_dist = DiscreteDistribution(samples)
disc_dist.marginal("X1")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist.marginal("X0")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist.marginal("X3")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
disc_dist2.marginal("X1")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples, names=["Y", "Z"])
disc_dist.marginal("X1")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples, names=["Y", "Z"])
disc_dist.marginal("X1")
# Wrong rv name
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples, names=["Y", "Z"])
disc_dist2 = disc_dist.marginal("Y")
disc_dist2.marginal("Y")
# Marginalize over all vars
with pytest.raises(ValueError):
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples, names=["Y", "Z"])
disc_dist2 = disc_dist.marginal("Y", "Z")
def test_marginals_names_discrete_distribution():
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
assert all(compare(disc_dist2.names, ["X2"]))
disc_dist2 = disc_dist.marginal("X2")
assert all(compare(disc_dist2.names, ["X1"]))
#
disc_dist = DiscreteDistribution(samples, names=["Y", "Z"])
disc_dist2 = disc_dist.marginal("Y")
assert all(compare(disc_dist2.names, ["Z"]))
disc_dist2 = disc_dist.marginal("Z")
assert all(compare(disc_dist2.names, ["Y"]))
# Three levels dist.
samples = {
("a", "x", 1): 4,
("a", "x", 2): 4,
("a", "y", 1): 6,
("a", "y", 2): 6,
("b", "x", 1): 8,
("b", "x", 2): 8,
("b", "y", 1): 10,
("b", "y", 2): 10,
}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
assert all(compare(disc_dist2.names, ["X2", "X3"]))
disc_dist2 = disc_dist.marginal("X2")
assert all(compare(disc_dist2.names, ["X1", "X3"]))
disc_dist2 = disc_dist.marginal("X3")
assert all(compare(disc_dist2.names, ["X1", "X2"]))
disc_dist2 = disc_dist.marginal("X1", "X3")
assert all(compare(disc_dist2.names, ["X2"]))
disc_dist2 = disc_dist.marginal("X2", "X3")
assert all(compare(disc_dist2.names, ["X1"]))
#
disc_dist = DiscreteDistribution(samples, names=["Y", "Z", "W"])
disc_dist2 = disc_dist.marginal("Y")
assert all(compare(disc_dist2.names, ["Z", "W"]))
disc_dist2 = disc_dist.marginal("Z")
assert all(compare(disc_dist2.names, ["Y", "W"]))
disc_dist2 = disc_dist.marginal("W")
assert all(compare(disc_dist2.names, ["Y", "Z"]))
disc_dist2 = disc_dist.marginal("Y", "W")
assert all(compare(disc_dist2.names, ["Z"]))
disc_dist2 = disc_dist.marginal("Z", "W")
assert all(compare(disc_dist2.names, ["Y"]))
def test_marginals_discrete_distribution():
# Single RV dist.
with pytest.raises(ValueError):
disc_dist = DiscreteDistribution({"A": 2, "B": 3, "C": 4})
disc_dist.marginal("X1")
# Two levels dist.
samples = {(1, 1): 4, (1, 2): 4, (2, 1): 6, (2, 2): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), [1, 2]))
assert disc_dist2[1] == 10
assert disc_dist2[2] == 10
assert disc_dist2.probability(1) == 0.5
assert disc_dist2.probability(2) == 0.5
disc_dist2 = disc_dist.marginal("X2")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), [1, 2]))
assert disc_dist2[1] == 8
assert disc_dist2[2] == 12
assert disc_dist2.probability(1) == 0.4
assert disc_dist2.probability(2) == 0.6
samples = {("a", "x"): 4, ("a", "y"): 4, ("b", "x"): 6, ("b", "y"): 6}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), ["x", "y"]))
assert disc_dist2["x"] == 10
assert disc_dist2["y"] == 10
assert disc_dist2.probability("x") == 0.5
assert disc_dist2.probability("y") == 0.5
disc_dist2 = disc_dist.marginal("X1")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), ["x", "y"]))
assert disc_dist2["x"] == 10
assert disc_dist2["y"] == 10
assert disc_dist2.probability("x") == 0.5
assert disc_dist2.probability("y") == 0.5
disc_dist2 = disc_dist.marginal("X2")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), ["a", "b"]))
assert disc_dist2["a"] == 8
assert disc_dist2["b"] == 12
assert disc_dist2.probability("a") == 0.4
assert disc_dist2.probability("b") == 0.6
# Three levels dist.
samples = {
("a", "x", 1): 4,
("a", "x", 2): 4,
("a", "y", 1): 6,
("a", "y", 2): 6,
("b", "x", 1): 8,
("b", "x", 2): 8,
("b", "y", 1): 10,
("b", "y", 2): 10,
}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X1")
assert disc_dist2.total == disc_dist.total
assert all(
compare(disc_dist2.keys_as_list(), [("x", 1), ("x", 2), ("y", 1), ("y", 2)])
)
assert disc_dist2[("x", 1)] == 12
assert disc_dist2[("x", 2)] == 12
assert disc_dist2[("y", 1)] == 16
assert disc_dist2[("y", 2)] == 16
assert disc_dist2.probability(("x", 1)) == 12 / 56
assert disc_dist2.probability(("x", 2)) == 12 / 56
assert disc_dist2.probability(("y", 1)) == 16 / 56
assert disc_dist2.probability(("y", 2)) == 16 / 56
disc_dist2 = disc_dist.marginal("X2")
assert disc_dist2.total == disc_dist.total
assert all(
compare(disc_dist2.keys_as_list(), [("a", 1), ("a", 2), ("b", 1), ("b", 2)])
)
assert disc_dist2[("a", 1)] == 10
assert disc_dist2[("a", 2)] == 10
assert disc_dist2[("b", 1)] == 18
assert disc_dist2[("b", 2)] == 18
assert disc_dist2.probability(("a", 1)) == 10 / 56
assert disc_dist2.probability(("a", 2)) == 10 / 56
assert disc_dist2.probability(("b", 1)) == 18 / 56
assert disc_dist2.probability(("b", 2)) == 18 / 56
disc_dist2 = disc_dist.marginal("X3")
assert disc_dist2.total == disc_dist.total
assert all(
compare(
disc_dist2.keys_as_list(), [("a", "x"), ("a", "y"), ("b", "x"), ("b", "y")]
)
)
assert disc_dist2[("a", "x")] == 8
assert disc_dist2[("a", "y")] == 12
assert disc_dist2[("b", "x")] == 16
assert disc_dist2[("b", "y")] == 20
assert disc_dist2.probability(("a", "x")) == 8 / 56
assert disc_dist2.probability(("a", "y")) == 12 / 56
assert disc_dist2.probability(("b", "x")) == 16 / 56
assert disc_dist2.probability(("b", "y")) == 20 / 56
disc_dist2 = disc_dist.marginal("X1", "X2")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), [1, 2]))
assert disc_dist2[1] == 28
assert disc_dist2[2] == 28
assert disc_dist2.probability(1) == 28 / 56
assert disc_dist2.probability(2) == 28 / 56
disc_dist2 = disc_dist.marginal("X1", "X3")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), ["x", "y"]))
assert disc_dist2["x"] == 24
assert disc_dist2["y"] == 32
assert disc_dist2.probability("x") == 24 / 56
assert disc_dist2.probability("y") == 32 / 56
disc_dist2 = disc_dist.marginal("X2", "X3")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), ["a", "b"]))
assert disc_dist2["a"] == 20
assert disc_dist2["b"] == 36
assert disc_dist2.probability("a") == 20 / 56
assert disc_dist2.probability("b") == 36 / 56
# Four levels dist.
samples = {
("a", "x", 1, 33): 1,
("a", "x", 2, 33): 2,
("a", "x", 1, 44): 3,
("a", "x", 2, 44): 4,
("a", "y", 1, 33): 5,
("a", "y", 2, 33): 6,
("a", "y", 1, 44): 7,
("a", "y", 2, 44): 8,
("b", "x", 1, 33): 9,
("b", "x", 2, 33): 10,
("b", "x", 1, 44): 11,
("b", "x", 2, 44): 12,
("b", "y", 1, 33): 13,
("b", "y", 2, 33): 14,
("b", "y", 1, 44): 15,
("b", "y", 2, 44): 16,
}
disc_dist = DiscreteDistribution(samples)
disc_dist2 = disc_dist.marginal("X3")
assert disc_dist2.total == disc_dist.total
assert all(
compare(
disc_dist2.keys_as_list(),
[
("a", "x", 33),
("a", "x", 44),
("a", "y", 33),
("a", "y", 44),
("b", "x", 33),
("b", "x", 44),
("b", "y", 33),
("b", "y", 44),
],
)
)
assert disc_dist2[("a", "x", 33)] == 3
assert disc_dist2[("a", "x", 44)] == 7
assert disc_dist2[("a", "y", 33)] == 11
assert disc_dist2[("a", "y", 44)] == 15
assert disc_dist2[("b", "x", 33)] == 19
assert disc_dist2[("b", "x", 44)] == 23
assert disc_dist2[("b", "y", 33)] == 27
assert disc_dist2[("b", "y", 44)] == 31
assert disc_dist2.probability(("a", "x", 33)) == 3 / 136
assert disc_dist2.probability(("a", "x", 44)) == 7 / 136
assert disc_dist2.probability(("a", "y", 33)) == 11 / 136
assert disc_dist2.probability(("a", "y", 44)) == 15 / 136
assert disc_dist2.probability(("b", "x", 33)) == 19 / 136
assert disc_dist2.probability(("b", "x", 44)) == 23 / 136
assert disc_dist2.probability(("b", "y", 33)) == 27 / 136
assert disc_dist2.probability(("b", "y", 44)) == 31 / 136
disc_dist2 = disc_dist.marginal("X4")
assert disc_dist2.total == disc_dist.total
assert all(
compare(
disc_dist2.keys_as_list(),
[
("a", "x", 1),
("a", "x", 2),
("a", "y", 1),
("a", "y", 2),
("b", "x", 1),
("b", "x", 2),
("b", "y", 1),
("b", "y", 2),
],
)
)
assert disc_dist2[("a", "x", 1)] == 4
assert disc_dist2[("a", "x", 2)] == 6
assert disc_dist2[("a", "y", 1)] == 12
assert disc_dist2[("a", "y", 2)] == 14
assert disc_dist2[("b", "x", 1)] == 20
assert disc_dist2[("b", "x", 2)] == 22
assert disc_dist2[("b", "y", 1)] == 28
assert disc_dist2[("b", "y", 2)] == 30
assert disc_dist2.probability(("a", "x", 1)) == 4 / 136
assert disc_dist2.probability(("a", "x", 2)) == 6 / 136
assert disc_dist2.probability(("a", "y", 1)) == 12 / 136
assert disc_dist2.probability(("a", "y", 2)) == 14 / 136
assert disc_dist2.probability(("b", "x", 1)) == 20 / 136
assert disc_dist2.probability(("b", "x", 2)) == 22 / 136
assert disc_dist2.probability(("b", "y", 1)) == 28 / 136
assert disc_dist2.probability(("b", "y", 2)) == 30 / 136
disc_dist2 = disc_dist.marginal("X1", "X4")
assert disc_dist2.total == disc_dist.total
assert all(
compare(disc_dist2.keys_as_list(), [("x", 1), ("x", 2), ("y", 1), ("y", 2)])
)
assert disc_dist2[("x", 1)] == 24
assert disc_dist2[("x", 2)] == 28
assert disc_dist2[("y", 1)] == 40
assert disc_dist2[("y", 2)] == 44
assert disc_dist2.probability(("x", 1)) == 24 / 136
assert disc_dist2.probability(("x", 2)) == 28 / 136
assert disc_dist2.probability(("y", 1)) == 40 / 136
assert disc_dist2.probability(("y", 2)) == 44 / 136
disc_dist2 = disc_dist.marginal("X1", "X2", "X4")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), [1, 2]))
assert disc_dist2[1] == 64
assert disc_dist2[2] == 72
assert disc_dist2.probability(1) == 64 / 136
assert disc_dist2.probability(2) == 72 / 136
# marginalize two times
disc_dist2 = disc_dist.marginal("X1", "X4")
disc_dist3 = disc_dist2.marginal("X2")
assert disc_dist3.total == disc_dist.total
assert all(compare(disc_dist3.keys_as_list(), [1, 2]))
assert disc_dist3[1] == 64
assert disc_dist3[2] == 72
assert disc_dist3.probability(1) == 64 / 136
assert disc_dist3.probability(2) == 72 / 136
# marginalize three times
disc_dist2 = disc_dist.marginal("X4")
disc_dist3 = disc_dist2.marginal("X3")
disc_dist4 = disc_dist3.marginal("X2")
assert disc_dist4.total == disc_dist.total
assert all(compare(disc_dist4.keys_as_list(), ["a", "b"]))
assert disc_dist4["a"] == 36
assert disc_dist4["b"] == 100
assert disc_dist4.probability("a") == 36 / 136
assert disc_dist4.probability("b") == 100 / 136
def test_marginal_by_name_discrete_distribution():
    # Four-level dist.
samples = {
("a", "x", 1, 33): 1,
("a", "x", 2, 33): 2,
("a", "x", 1, 44): 3,
("a", "x", 2, 44): 4,
("a", "y", 1, 33): 5,
("a", "y", 2, 33): 6,
("a", "y", 1, 44): 7,
("a", "y", 2, 44): 8,
("b", "x", 1, 33): 9,
("b", "x", 2, 33): 10,
("b", "x", 1, 44): 11,
("b", "x", 2, 44): 12,
("b", "y", 1, 33): 13,
("b", "y", 2, 33): 14,
("b", "y", 1, 44): 15,
("b", "y", 2, 44): 16,
}
disc_dist = DiscreteDistribution(samples, names=["Age", "Sex", "Edu", "Etn"])
disc_dist2 = disc_dist.marginal("Edu")
assert disc_dist2.total == disc_dist.total
assert all(
compare(
disc_dist2.keys_as_list(),
[
("a", "x", 33),
("a", "x", 44),
("a", "y", 33),
("a", "y", 44),
("b", "x", 33),
("b", "x", 44),
("b", "y", 33),
("b", "y", 44),
],
)
)
assert disc_dist2[("a", "x", 33)] == 3
assert disc_dist2[("a", "x", 44)] == 7
assert disc_dist2[("a", "y", 33)] == 11
assert disc_dist2[("a", "y", 44)] == 15
assert disc_dist2[("b", "x", 33)] == 19
assert disc_dist2[("b", "x", 44)] == 23
assert disc_dist2[("b", "y", 33)] == 27
assert disc_dist2[("b", "y", 44)] == 31
assert disc_dist2.probability(("a", "x", 33)) == 3 / 136
assert disc_dist2.probability(("a", "x", 44)) == 7 / 136
assert disc_dist2.probability(("a", "y", 33)) == 11 / 136
assert disc_dist2.probability(("a", "y", 44)) == 15 / 136
assert disc_dist2.probability(("b", "x", 33)) == 19 / 136
assert disc_dist2.probability(("b", "x", 44)) == 23 / 136
assert disc_dist2.probability(("b", "y", 33)) == 27 / 136
assert disc_dist2.probability(("b", "y", 44)) == 31 / 136
disc_dist2 = disc_dist.marginal("Etn")
assert disc_dist2.total == disc_dist.total
assert all(
compare(
disc_dist2.keys_as_list(),
[
("a", "x", 1),
("a", "x", 2),
("a", "y", 1),
("a", "y", 2),
("b", "x", 1),
("b", "x", 2),
("b", "y", 1),
("b", "y", 2),
],
)
)
assert disc_dist2[("a", "x", 1)] == 4
assert disc_dist2[("a", "x", 2)] == 6
assert disc_dist2[("a", "y", 1)] == 12
assert disc_dist2[("a", "y", 2)] == 14
assert disc_dist2[("b", "x", 1)] == 20
assert disc_dist2[("b", "x", 2)] == 22
assert disc_dist2[("b", "y", 1)] == 28
assert disc_dist2[("b", "y", 2)] == 30
assert disc_dist2.probability(("a", "x", 1)) == 4 / 136
assert disc_dist2.probability(("a", "x", 2)) == 6 / 136
assert disc_dist2.probability(("a", "y", 1)) == 12 / 136
assert disc_dist2.probability(("a", "y", 2)) == 14 / 136
assert disc_dist2.probability(("b", "x", 1)) == 20 / 136
assert disc_dist2.probability(("b", "x", 2)) == 22 / 136
assert disc_dist2.probability(("b", "y", 1)) == 28 / 136
assert disc_dist2.probability(("b", "y", 2)) == 30 / 136
disc_dist2 = disc_dist.marginal("Age", "Etn")
assert disc_dist2.total == disc_dist.total
assert all(
compare(disc_dist2.keys_as_list(), [("x", 1), ("x", 2), ("y", 1), ("y", 2)])
)
assert disc_dist2[("x", 1)] == 24
assert disc_dist2[("x", 2)] == 28
assert disc_dist2[("y", 1)] == 40
assert disc_dist2[("y", 2)] == 44
assert disc_dist2.probability(("x", 1)) == 24 / 136
assert disc_dist2.probability(("x", 2)) == 28 / 136
assert disc_dist2.probability(("y", 1)) == 40 / 136
assert disc_dist2.probability(("y", 2)) == 44 / 136
disc_dist2 = disc_dist.marginal("Age", "Sex", "Etn")
assert disc_dist2.total == disc_dist.total
assert all(compare(disc_dist2.keys_as_list(), [1, 2]))
assert disc_dist2[1] == 64
assert disc_dist2[2] == 72
assert disc_dist2.probability(1) == 64 / 136
assert disc_dist2.probability(2) == 72 / 136
# marginalize two times
disc_dist2 = disc_dist.marginal("Age", "Etn")
disc_dist3 = disc_dist2.marginal("Sex")
assert disc_dist3.total == disc_dist.total
assert all(compare(disc_dist3.keys_as_list(), [1, 2]))
assert disc_dist3[1] == 64
assert disc_dist3[2] == 72
assert disc_dist3.probability(1) == 64 / 136
assert disc_dist3.probability(2) == 72 / 136
# marginalize three times
disc_dist2 = disc_dist.marginal("Etn")
disc_dist3 = disc_dist2.marginal("Edu")
disc_dist4 = disc_dist3.marginal("Sex")
assert disc_dist4.total == disc_dist.total
assert all(compare(disc_dist4.keys_as_list(), ["a", "b"]))
assert disc_dist4["a"] == 36
assert disc_dist4["b"] == 100
assert disc_dist4.probability("a") == 36 / 136
assert disc_dist4.probability("b") == 100 / 136
def test_marginals_operator_discrete_distribution():
    # Four-level dist.
samples = {
("a", "x", 1, 33): 1,
("a", "x", 2, 33): 2,
("a", "x", 1, 44): 3,
("a", "x", 2, 44): 4,
("a", "y", 1, 33): 5,
("a", "y", 2, 33): 6,
("a", "y", 1, 44): 7,
("a", "y", 2, 44): 8,
("b", "x", 1, 33): 9,
("b", "x", 2, 33): 10,
("b", "x", 1, 44): 11,
("b", "x", 2, 44): 12,
("b", "y", 1, 33): 13,
("b", "y", 2, 33): 14,
("b", "y", 1, 44): 15,
("b", "y", 2, 44): 16,
}
disc_dist = DiscreteDistribution(samples)
assert (disc_dist << "X2").total == disc_dist.total
assert (disc_dist << ("X2", "X3")).total == disc_dist.total
assert (disc_dist << ("X2", "X3", "X4")).total == disc_dist.total
assert all(compare((disc_dist << ("X1", "X2", "X4")).keys_as_list(), [1, 2]))
assert all(compare((disc_dist << ("X1", "X2", "X3")).keys_as_list(), [33, 44]))
assert all(compare((disc_dist << ("X2", "X3", "X4")).keys_as_list(), ["a", "b"]))
assert all(
compare(
(disc_dist << ("X2", "X3")).keys_as_list(),
[("a", 33), ("a", 44), ("b", 33), ("b", 44)],
)
)
disc_dist = DiscreteDistribution(samples, names=["Age", "Sex", "Education", "City"])
assert (disc_dist << ("Age")).total == disc_dist.total
assert (disc_dist << ("Sex", "Education")).total == disc_dist.total
assert (disc_dist << ("Sex", "Education", "City")).total == disc_dist.total
assert all(compare((disc_dist << ("Age", "Sex", "City")).keys_as_list(), [1, 2]))
assert all(
compare((disc_dist << ("Age", "Sex", "Education")).keys_as_list(), [33, 44])
)
assert all(
compare((disc_dist << ("Sex", "Education", "City")).keys_as_list(), ["a", "b"])
)
assert all(
compare(
(disc_dist << ("Sex", "Education")).keys_as_list(),
[("a", 33), ("a", 44), ("b", 33), ("b", 44)],
)
)
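
# A minimal sketch (not part of the library under test) of what the marginal
# operations above compute: summing counts over the tuple coordinate that is
# marginalized out. The helper name and sample values below are illustrative only.
def _marginalize_out(samples, index):
    """Sum counts over the coordinate at `index`, keeping the remaining ones."""
    out = {}
    for key, count in samples.items():
        reduced = tuple(v for i, v in enumerate(key) if i != index)
        reduced = reduced[0] if len(reduced) == 1 else reduced
        out[reduced] = out.get(reduced, 0) + count
    return out


# For example, _marginalize_out({("a", 1): 2, ("a", 2): 3, ("b", 1): 5}, 1)
# gives {"a": 5, "b": 5}, mirroring what disc_dist.marginal("X2") does above.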
| 36.954145
| 88
| 0.528325
|
37b783f71694aab22c6f84fd108982fc8d7cc22a
| 57,744
|
py
|
Python
|
test/integration/component/test_ip_reservation.py
|
ke4qqq/cloudstack
|
7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511
|
[
"Apache-2.0"
] | 1
|
2015-02-06T04:17:11.000Z
|
2015-02-06T04:17:11.000Z
|
test/integration/component/test_ip_reservation.py
|
ke4qqq/cloudstack
|
7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511
|
[
"Apache-2.0"
] | 6
|
2020-11-16T20:44:23.000Z
|
2022-02-01T01:06:16.000Z
|
test/integration/component/test_ip_reservation.py
|
ke4qqq/cloudstack
|
7e0f1cf4571f9c3f6c26b8d744d2ed102fa7a511
|
[
"Apache-2.0"
] | 15
|
2017-01-12T11:17:48.000Z
|
2019-04-19T10:09:31.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for IP reservation feature
Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/IP+Range+Reservation+within+a+Network+Test+Cases
Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-2266
Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/FS+-+IP+Range+Reservation+within+a+Network
"""
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.lib.utils import validateList, cleanup_resources, verifyRouterState
from marvin.lib.base import (Account,
Network,
VirtualMachine,
Router,
ServiceOffering,
NetworkOffering)
from marvin.lib.common import (get_zone,
get_template,
get_domain,
wait_for_cleanup,
createEnabledNetworkOffering,
createNetworkRulesForVM,
verifyNetworkState)
from marvin.codes import (PASS, FAIL, FAILED, UNKNOWN, FAULT, MASTER,
NAT_RULE, STATIC_NAT_RULE)
import netaddr
import random
from nose.plugins.attrib import attr
from ddt import ddt, data
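# Quick illustration (hypothetical values) of the reservation semantics the
# tests below exercise: a network created with CIDR 10.1.1.0/24 and later
# updated with guestvmcidr=10.1.1.0/29 hands out guest VM IPs only from that
# /29, so an existing VM at 10.1.1.3 stays valid while a request for 10.1.1.9
# must be rejected. The tests verify this with the same netaddr check used below:
#     netaddr.IPAddress("10.1.1.3") in netaddr.IPNetwork("10.1.1.0/29")  # True
#     netaddr.IPAddress("10.1.1.9") in netaddr.IPNetwork("10.1.1.0/29")  # False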
def createIsolatedNetwork(self, network_offering_id, gateway=None):
"""Create isolated network with given network offering and gateway if provided
and return"""
try:
isolated_network = Network.create(self.apiclient, self.testData["isolated_network"],
networkofferingid=network_offering_id,accountid=self.account.name,
domainid=self.domain.id,zoneid=self.zone.id,
gateway=gateway, netmask='255.255.255.0' if gateway else None)
except Exception as e:
return [FAIL, e]
return [PASS, isolated_network]
def matchNetworkGuestVmCIDR(self, networkid, guestvmcidr):
"""List networks with given network id and check if the guestvmcidr matches
with the given cidr"""
networks = Network.list(self.apiclient, id=networkid, listall=True)
self.assertEqual(validateList(networks)[0], PASS, "network list validation failed")
self.assertEqual(str(networks[0].cidr), guestvmcidr, "guestvmcidr of network %s \
does not match with the given value %s" % (networks[0].cidr, guestvmcidr))
return
def createVirtualMachine(self, network_id=None, ip_address=None):
"""Create and return virtual machine within network and ipaddress"""
virtual_machine = VirtualMachine.create(self.apiclient,
self.testData["virtual_machine"],
networkids=network_id,
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.domain.id,
ipaddress=ip_address)
return virtual_machine
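# Illustrative usage of the helpers above (values hypothetical), mirroring how
# the test cases below call them:
#     result = createIsolatedNetwork(self, network_offering_id, gateway="10.1.5.1")
#     vm = createVirtualMachine(self, network_id=result[1].id, ip_address="10.1.5.3")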
def CreateEnabledNetworkOffering(apiclient, networkServices):
"""Create network offering of given test data and enable it"""
result = createEnabledNetworkOffering(apiclient, networkServices)
assert result[0] == PASS, "Network offering creation/enabling failed due to %s" % result[2]
return result[1]
@ddt
class TestIpReservation(cloudstackTestCase):
"""Test IP Range Reservation with a Network
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestIpReservation, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
# Fill services from the external config file
cls.testData = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testData["ostype"]
)
if cls.template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.testData["ostype"]
cls.testData["domainid"] = cls.domain.id
cls.testData["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["template"] = cls.template.id
cls._cleanup = []
try:
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testData["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.isolated_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_persistent_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_persistent"])
cls._cleanup.append(cls.isolated_persistent_network_offering)
cls.isolated_network_offering_RVR = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_RVR"])
cls._cleanup.append(cls.isolated_network_offering_RVR)
except Exception as e:
cls.tearDownClass()
raise unittest.SkipTest("Failure in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
try:
self.account = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
except Exception as e:
self.skipTest("Failed to create account: %s" % e)
return
def tearDown(self):
try:
# Clean up, terminate the resources created
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced"])
def test_vm_create_after_reservation(self):
""" Test creating VM in network after IP reservation
# steps
# 1. Create vm in isolated network (LB through VR or Netscaler) with ip in guestvmcidr
# 2. Update guestvmcidr
# 3. Create another VM
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Existing guest vm ip should not be changed after reservation
# 3. Newly created VM should get ip in guestvmcidr"""
networkOffering = self.isolated_network_offering
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, networkOffering.id, gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
guest_vm_cidr = subnet +".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network.id,
ip_address = subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@attr(tags=["advanced"])
def test_vm_create_outside_cidr_after_reservation(self):
""" Test create VM outside the range of reserved IPs
# steps
# 1. update guestvmcidr of persistent isolated network (LB through VR or
        # Netscaler)
# 2. create another VM with ip outside guestvmcidr
"""
# validation
# 1. guest vm cidr should be successfully updated with correct value
# 2 newly created VM should not be created and result in exception
networkOffering = self.isolated_persistent_network_offering
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, networkOffering.id, gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
guest_vm_cidr = subnet+".0/29"
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
try:
            createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".9")
self.fail("vm should not be created ")
except Exception as e:
self.debug("exception as IP is outside of guestvmcidr %s" % e)
return
@attr(tags=["advanced"])
def test_update_cidr_multiple_vms_not_all_inclusive(self):
""" Test reserve IP range such that one of the VM is not included
# steps
# 1. Create two vms in isolated network
# 2. Update guestvmcidr of network such that only one of the ipaddress of vms
# is in the given range
# validation
        # 1. Network update with this new range should fail"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
try:
createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".9")
except Exception as e:
self.fail("VM creation failed: %s" % e)
with self.assertRaises(Exception):
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
return
@attr(tags=["advanced"])
def test_update_cidr_single_vm_not_inclusive(self):
""" Test reserving IP range in network such that existing VM is outside the range
# steps
# 1. Create vm in isolated network
# 2. Update guestvmcidr of network such that ip address of vm
# is outside the given range
#
# validation
        # 1. Network update with this new range should fail"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".9")
except Exception as e:
self.fail("VM creation failed: %s" % e)
with self.assertRaises(Exception):
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
return
@data(NAT_RULE, STATIC_NAT_RULE)
@attr(tags=["advanced"])
def test_nat_rules(self, value):
""" Test NAT rules working with IP reservation
# steps
# 1. Create vm in persistent isolated network with ip in guestvmcidr
# 2. Create NAT/static NAT rule for this VM
# 3. Update guestvmcidr
# 4. Create another VM and create network rules for this vm too
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Existing guest vm ip should not be changed after reservation
# 3. Newly created VM should get ip in guestvmcidr
# 4. The network rules should be working"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
result = createNetworkRulesForVM(self.apiclient, virtual_machine_1,value,
self.account, self.testData)
if result[0] == FAIL:
self.fail("Failed to create network rules for VM: %s" % result[1])
else:
ipaddress_1 = result[1]
virtual_machine_1.get_ssh_client(ipaddress=ipaddress_1.ipaddress.ipaddress)
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient, id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
result = createNetworkRulesForVM(self.apiclient, virtual_machine_2, value,
self.account, self.testData)
if result[0] == FAIL:
self.fail("Failed to create network rules for VM: %s" % result[1])
else:
ipaddress_2 = result[1]
virtual_machine_2.get_ssh_client(ipaddress=ipaddress_2.ipaddress.ipaddress)
return
@unittest.skip("Skip - WIP")
@attr(tags=["advanced"])
def test_RVR_network(self):
""" Test IP reservation in network with RVR
# steps
# 1. create vm in isolated network with RVR and ip in guestvmcidr
# 2. update guestvmcidr
# 3. List routers and stop the master router, wait till backup router comes up
# 4. create another VM
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Existing guest vm ip should not be changed after reservation
# 3. Newly created VM should get ip in guestvmcidr
# 4. Verify that the network has two routers associated with it
# 5. Backup router should come up when master router is stopped"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering_RVR.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network_RVR= resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network_RVR.id,
ip_address=subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network_RVR.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network_RVR.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
self.debug("Listing routers for network: %s" % isolated_network_RVR.name)
routers = Router.list(self.apiclient, networkid=isolated_network_RVR.id, listall=True)
self.assertEqual(validateList(routers)[0], PASS, "Routers list validation failed")
        self.assertEqual(len(routers), 2, "Length of the router list should be 2 (master & backup)")
if routers[0].redundantstate == MASTER:
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Stopping router ID: %s" % master_router.id)
try:
Router.stop(self.apiclient, id=master_router.id)
except Exception as e:
self.fail("Failed to stop master router due to error %s" % e)
# wait for VR to update state
wait_for_cleanup(self.apiclient, ["router.check.interval"])
result = verifyRouterState(master_router.id, [UNKNOWN,FAULT])
if result[0] == FAIL:
self.fail(result[1])
result = verifyRouterState(backup_router.id, [MASTER])
if result[0] == FAIL:
self.fail(result[1])
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network_RVR.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@attr(tags=["advanced"])
def test_ip_reservation_in_multiple_networks_same_account(self):
""" Test IP reservation in multiple networks created in same account
# steps
# 1. Create two isolated networks with user defined cidr in same account
# Test below conditions for both the networks in the account
# 2. Create vm in persistent isolated network with ip in guestvmcidr
# 3. Update guestvmcidr
# 4. Create another VM
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Existing guest vm ip should not be changed after reservation
# 3. Newly created VM should get ip in guestvmcidr"""
account_1 = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(account_1)
random_subnet = str(random.randrange(1,254))
gateway = "10.1." + random_subnet +".1"
isolated_network_1 = Network.create(self.apiclient, self.testData["isolated_network"],
networkofferingid=self.isolated_network_offering.id,accountid=account_1.name,
domainid=self.domain.id,zoneid=self.zone.id,
gateway=gateway, netmask='255.255.255.0')
guest_vm_cidr = "10.1."+random_subnet+".0/29"
try:
virtual_machine_1 = VirtualMachine.create(self.apiclient, self.testData["virtual_machine"],
networkids=isolated_network_1.id, serviceofferingid=self.service_offering.id,
accountid=account_1.name, domainid=self.domain.id,
ipaddress="10.1."+random_subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network_1.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network_1.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_2 = VirtualMachine.create(self.apiclient, self.testData["virtual_machine"],
networkids=isolated_network_1.id, serviceofferingid=self.service_offering.id,
accountid=account_1.name, domainid=self.domain.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
random_subnet = str(random.randrange(1,254))
gateway = "10.1." + random_subnet +".1"
isolated_network_2 = Network.create(self.apiclient, self.testData["isolated_network"],
networkofferingid=self.isolated_network_offering.id,accountid=account_1.name,
domainid=self.domain.id,zoneid=self.zone.id,
gateway=gateway, netmask='255.255.255.0')
guest_vm_cidr = "10.1."+random_subnet+".0/29"
try:
virtual_machine_3 = VirtualMachine.create(self.apiclient, self.testData["virtual_machine"],
networkids=isolated_network_2.id, serviceofferingid=self.service_offering.id,
accountid=account_1.name, domainid=self.domain.id,
ipaddress="10.1."+random_subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network_2.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network_2.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_3.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_3.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_4 = VirtualMachine.create(self.apiclient, self.testData["virtual_machine"],
networkids=isolated_network_2.id, serviceofferingid=self.service_offering.id,
accountid=account_1.name, domainid=self.domain.id)
if netaddr.IPAddress(virtual_machine_4.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@ddt
class TestRestartNetwork(cloudstackTestCase):
"""Test Restart Network
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRestartNetwork, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
# Fill services from the external config file
cls.testData = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testData["ostype"]
)
if cls.template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.testData["ostype"]
cls.testData["domainid"] = cls.domain.id
cls.testData["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["template"] = cls.template.id
cls._cleanup = []
try:
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testData["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.isolated_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_persistent_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_persistent"])
cls._cleanup.append(cls.isolated_persistent_network_offering)
except Exception as e:
cls.tearDownClass()
raise unittest.SkipTest("Failure in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
try:
self.account = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
except Exception as e:
self.skipTest("Failed to create account: %s" % e)
return
def tearDown(self):
try:
# Clean up, terminate the resources created
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@data(True, False)
@attr(tags=["advanced"])
def test_restart_network_with_cleanup(self, value):
""" Test IP reservation rules with network restart operation
# steps
# 1. Create vm in isolated network with ip in guestvmcidr
# 2. Update guestvmcidr
# 3. Restart network with cleanup True/False
# 4. Deploy another VM in the network
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Existing guest vm ip should not be changed after reservation
# 3. Network should be restarted successfully with and without cleanup
# 4. Newly created VM should get ip in guestvmcidr"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network= resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
#Restart Network
isolated_network.restart(self.apiclient, cleanup=value)
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@ddt
class TestUpdateIPReservation(cloudstackTestCase):
"""Test Updating IP reservation multiple times
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUpdateIPReservation, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
# Fill services from the external config file
cls.testData = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testData["ostype"]
)
if cls.template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.testData["ostype"]
cls.testData["domainid"] = cls.domain.id
cls.testData["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["template"] = cls.template.id
cls._cleanup = []
try:
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testData["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.isolated_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_persistent_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_persistent"])
cls._cleanup.append(cls.isolated_persistent_network_offering)
except Exception as e:
cls.tearDownClass()
raise unittest.SkipTest("Failure in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
try:
self.account = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
except Exception as e:
self.skipTest("Failed to create account: %s" % e)
return
def tearDown(self):
try:
# Clean up, terminate the resources created
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@data("existingVmInclusive", "existingVmExclusive")
@attr(tags=["advanced"])
def test_update_network_guestvmcidr(self, value):
""" Test updating guest vm cidr of the network after
VMs are already deployed in previous guest VM cidr
# steps
# 1. Create isolated network with user defined cidr
# 2. Deploy VM in the network
# 3. Try to update the guestvmcidr of the network with VM ip in the guestvmcidr and
# deploy another VM
# 4. Try to update the guestvmcidr of the network with VM ip outside the guestvmcidr
#
# validation
        # 1. When the VM IP is in the guestvmcidr, the update should be successful and
        # the new VM should get IP from this range
        # 2. When the VM IP is outside the guestvmcidr, the update should be unsuccessful"""
random_subnet = str(random.randrange(1,254))
gateway = "10.1." + random_subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network= resultSet[1]
guest_vm_cidr = "10.1."+random_subnet+".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network.id,
ip_address=u"10.1."+random_subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
# Update guest vm cidr of network again
if value == "existingVmExclusive":
guest_vm_cidr = "10.1."+random_subnet+".10/29"
try:
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
self.fail("Network updation should fail")
except Exception as e:
self.debug("Failed to update guest VM cidr of network: %s" % e)
elif value == "existingVmInclusive":
guest_vm_cidr = "10.1."+random_subnet+".0/28"
try:
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
except Exception as e:
self.fail("Failed to update guest VM cidr of network: %s" % e)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
try:
virtual_machine_3 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_3.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@ddt
class TestRouterOperations(cloudstackTestCase):
"""Test Router operations of network with IP reservation
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRouterOperations, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
# Fill services from the external config file
cls.testData = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testData["ostype"]
)
if cls.template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.testData["ostype"]
cls.testData["domainid"] = cls.domain.id
cls.testData["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["template"] = cls.template.id
cls._cleanup = []
try:
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testData["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.isolated_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_persistent_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_persistent"])
cls._cleanup.append(cls.isolated_persistent_network_offering)
except Exception as e:
cls.tearDownClass()
raise unittest.SkipTest("Failure in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
try:
self.account = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
except Exception as e:
self.skipTest("Failed to create account: %s" % e)
return
def tearDown(self):
try:
# Clean up, terminate the resources created
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced"])
def test_reservation_after_router_restart(self):
""" Test IP reservation working before and after router is restarted
# steps
# 1. Update guestvmcidr of persistent isolated network
# 2. Reboot router
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. Network cidr should remain same after router restart"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_persistent_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network= resultSet[1]
guest_vm_cidr = subnet+".0/29"
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
routers = Router.list(self.apiclient,
networkid=isolated_network.id,
listall=True)
self.assertEqual(validateList(routers)[0], PASS, "routers list validation failed")
if not routers:
self.fail("Router list should not be empty")
Router.reboot(self.apiclient, routers[0].id)
networks = Network.list(self.apiclient, id=isolated_network.id)
self.assertEqual(validateList(networks)[0], PASS, "networks list validation failed")
self.assertEqual(networks[0].cidr, guest_vm_cidr, "guestvmcidr should match after router reboot")
return
@attr(tags=["advanced"])
def test_destroy_recreate_router(self):
""" Test IP reservation working after destroying and recreating router
# steps
# 1. Create isolated network and deploy VM in it and update network with
# guestvmcidr
# 2. List the router associated with network and destroy the router
# 3. Restart the network
        # 4. Deploy another VM in the network
#
# validation
# 1. Guest vm cidr should be successfully updated with correct value
# 2. existing guest vm ip should not be changed after reservation
# 3. Router should be destroyed and recreated when network is restarted
# 4. New VM should be deployed in the guestvmcidr"""
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet +".1"
resultSet = createIsolatedNetwork(self, self.isolated_network_offering.id,
gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network= resultSet[1]
guest_vm_cidr = subnet+".0/29"
try:
virtual_machine_1 = createVirtualMachine(self, network_id=isolated_network.id,
ip_address=subnet+".3")
except Exception as e:
self.fail("VM creation failed: %s" % e)
isolated_network.update(self.apiclient, guestvmcidr=guest_vm_cidr)
matchNetworkGuestVmCIDR(self, isolated_network.id, guest_vm_cidr)
vms = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(validateList(vms)[0], PASS, "vm list validation failed")
self.assertEqual(vms[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
# List router and destroy it
routers = Router.list(self.apiclient, networkid=isolated_network.id, listall=True)
self.assertEqual(validateList(routers)[0], PASS, "Routers list validation failed")
# Destroy Router
Router.destroy(self.apiclient, id=routers[0].id)
#Restart Network
isolated_network.restart(self.apiclient)
try:
virtual_machine_2 = createVirtualMachine(self, network_id=isolated_network.id)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.fail("VM creation failed, cannot validate the condition: %s" % e)
return
@ddt
class TestFailureScenarios(cloudstackTestCase):
"""Test failure scenarios related to IP reservation in network
"""
@classmethod
def setUpClass(cls):
        cls.testClient = super(TestFailureScenarios, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
# Fill services from the external config file
cls.testData = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testData["ostype"]
)
if cls.template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.testData["ostype"]
cls.testData["domainid"] = cls.domain.id
cls.testData["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["zoneid"] = cls.zone.id
cls.testData["virtual_machine"]["template"] = cls.template.id
cls._cleanup = []
try:
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testData["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.isolated_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_persistent_network_offering = CreateEnabledNetworkOffering(cls.api_client,
cls.testData["nw_off_isolated_persistent"])
cls._cleanup.append(cls.isolated_persistent_network_offering)
cls.testData["shared_network_offering"]["specifyVlan"] = "True"
cls.testData["shared_network_offering"]["specifyIpRanges"] = "True"
#Create Network Offering
cls.shared_network_offering = NetworkOffering.create(cls.api_client,
cls.testData["shared_network_offering"],
conservemode=False)
cls._cleanup.append(cls.shared_network_offering)
#Update network offering state from disabled to enabled.
NetworkOffering.update(cls.shared_network_offering,cls.api_client,state="enabled")
except Exception as e:
cls.tearDownClass()
raise unittest.SkipTest("Failure in setUpClass: %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
try:
self.account = Account.create(self.apiclient, self.testData["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
except Exception as e:
self.skipTest("Failed to create account: %s" % e)
return
def tearDown(self):
try:
# Clean up, terminate the resources created
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced"], required_hardware="false")
def test_network_not_implemented(self):
# steps
# 1. update guestvmcidr of isolated network (non persistent)
#
# validation
        # should throw an exception as the network is not in the Implemented state, since no VM has been created
networkOffering = self.isolated_network_offering
resultSet = createIsolatedNetwork(self, networkOffering.id)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_network = resultSet[1]
with self.assertRaises(Exception):
isolated_network.update(self.apiclient, guestvmcidr="10.1.1.0/26")
return
@attr(tags=["advanced"], required_hardware="false")
def test_vm_create_after_reservation(self):
# steps
# 1. create vm in persistent isolated network with ip in guestvmcidr
# 2. update guestvmcidr
# 3. create another VM
#
# validation
# 1. guest vm cidr should be successfully updated with correct value
# 2. existing guest vm ip should not be changed after reservation
# 3. newly created VM should get ip in guestvmcidr
networkOffering = self.isolated_persistent_network_offering
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet + ".1"
isolated_persistent_network = None
resultSet = createIsolatedNetwork(self, networkOffering.id, gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_persistent_network = resultSet[1]
guest_vm_cidr = subnet +".0/29"
virtual_machine_1 = None
try:
virtual_machine_1 = VirtualMachine.create(self.apiclient,
self.testData["virtual_machine"],
networkids=isolated_persistent_network.id,
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.domain.id,
ipaddress=subnet+".3"
)
except Exception as e:
self.fail("VM creation fails in network: %s" % e)
update_response = Network.update(isolated_persistent_network, self.apiclient, id=isolated_persistent_network.id, guestvmcidr=guest_vm_cidr)
self.assertEqual(guest_vm_cidr, update_response.cidr, "cidr in response is not as expected")
vm_list = VirtualMachine.list(self.apiclient,
id=virtual_machine_1.id)
self.assertEqual(isinstance(vm_list, list),
True,
"VM list response in not a valid list")
self.assertEqual(vm_list[0].nic[0].ipaddress,
virtual_machine_1.ipaddress,
"VM IP should not change after reservation")
try:
virtual_machine_2 = VirtualMachine.create(self.apiclient,
self.testData["virtual_machine"],
networkids=isolated_persistent_network.id,
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.domain.id
)
if netaddr.IPAddress(virtual_machine_2.ipaddress) not in netaddr.IPNetwork(guest_vm_cidr):
self.fail("Newly created VM doesn't get IP from reserverd CIDR")
except Exception as e:
self.skipTest("VM creation fails, cannot validate the condition: %s" % e)
return
@attr(tags=["advanced"], required_hardware="false")
def test_reservation_after_router_restart(self):
# steps
# 1. update guestvmcidr of persistent isolated network
# 2. reboot router
#
# validation
# 1. guest vm cidr should be successfully updated with correct value
# 2. network cidr should remain same after router restart
networkOffering = self.isolated_persistent_network_offering
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet + ".1"
isolated_persistent_network = None
resultSet = createIsolatedNetwork(self, networkOffering.id, gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_persistent_network = resultSet[1]
response = verifyNetworkState(self.apiclient, isolated_persistent_network.id,\
"implemented")
exceptionOccured = response[0]
isNetworkInDesiredState = response[1]
exceptionMessage = response[2]
if (exceptionOccured or (not isNetworkInDesiredState)):
self.fail(exceptionMessage)
guest_vm_cidr = subnet +".0/29"
update_response = Network.update(isolated_persistent_network, self.apiclient, id=isolated_persistent_network.id, guestvmcidr=guest_vm_cidr)
self.assertEqual(guest_vm_cidr, update_response.cidr, "cidr in response is not as expected")
routers = Router.list(self.apiclient,
networkid=isolated_persistent_network.id,
listall=True)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return valid response"
)
if not routers:
self.skipTest("Router list should not be empty, skipping test")
Router.reboot(self.apiclient, routers[0].id)
networks = Network.list(self.apiclient, id=isolated_persistent_network.id)
self.assertEqual(
isinstance(networks, list),
True,
"list Networks should return valid response"
)
self.assertEqual(networks[0].cidr, guest_vm_cidr, "guestvmcidr should match after router reboot")
return
@attr(tags=["advanced"], required_hardware="false")
def test_vm_create_outside_cidr_after_reservation(self):
# steps
# 1. update guestvmcidr of persistent isolated network
# 2. create another VM with ip outside guestvmcidr
#
# validation
# 1. guest vm cidr should be successfully updated with correct value
# 2 newly created VM should not be created and result in exception
networkOffering = self.isolated_persistent_network_offering
subnet = "10.1."+str(random.randrange(1,254))
gateway = subnet + ".1"
isolated_persistent_network = None
resultSet = createIsolatedNetwork(self, networkOffering.id, gateway=gateway)
if resultSet[0] == FAIL:
self.fail("Failed to create isolated network")
else:
isolated_persistent_network = resultSet[1]
guest_vm_cidr = subnet +".0/29"
update_response = Network.update(isolated_persistent_network, self.apiclient, id=isolated_persistent_network.id, guestvmcidr=guest_vm_cidr)
self.assertEqual(guest_vm_cidr, update_response.cidr, "cidr in response is not as expected")
with self.assertRaises(Exception):
VirtualMachine.create(self.apiclient,
self.testData["virtual_machine"],
networkids=isolated_persistent_network.id,
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.domain.id,
ipaddress="10.1.1.9"
)
return
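# These are Marvin-driven integration tests and need a live CloudStack zone;
# a typical invocation (a rough sketch -- exact flags depend on your Marvin
# setup) looks like:
#     nosetests --with-marvin --marvin-config=/path/to/marvin.cfg \
#         test/integration/component/test_ip_reservation.py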
| 47.061125
| 147
| 0.603457
|
3400817c44441247fba7a19b3e2cbe7f7e6feee7
| 2,887
|
py
|
Python
|
utils/link_tester.py
|
ArneBinder/transformers
|
ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1
|
[
"Apache-2.0"
] | 309
|
2020-02-07T23:09:27.000Z
|
2022-03-31T08:01:53.000Z
|
utils/link_tester.py
|
ArneBinder/transformers
|
ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1
|
[
"Apache-2.0"
] | 93
|
2020-02-22T05:56:28.000Z
|
2022-03-27T08:43:38.000Z
|
utils/link_tester.py
|
ArneBinder/transformers
|
ddaafd78fb9c98d4f7b5009fb1998deff4c3d6f1
|
[
"Apache-2.0"
] | 148
|
2020-02-14T22:16:11.000Z
|
2022-03-22T17:08:04.000Z
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Link tester.
This little utility reads all the python files in the repository,
scans for links pointing to S3 and tests the links one by one. Raises an error
at the end of the scan if at least one link was reported broken.
"""
import os
import re
import sys
import requests
REGEXP_FIND_S3_LINKS = r"""([\"'])(https:\/\/s3)(.*)?\1"""
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
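# For illustration, the pattern above matches quoted S3 URLs embedded in
# source code, e.g. a string literal such as
#     "https://s3.amazonaws.com/models.huggingface.co/bert/some-model/config.json"
# would be captured; the bare bucket prefix defined above is filtered out
# again in find_all_links() below.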
def list_python_files_in_repository():
"""List all python files in the repository.
This function assumes that the script is executed in the root folder.
"""
source_code_files = []
for path, subdirs, files in os.walk("."):
if "templates" in path:
continue
for name in files:
if ".py" in name and ".pyc" not in name:
path_to_files = os.path.join(path, name)
source_code_files.append(path_to_files)
return source_code_files
def find_all_links(file_paths):
links = []
for path in file_paths:
links += scan_code_for_links(path)
return [link for link in links if link != S3_BUCKET_PREFIX]
def scan_code_for_links(source):
"""Scans the file to find links using a regular expression.
Returns a list of links.
"""
with open(source, "r") as content:
content = content.read()
raw_links = re.findall(REGEXP_FIND_S3_LINKS, content)
links = [prefix + suffix for _, prefix, suffix in raw_links]
return links
def check_all_links(links):
"""Check that the provided links are valid.
Links are considered valid if a HEAD request to the server
returns a 200 status code.
"""
broken_links = []
for link in links:
head = requests.head(link)
if head.status_code != 200:
broken_links.append(link)
return broken_links
if __name__ == "__main__":
file_paths = list_python_files_in_repository()
links = find_all_links(file_paths)
broken_links = check_all_links(links)
print("Looking for broken links to pre-trained models/configs/tokenizers...")
if broken_links:
print("The following links did not respond:")
for link in broken_links:
print("- {}".format(link))
sys.exit(1)
print("All links are ok.")
| 29.762887
| 81
| 0.682716
|
74c454427b95e0d6f3dcfd6bbcd9a083a9cdd537
| 2,228
|
py
|
Python
|
vendor/src/html5lib-python/html5lib/tests/test_encoding.py
|
lgp171188/fjord
|
1cb4aa3c3e0932c586cdd2c4ee3b6b9974a6976a
|
[
"BSD-3-Clause"
] | 16
|
2015-02-06T14:35:57.000Z
|
2021-07-10T11:14:00.000Z
|
vendor/src/html5lib-python/html5lib/tests/test_encoding.py
|
lgp171188/fjord
|
1cb4aa3c3e0932c586cdd2c4ee3b6b9974a6976a
|
[
"BSD-3-Clause"
] | 310
|
2015-01-07T14:39:35.000Z
|
2016-05-02T17:41:30.000Z
|
vendor/src/html5lib-python/html5lib/tests/test_encoding.py
|
lgp171188/fjord
|
1cb4aa3c3e0932c586cdd2c4ee3b6b9974a6976a
|
[
"BSD-3-Clause"
] | 22
|
2015-01-15T13:46:03.000Z
|
2020-07-24T10:08:51.000Z
|
from __future__ import absolute_import, division, unicode_literals
import os
import unittest
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, test_dir, errorMessage
from html5lib import HTMLParser, inputstream
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name_a(self):
self.assertEqual(inputstream.codecName('utf-8'), 'utf-8')
def test_codec_name_b(self):
self.assertEqual(inputstream.codecName('utf8'), 'utf-8')
def test_codec_name_c(self):
self.assertEqual(inputstream.codecName(' utf8 '), 'utf-8')
def test_codec_name_d(self):
self.assertEqual(inputstream.codecName('ISO_8859--1'), 'windows-1252')
def runParserEncodingTest(data, encoding):
p = HTMLParser()
assert p.documentEncoding is None
p.parse(data, useChardet=False)
encoding = encoding.lower().decode('ascii')
assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
def runPreScanEncodingTest(data, encoding):
stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
encoding = encoding.lower().decode('ascii')
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
def test_encoding():
for filename in get_data_files('encoding'):
tests = TestData(filename, b'data', encoding=None)
for idx, test in enumerate(tests):
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
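# Note (added for clarity): test_encoding is a nose-style test generator -- each yielded
# (test function, data, encoding) tuple is collected as a separate test case by the yield-
# aware runner html5lib's suite historically used; plain unittest does not collect these.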
try:
try:
import charade # flake8: noqa
except ImportError:
import chardet # flake8: noqa
except ImportError:
print('charade/chardet not found, skipping chardet tests')
else:
def test_chardet():
with open(os.path.join(test_dir, 'encoding' , 'chardet', 'test_big5.txt'), 'rb') as fp:
encoding = inputstream.HTMLInputStream(fp.read()).charEncoding
assert encoding[0].lower() == 'big5'
| 32.764706
| 99
| 0.70781
|
c6fdacb231fa9d7c5485d00d3022b374d423bb1a
| 1,620
|
py
|
Python
|
train_ppo.py
|
zhuyawen/sonic-on-ray
|
418d8917aff3265f50ca37d8aed0a244af5a5fa8
|
[
"Apache-2.0"
] | 47
|
2018-04-05T20:24:39.000Z
|
2021-10-30T06:53:46.000Z
|
train_ppo.py
|
zhuyawen/sonic-on-ray
|
418d8917aff3265f50ca37d8aed0a244af5a5fa8
|
[
"Apache-2.0"
] | 2
|
2018-10-30T20:38:05.000Z
|
2019-07-15T18:44:01.000Z
|
train_ppo.py
|
LaudateCorpus1/sonic-on-ray
|
418d8917aff3265f50ca37d8aed0a244af5a5fa8
|
[
"Apache-2.0"
] | 14
|
2018-04-19T20:05:05.000Z
|
2021-09-19T06:48:17.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonic_on_ray
import ray
from ray.rllib import ppo
from ray.tune.registry import register_env
env_name = 'sonic_env'
# Note that the hyperparameters have been tuned for Sonic, which can be run by
# replacing the function below with:
#
# register_env(env_name, lambda config: sonic_on_ray.make(
# game='SonicTheHedgehog-Genesis',
# state='GreenHillZone.Act1'))
#
# However, to try Sonic, you have to obtain the ROM yourself (see the
# instructions at https://github.com/openai/retro/blob/master/README.md).
register_env(env_name,
lambda config: sonic_on_ray.make(game='Airstriker-Genesis',
state='Level1'))
ray.init()
config = ppo.DEFAULT_CONFIG.copy()
config.update({
'timesteps_per_batch': 40000,
'min_steps_per_task': 100,
'num_workers': 32,
'gamma': 0.99,
'lambda': 0.95,
'clip_param': 0.1,
'num_sgd_iter': 30,
'sgd_batchsize': 4096,
'sgd_stepsize': 5e-5,
'use_gae': True,
'horizon': 4000,
'devices': ['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3', '/gpu:4', '/gpu:5',
                '/gpu:6', '/gpu:7'],
'tf_session_args': {
'gpu_options': {'allow_growth': True}
}
})
alg = ppo.PPOAgent(config=config, env=env_name)
for i in range(1000):
result = alg.train()
print('result = {}'.format(result))
if i % 10 == 0:
checkpoint = alg.save()
print('checkpoint saved at', checkpoint)
| 28.421053
| 76
| 0.621605
|
7824199b84dc568c0155e61b9775180406dec779
| 28,388
|
py
|
Python
|
parsl/executors/high_throughput/process_worker_pool.py
|
tuhz/parsl
|
8a36d0ae8f6d935441a378c4cd4e85b308a36caa
|
[
"Apache-2.0"
] | null | null | null |
parsl/executors/high_throughput/process_worker_pool.py
|
tuhz/parsl
|
8a36d0ae8f6d935441a378c4cd4e85b308a36caa
|
[
"Apache-2.0"
] | null | null | null |
parsl/executors/high_throughput/process_worker_pool.py
|
tuhz/parsl
|
8a36d0ae8f6d935441a378c4cd4e85b308a36caa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import logging
import os
import sys
import platform
# import random
import threading
import pickle
import time
import datetime
import queue
import uuid
import zmq
import math
import json
import psutil
import multiprocessing
from parsl.version import VERSION as PARSL_VERSION
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput.errors import WorkerLost
from parsl.executors.high_throughput.probe import probe_addresses
if platform.system() == 'Darwin':
from parsl.executors.high_throughput.mac_safe_queue import MacSafeQueue as mpQueue
else:
from multiprocessing import Queue as mpQueue
from parsl.serialize import unpack_apply_message, serialize
RESULT_TAG = 10
TASK_REQUEST_TAG = 11
HEARTBEAT_CODE = (2 ** 32) - 1
class Manager(object):
""" Manager manages task execution by the workers
| 0mq | Manager | Worker Processes
| | |
| <-----Request N task-----+--Count task reqs | Request task<--+
Interchange | -------------------------+->Receive task batch| | |
| | Distribute tasks--+----> Get(block) & |
| | | Execute task |
| | | | |
| <------------------------+--Return results----+---- Post result |
| | | | |
| | | +----------+
        |                 |                 | IPC-Queues
"""
def __init__(self,
addresses="127.0.0.1",
address_probe_timeout=30,
task_port="50097",
result_port="50098",
cores_per_worker=1,
mem_per_worker=None,
max_workers=float('inf'),
prefetch_capacity=0,
uid=None,
block_id=None,
heartbeat_threshold=120,
heartbeat_period=30,
poll_period=10):
"""
Parameters
----------
addresses : str
comma separated list of addresses for the interchange
address_probe_timeout : int
Timeout in seconds for the address probe to detect viable addresses
to the interchange. Default : 30s
worker_url : str
Worker url on which workers will attempt to connect back
uid : str
string unique identifier
block_id : str
Block identifier that maps managers to the provider blocks they belong to.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
             there is sufficient memory for each worker. If set to None, memory on node is not
considered in the determination of workers to be launched on node by the manager.
Default: None
max_workers : int
caps the maximum number of workers that can be launched.
default: infinity
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
heartbeat_threshold : int
             Number of seconds since the last message from the interchange after which
             the interchange is assumed to be unavailable and the manager initiates
             shutdown. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message is sent to the interchange
poll_period : int
Timeout period used by the manager in milliseconds. Default: 10ms
"""
logger.info("Manager started")
try:
ix_address = probe_addresses(addresses.split(','), task_port, timeout=address_probe_timeout)
if not ix_address:
raise Exception("No viable address found")
else:
logger.info("Connection to Interchange successful on {}".format(ix_address))
task_q_url = "tcp://{}:{}".format(ix_address, task_port)
result_q_url = "tcp://{}:{}".format(ix_address, result_port)
logger.info("Task url : {}".format(task_q_url))
logger.info("Result url : {}".format(result_q_url))
except Exception:
logger.exception("Caught exception while trying to determine viable address to interchange")
print("Failed to find a viable address to connect to interchange. Exiting")
exit(5)
self.context = zmq.Context()
self.task_incoming = self.context.socket(zmq.DEALER)
self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode('utf-8'))
# Linger is set to 0, so that the manager can exit even when there might be
# messages in the pipe
self.task_incoming.setsockopt(zmq.LINGER, 0)
self.task_incoming.connect(task_q_url)
self.result_outgoing = self.context.socket(zmq.DEALER)
self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode('utf-8'))
self.result_outgoing.setsockopt(zmq.LINGER, 0)
self.result_outgoing.connect(result_q_url)
logger.info("Manager connected")
self.uid = uid
self.block_id = block_id
if os.environ.get('PARSL_CORES'):
cores_on_node = int(os.environ['PARSL_CORES'])
else:
cores_on_node = multiprocessing.cpu_count()
if os.environ.get('PARSL_MEMORY_GB'):
available_mem_on_node = float(os.environ['PARSL_MEMORY_GB'])
else:
available_mem_on_node = round(psutil.virtual_memory().available / (2**30), 1)
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
mem_slots = max_workers
# Avoid a divide by 0 error.
if mem_per_worker and mem_per_worker > 0:
mem_slots = math.floor(available_mem_on_node / mem_per_worker)
self.worker_count = min(max_workers,
mem_slots,
math.floor(cores_on_node / cores_per_worker))
logger.info("Manager will spawn {} workers".format(self.worker_count))
self.pending_task_queue = mpQueue()
self.pending_result_queue = mpQueue()
self.ready_worker_queue = mpQueue()
self.max_queue_size = self.prefetch_capacity + self.worker_count
self.tasks_per_round = 1
self.heartbeat_period = heartbeat_period
self.heartbeat_threshold = heartbeat_threshold
self.poll_period = poll_period
def create_reg_message(self):
""" Creates a registration message to identify the worker to the interchange
"""
msg = {'parsl_v': PARSL_VERSION,
'python_v': "{}.{}.{}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro),
'worker_count': self.worker_count,
'block_id': self.block_id,
'prefetch_capacity': self.prefetch_capacity,
'max_capacity': self.worker_count + self.prefetch_capacity,
'os': platform.system(),
'hostname': platform.node(),
'dir': os.getcwd(),
'cpu_count': psutil.cpu_count(logical=False),
'total_memory': psutil.virtual_memory().total,
'reg_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
b_msg = json.dumps(msg).encode('utf-8')
return b_msg
def heartbeat(self):
""" Send heartbeat to the incoming task queue
"""
heartbeat = (HEARTBEAT_CODE).to_bytes(4, "little")
r = self.task_incoming.send(heartbeat)
logger.debug("Return from heartbeat: {}".format(r))
def pull_tasks(self, kill_event):
""" Pull tasks from the incoming tasks 0mq pipe onto the internal
pending task queue
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.info("[TASK PULL THREAD] starting")
poller = zmq.Poller()
poller.register(self.task_incoming, zmq.POLLIN)
# Send a registration message
msg = self.create_reg_message()
logger.debug("Sending registration message: {}".format(msg))
self.task_incoming.send(msg)
last_beat = time.time()
last_interchange_contact = time.time()
task_recv_counter = 0
poll_timer = self.poll_period
while not kill_event.is_set():
ready_worker_count = self.ready_worker_queue.qsize()
pending_task_count = self.pending_task_queue.qsize()
logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count,
pending_task_count))
if time.time() > last_beat + self.heartbeat_period:
self.heartbeat()
last_beat = time.time()
if pending_task_count < self.max_queue_size and ready_worker_count > 0:
logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count))
msg = ((ready_worker_count).to_bytes(4, "little"))
self.task_incoming.send(msg)
socks = dict(poller.poll(timeout=poll_timer))
if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN:
poll_timer = 0
_, pkl_msg = self.task_incoming.recv_multipart()
tasks = pickle.loads(pkl_msg)
last_interchange_contact = time.time()
if tasks == 'STOP':
logger.critical("[TASK_PULL_THREAD] Received stop request")
kill_event.set()
break
elif tasks == HEARTBEAT_CODE:
logger.debug("Got heartbeat from interchange")
else:
task_recv_counter += len(tasks)
logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks],
task_recv_counter))
for task in tasks:
self.pending_task_queue.put(task)
# logger.debug("[TASK_PULL_THREAD] Ready tasks: {}".format(
# [i['task_id'] for i in self.pending_task_queue]))
else:
logger.debug("[TASK_PULL_THREAD] No incoming tasks")
# Limit poll duration to heartbeat_period
# heartbeat_period is in s vs poll_timer in ms
if not poll_timer:
poll_timer = self.poll_period
poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2)
# Only check if no messages were received.
if time.time() > last_interchange_contact + self.heartbeat_threshold:
logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold")
kill_event.set()
logger.critical("[TASK_PULL_THREAD] Exiting")
break
def push_results(self, kill_event):
""" Listens on the pending_result_queue and sends out results via 0mq
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.debug("[RESULT_PUSH_THREAD] Starting thread")
        push_poll_period = max(10, self.poll_period) / 1000  # push_poll_period must be at least 10 ms
logger.debug("[RESULT_PUSH_THREAD] push poll period: {}".format(push_poll_period))
last_beat = time.time()
items = []
while not kill_event.is_set():
try:
r = self.pending_result_queue.get(block=True, timeout=push_poll_period)
items.append(r)
except queue.Empty:
pass
except Exception as e:
logger.exception("[RESULT_PUSH_THREAD] Got an exception: {}".format(e))
            # Send the accumulated results once the batch is full or the push poll period has elapsed
if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period:
last_beat = time.time()
if items:
self.result_outgoing.send_multipart(items)
items = []
logger.critical("[RESULT_PUSH_THREAD] Exiting")
def worker_watchdog(self, kill_event):
""" Listens on the pending_result_queue and sends out results via 0mq
Parameters:
-----------
kill_event : threading.Event
Event to let the thread know when it is time to die.
"""
logger.debug("[WORKER_WATCHDOG_THREAD] Starting thread")
while not kill_event.is_set():
for worker_id, p in self.procs.items():
if not p.is_alive():
logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has died".format(worker_id))
try:
task = self._tasks_in_progress.pop(worker_id)
logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was busy when it died".format(worker_id))
try:
raise WorkerLost(worker_id, platform.node())
except Exception:
logger.info("[WORKER_WATCHDOG_THREAD] Putting exception for task {} in the pending result queue".format(task['task_id']))
result_package = {'task_id': task['task_id'], 'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
pkl_package = pickle.dumps(result_package)
self.pending_result_queue.put(pkl_package)
except KeyError:
logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was not busy when it died".format(worker_id))
p = multiprocessing.Process(target=worker, args=(worker_id,
self.uid,
self.worker_count,
self.pending_task_queue,
self.pending_result_queue,
self.ready_worker_queue,
self._tasks_in_progress
), name="HTEX-Worker-{}".format(worker_id))
self.procs[worker_id] = p
logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has been restarted".format(worker_id))
time.sleep(self.poll_period)
logger.critical("[WORKER_WATCHDOG_THREAD] Exiting")
def start(self):
""" Start the worker processes.
TODO: Move task receiving to a thread
"""
start = time.time()
self._kill_event = threading.Event()
self._tasks_in_progress = multiprocessing.Manager().dict()
self.procs = {}
for worker_id in range(self.worker_count):
p = multiprocessing.Process(target=worker, args=(worker_id,
self.uid,
self.worker_count,
self.pending_task_queue,
self.pending_result_queue,
self.ready_worker_queue,
self._tasks_in_progress
), name="HTEX-Worker-{}".format(worker_id))
p.start()
self.procs[worker_id] = p
logger.debug("Manager synced with workers")
self._task_puller_thread = threading.Thread(target=self.pull_tasks,
args=(self._kill_event,),
name="Task-Puller")
self._result_pusher_thread = threading.Thread(target=self.push_results,
args=(self._kill_event,),
name="Result-Pusher")
self._worker_watchdog_thread = threading.Thread(target=self.worker_watchdog,
args=(self._kill_event,),
name="worker-watchdog")
self._task_puller_thread.start()
self._result_pusher_thread.start()
self._worker_watchdog_thread.start()
logger.info("Loop start")
# TODO : Add mechanism in this loop to stop the worker pool
# This might need a multiprocessing event to signal back.
self._kill_event.wait()
logger.critical("[MAIN] Received kill event, terminating worker processes")
self._task_puller_thread.join()
self._result_pusher_thread.join()
self._worker_watchdog_thread.join()
for proc_id in self.procs:
self.procs[proc_id].terminate()
logger.critical("Terminating worker {}:{}".format(self.procs[proc_id],
self.procs[proc_id].is_alive()))
self.procs[proc_id].join()
logger.debug("Worker:{} joined successfully".format(self.procs[proc_id]))
self.task_incoming.close()
self.result_outgoing.close()
self.context.term()
delta = time.time() - start
logger.info("process_worker_pool ran for {} seconds".format(delta))
return
def execute_task(bufs):
"""Deserialize the buffer and execute the task.
Returns the result or throws exception.
"""
user_ns = locals()
user_ns.update({'__builtins__': __builtins__})
f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
# We might need to look into callability of the function from itself
    # since we change its name in the new namespace
prefix = "parsl_"
fname = prefix + "f"
argname = prefix + "args"
kwargname = prefix + "kwargs"
resultname = prefix + "result"
user_ns.update({fname: f,
argname: args,
kwargname: kwargs,
resultname: resultname})
code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
argname, kwargname)
try:
# logger.debug("[RUNNER] Executing: {0}".format(code))
exec(code, user_ns, user_ns)
except Exception as e:
logger.warning("Caught exception; will raise it: {}".format(e), exc_info=True)
raise e
else:
# logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname)))
return user_ns.get(resultname)
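# Note (added for clarity): execute_task deserializes the function, args and kwargs into a
# scratch namespace under fixed prefixed names ("parsl_f", "parsl_args", ...) and then
# evaluates the call expression with exec(), returning whatever was bound to "parsl_result".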
def worker(worker_id, pool_id, pool_size, task_queue, result_queue, worker_queue, tasks_in_progress):
"""
Put request token into queue
Get task from task_queue
Pop request from queue
Put result into result_queue
"""
start_file_logger('{}/block-{}/{}/worker_{}.log'.format(args.logdir, args.block_id, pool_id, worker_id),
worker_id,
name="worker_log",
level=logging.DEBUG if args.debug else logging.INFO)
# Store worker ID as an environment variable
os.environ['PARSL_WORKER_RANK'] = str(worker_id)
os.environ['PARSL_WORKER_COUNT'] = str(pool_size)
os.environ['PARSL_WORKER_POOL_ID'] = str(pool_id)
# Sync worker with master
logger.info('Worker {} started'.format(worker_id))
if args.debug:
logger.debug("Debug logging enabled")
while True:
worker_queue.put(worker_id)
# The worker will receive {'task_id':<tid>, 'buffer':<buf>}
req = task_queue.get()
tasks_in_progress[worker_id] = req
tid = req['task_id']
logger.info("Received task {}".format(tid))
try:
worker_queue.get()
except queue.Empty:
logger.warning("Worker ID: {} failed to remove itself from ready_worker_queue".format(worker_id))
pass
try:
result = execute_task(req['buffer'])
serialized_result = serialize(result, buffer_threshold=1e6)
except Exception as e:
logger.info('Caught an exception: {}'.format(e))
result_package = {'task_id': tid, 'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))}
else:
result_package = {'task_id': tid, 'result': serialized_result}
# logger.debug("Result: {}".format(result))
logger.info("Completed task {}".format(tid))
try:
pkl_package = pickle.dumps(result_package)
except Exception:
logger.exception("Caught exception while trying to pickle the result package")
pkl_package = pickle.dumps({'task_id': tid,
'exception': serialize(RemoteExceptionWrapper(*sys.exc_info()))
})
result_queue.put(pkl_package)
tasks_in_progress.pop(worker_id)
def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Args:
- filename (string): Name of the file to write logs to
- name (string): Logger name
- level (logging.LEVEL): Set the logging level.
- format_string (string): Set the format string
Returns:
- None
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s".format(rank)
global logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
parser.add_argument("-a", "--addresses", default='',
help="Comma separated list of addresses at which the interchange could be reached")
parser.add_argument("-l", "--logdir", default="process_worker_pool_logs",
help="Process worker pool log directory")
parser.add_argument("-u", "--uid", default=str(uuid.uuid4()).split('-')[-1],
help="Unique identifier string for Manager")
parser.add_argument("-b", "--block_id", default=None,
help="Block identifier for Manager")
parser.add_argument("-c", "--cores_per_worker", default="1.0",
help="Number of cores assigned to each worker process. Default=1.0")
parser.add_argument("-m", "--mem_per_worker", default=0,
help="GB of memory assigned to each worker process. Default=0, no assignment")
parser.add_argument("-t", "--task_port", required=True,
help="REQUIRED: Task port for receiving tasks from the interchange")
parser.add_argument("--max_workers", default=float('inf'),
help="Caps the maximum workers that can be launched, default:infinity")
parser.add_argument("-p", "--prefetch_capacity", default=0,
help="Number of tasks that can be prefetched to the manager. Default is 0.")
parser.add_argument("--hb_period", default=30,
help="Heartbeat period in seconds. Uses manager default unless set")
parser.add_argument("--hb_threshold", default=120,
help="Heartbeat threshold in seconds. Uses manager default unless set")
parser.add_argument("--address_probe_timeout", default=30,
help="Timeout to probe for viable address to interchange. Default: 30s")
parser.add_argument("--poll", default=10,
help="Poll period used in milliseconds")
parser.add_argument("-r", "--result_port", required=True,
help="REQUIRED: Result port for posting results to the interchange")
args = parser.parse_args()
os.makedirs(os.path.join(args.logdir, "block-{}".format(args.block_id), args.uid), exist_ok=True)
try:
start_file_logger('{}/block-{}/{}/manager.log'.format(args.logdir, args.block_id, args.uid),
0,
level=logging.DEBUG if args.debug is True else logging.INFO)
logger.info("Python version: {}".format(sys.version))
logger.info("Debug logging: {}".format(args.debug))
logger.info("Log dir: {}".format(args.logdir))
logger.info("Manager ID: {}".format(args.uid))
logger.info("Block ID: {}".format(args.block_id))
logger.info("cores_per_worker: {}".format(args.cores_per_worker))
logger.info("mem_per_worker: {}".format(args.mem_per_worker))
logger.info("task_port: {}".format(args.task_port))
logger.info("result_port: {}".format(args.result_port))
logger.info("addresses: {}".format(args.addresses))
logger.info("max_workers: {}".format(args.max_workers))
logger.info("poll_period: {}".format(args.poll))
logger.info("address_probe_timeout: {}".format(args.address_probe_timeout))
logger.info("Prefetch capacity: {}".format(args.prefetch_capacity))
logger.info("Heartbeat threshold: {}".format(args.hb_threshold))
logger.info("Heartbeat period: {}".format(args.hb_period))
manager = Manager(task_port=args.task_port,
result_port=args.result_port,
addresses=args.addresses,
address_probe_timeout=int(args.address_probe_timeout),
uid=args.uid,
block_id=args.block_id,
cores_per_worker=float(args.cores_per_worker),
mem_per_worker=None if args.mem_per_worker == 'None' else float(args.mem_per_worker),
max_workers=args.max_workers if args.max_workers == float('inf') else int(args.max_workers),
prefetch_capacity=int(args.prefetch_capacity),
heartbeat_threshold=int(args.hb_threshold),
heartbeat_period=int(args.hb_period),
poll_period=int(args.poll))
manager.start()
except Exception as e:
logger.critical("process_worker_pool exiting from an exception")
logger.exception("Caught error: {}".format(e))
raise
else:
logger.info("process_worker_pool exiting")
print("PROCESS_WORKER_POOL exiting")
| 44.080745
| 149
| 0.565979
|
49330c7c9e97c1a810a54ac6cb73b674defe0efd
| 280
|
py
|
Python
|
src/apps/blog/forms.py
|
Pewpewarrows/MyModernLife
|
5348792b0aedc2bae6c91d688e61391b0656e136
|
[
"X11"
] | null | null | null |
src/apps/blog/forms.py
|
Pewpewarrows/MyModernLife
|
5348792b0aedc2bae6c91d688e61391b0656e136
|
[
"X11"
] | null | null | null |
src/apps/blog/forms.py
|
Pewpewarrows/MyModernLife
|
5348792b0aedc2bae6c91d688e61391b0656e136
|
[
"X11"
] | null | null | null |
from django.forms import ModelForm
from models import Blog, Post
class BlogForm(ModelForm):
class Meta:
model = Blog
fields = ('title',)
class PostForm(ModelForm):
class Meta:
model = Post
fields = ('title', 'markup', 'content', 'tags')
| 20
| 55
| 0.617857
|
500cb572e5e26a0c84338f1618bc53a03db2e8ef
| 1,551
|
py
|
Python
|
tests/integration_tests/feature_flag_tests.py
|
cevheri/superset
|
34542db3b615ff556281f80410f322f41f5a97a6
|
[
"Apache-2.0"
] | 19
|
2018-09-02T10:52:23.000Z
|
2022-03-24T09:43:48.000Z
|
tests/integration_tests/feature_flag_tests.py
|
cevheri/superset
|
34542db3b615ff556281f80410f322f41f5a97a6
|
[
"Apache-2.0"
] | 62
|
2020-05-06T22:51:53.000Z
|
2022-03-28T20:49:17.000Z
|
tests/integration_tests/feature_flag_tests.py
|
cevheri/superset
|
34542db3b615ff556281f80410f322f41f5a97a6
|
[
"Apache-2.0"
] | 15
|
2019-04-29T05:38:31.000Z
|
2022-02-12T10:47:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest.mock import patch
from superset import is_feature_enabled
from tests.integration_tests.base_tests import SupersetTestCase
class TestFeatureFlag(SupersetTestCase):
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"FOO": True},
clear=True,
)
def test_existing_feature_flags(self):
self.assertTrue(is_feature_enabled("FOO"))
@patch.dict(
"superset.extensions.feature_flag_manager._feature_flags", {}, clear=True
)
def test_nonexistent_feature_flags(self):
self.assertFalse(is_feature_enabled("FOO"))
def test_feature_flags(self):
self.assertEqual(is_feature_enabled("foo"), "bar")
self.assertEqual(is_feature_enabled("super"), "set")
| 37.829268
| 81
| 0.746615
|
8d6a2340769e867303e71495d14a1b9df247b032
| 680
|
py
|
Python
|
setup.py
|
rolandio/python-elgato-streamdeck
|
72805ca1475f6b2638344b287dda55386095f30c
|
[
"MIT"
] | 517
|
2018-01-06T18:27:40.000Z
|
2022-03-30T23:06:46.000Z
|
setup.py
|
rolandio/python-elgato-streamdeck
|
72805ca1475f6b2638344b287dda55386095f30c
|
[
"MIT"
] | 87
|
2018-01-06T13:10:41.000Z
|
2022-03-19T02:47:23.000Z
|
setup.py
|
rolandio/python-elgato-streamdeck
|
72805ca1475f6b2638344b287dda55386095f30c
|
[
"MIT"
] | 97
|
2018-01-06T12:00:09.000Z
|
2022-03-19T01:39:32.000Z
|
import setuptools
with open("VERSION", 'r') as f:
version = f.read().strip()
with open("README.md", 'r') as f:
long_description = f.read()
setuptools.setup(
name='streamdeck',
version=version,
description='Library to control Elgato StreamDeck devices.',
author='Dean Camera',
author_email='dean@fourwalledcubicle.com',
url='https://github.com/abcminiuser/python-elgato-streamdeck',
package_dir={'': 'src'},
packages=setuptools.find_packages(where='src'),
install_requires=[],
license="MIT",
long_description=long_description,
long_description_content_type="text/markdown",
include_package_data=True,
python_requires='>=3.8',
)
| 27.2
| 65
| 0.707353
|
be9d6b540c576ed8cd7fb0130f2abbe52bd60f50
| 922
|
py
|
Python
|
scraper/storage_spiders/onlinefridayvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | null | null | null |
scraper/storage_spiders/onlinefridayvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 10
|
2020-02-11T23:34:28.000Z
|
2022-03-11T23:16:12.000Z
|
scraper/storage_spiders/onlinefridayvn.py
|
chongiadung/choinho
|
d2a216fe7a5064d73cdee3e928a7beef7f511fd1
|
[
"MIT"
] | 3
|
2018-08-05T14:54:25.000Z
|
2021-06-07T01:49:59.000Z
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='title_sp_detail width_common']",
'price' : "//div[@class='block_item_detial']/div[@class='w70 price_at color_price_at']",
'category' : "//div[@id='breakcumb']//div[@class='cate_sales_name']/a",
'description' : "",
'images' : "//div[@class='center_thumb']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'onlinefriday.vn'
allowed_domains = ['onlinefriday.vn']
start_urls = ['http://onlinefriday.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/san-pham/']), 'parse_item'),
Rule(LinkExtractor(allow=['/nganh-hang/']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 34.148148
| 92
| 0.652928
|
7d5ca92e2aae0323480b3c8dd5b6afb5d48ec663
| 7,971
|
py
|
Python
|
grr/server/grr_response_server/server_logging.py
|
certxlm/grr
|
c2a442a27f656fb18dfa3bce098847e5c5b849d7
|
[
"Apache-2.0"
] | 1
|
2019-08-28T23:48:20.000Z
|
2019-08-28T23:48:20.000Z
|
grr/server/grr_response_server/server_logging.py
|
AjitNair2/grr
|
2a2ea891b3927775872904cdd402a18e7bb3d143
|
[
"Apache-2.0"
] | 2
|
2022-01-15T03:18:12.000Z
|
2022-02-13T22:02:43.000Z
|
grr/server/grr_response_server/server_logging.py
|
acidburn0zzz/grr
|
44e1a5b1630e8101610faaaebe15b19b5ad30cb1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Functions for server logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
from logging import handlers
import os
import socket
import time
from absl import flags
from grr_response_core import config
from grr_response_server import data_store
from grr_response_server.rdfvalues import objects as rdf_objects
try:
# pylint: disable=g-import-not-at-top
from grr_response_server.local import log as local_log
# pylint: enable=g-import-not-at-top
except ImportError:
local_log = None
# Global Application Logger.
LOGGER = None
flags.DEFINE_bool(
"verbose",
default=False,
help="Turn on verbose logging.",
allow_override=True)
class GrrApplicationLogger(object):
"""The GRR application logger.
These records are used for machine readable authentication logging of security
critical events.
"""
def GetNewEventId(self, event_time=None):
"""Return a unique Event ID string."""
if event_time is None:
event_time = int(time.time() * 1e6)
return "%s:%s:%s" % (event_time, socket.gethostname(), os.getpid())
def LogHttpAdminUIAccess(self, request, response):
"""Log an http based api call.
Args:
request: A WSGI request object.
response: A WSGI response object.
"""
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
api_method = response.headers.get("X-API-Method", "unknown")
api_reason = response.headers.get("X-GRR-Reason", "none")
log_msg = "%s API call [%s] by %s (reason: %s): %s [%d]" % (
event_id, api_method, request.user, api_reason, request.full_path,
response.status_code)
logging.info(log_msg)
if response.headers.get("X-No-Log") != "True":
entry = rdf_objects.APIAuditEntry.FromHttpRequestResponse(
request, response)
data_store.REL_DB.WriteAPIAuditEntry(entry)
def LogHttpFrontendAccess(self, request, source=None, message_count=None):
"""Write a log entry for a Frontend or UI Request.
Args:
request: A HttpRequest protobuf.
source: Client id of the client initiating the request. Optional.
message_count: Number of messages received from the client. Optional.
"""
# TODO(user): generate event_id elsewhere and use it for all the log
# messages that have to do with handling corresponding request.
event_id = self.GetNewEventId()
log_msg = "%s-%s [%s]: %s %s %s %s (%d)" % (
event_id, request.source_ip, source or "<unknown>", request.method,
request.url, request.user_agent, request.user, message_count or 0)
logging.info(log_msg)
class PreLoggingMemoryHandler(handlers.BufferingHandler):
"""Handler used before logging subsystem is initialized."""
def shouldFlush(self, record):
return len(self.buffer) >= self.capacity
def flush(self):
"""Flush the buffer.
    This is called when the buffer is full; we simply drop the oldest messages,
    keeping only the most recent `capacity` records.
"""
self.buffer = self.buffer[-self.capacity:]
class RobustSysLogHandler(handlers.SysLogHandler):
"""A handler which does not raise if it fails to connect."""
def __init__(self, *args, **kwargs):
self.formatter = None
try:
super(RobustSysLogHandler, self).__init__(*args, **kwargs)
except socket.error:
pass
def handleError(self, record):
"""Just ignore socket errors - the syslog server might come back."""
BASE_LOG_LEVELS = {
"FileHandler": logging.ERROR,
"NTEventLogHandler": logging.CRITICAL,
"StreamHandler": logging.ERROR,
"RobustSysLogHandler": logging.CRITICAL,
}
VERBOSE_LOG_LEVELS = {
"FileHandler": logging.DEBUG,
"NTEventLogHandler": logging.INFO,
"StreamHandler": logging.DEBUG,
"RobustSysLogHandler": logging.INFO,
}
def SetLogLevels():
logger = logging.getLogger()
if config.CONFIG["Logging.verbose"] or flags.FLAGS.verbose:
logging.root.setLevel(logging.DEBUG)
levels = VERBOSE_LOG_LEVELS
else:
levels = BASE_LOG_LEVELS
for handler in logger.handlers:
handler.setLevel(levels[handler.__class__.__name__])
LOG_FORMAT = ("%(levelname)s:%(asctime)s %(process)d "
"%(processName)s %(thread)d %(threadName)s "
"%(module)s:%(lineno)s] %(message)s")
def GetLogHandlers():
formatter = logging.Formatter(LOG_FORMAT)
engines = config.CONFIG["Logging.engines"]
logging.debug("Will use logging engines %s", engines)
for engine in engines:
try:
if engine == "stderr":
handler = logging.StreamHandler()
handler.setFormatter(formatter)
yield handler
elif engine == "event_log":
handler = handlers.NTEventLogHandler("GRR")
handler.setFormatter(formatter)
yield handler
elif engine == "syslog":
# Allow the specification of UDP sockets.
socket_name = config.CONFIG["Logging.syslog_path"]
if ":" in socket_name:
addr, port = socket_name.split(":", 1)
handler = RobustSysLogHandler((addr, int(port)))
else:
handler = RobustSysLogHandler(socket_name)
handler.setFormatter(formatter)
yield handler
elif engine == "file":
# Create a logfile if needed.
path = config.CONFIG["Logging.filename"]
logging.info("Writing log file to %s", path)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
handler = logging.FileHandler(path, mode="a", encoding="utf-8")
handler.setFormatter(formatter)
yield handler
else:
logging.error("Unknown logging engine %s", engine)
except Exception: # pylint:disable=broad-except
# Failure to log should not be fatal.
logging.exception("Unable to create logger %s", engine)
def LogInit():
"""Configure the logging subsystem."""
logging.debug("Initializing Logging subsystem.")
# The root logger.
logger = logging.getLogger()
memory_handlers = [
m for m in logger.handlers
if m.__class__.__name__ == "PreLoggingMemoryHandler"
]
  # Clear all handlers.
logger.handlers = list(GetLogHandlers())
SetLogLevels()
# Now flush the old messages into the log files.
for handler in memory_handlers:
for record in handler.buffer:
logger.handle(record)
def AppLogInit():
"""Initialize the Application Log.
This log is what will be used whenever someone does a log.LOGGER call. These
are used for more detailed application or event logs.
Returns:
GrrApplicationLogger object
"""
logging.debug("Initializing Application Logger.")
return GrrApplicationLogger()
def ServerLoggingStartupInit():
"""Initialize the server logging configuration."""
global LOGGER
if local_log:
logging.debug("Using local LogInit from %s", local_log)
local_log.LogInit()
logging.debug("Using local AppLogInit from %s", local_log)
LOGGER = local_log.AppLogInit()
else:
LogInit()
LOGGER = AppLogInit()
def SetTestVerbosity():
if local_log:
local_log.SetTestVerbosity()
return
# Test logging. This is only to stderr, adjust the levels according to flags
if flags.FLAGS.verbose:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.WARN)
# There is a catch 22 here: We need to start logging right away but we will only
# configure the logging system once the config is read. Therefore we set up a
# memory logger now and then when the log destination is configured we replay
# the logs into that. This ensures we do not lose any log messages during early
# program start up.
root_logger = logging.root
memory_logger = PreLoggingMemoryHandler(1000)
root_logger.addHandler(memory_logger)
memory_logger.setLevel(logging.DEBUG)
logging.debug("Starting GRR Prelogging buffer.")
| 29.522222
| 80
| 0.69803
|
f2b5e779e0653889e47e400105c4e61627428874
| 17,883
|
py
|
Python
|
IEEExtreme2017/elementary.py
|
Victoralin10/ACMSolutions
|
6d6e50da87b2bc455e953629737215b74b10269c
|
[
"MIT"
] | null | null | null |
IEEExtreme2017/elementary.py
|
Victoralin10/ACMSolutions
|
6d6e50da87b2bc455e953629737215b74b10269c
|
[
"MIT"
] | null | null | null |
IEEExtreme2017/elementary.py
|
Victoralin10/ACMSolutions
|
6d6e50da87b2bc455e953629737215b74b10269c
|
[
"MIT"
] | null | null | null |
M = {}
M["a"] = "0"
M["aa"] = "0"
M["aaa"] = "0"
M["ab"] = "0"
M["aba"] = "0"
M["abc"] = "0"
M["abinombca"] = "0"
M["able"] = "0"
M["about"] = "0"
M["account"] = "0"
M["activity"] = "0"
M["adaptive"] = "0"
M["addition"] = "0"
M["adjacent"] = "0"
M["after"] = "0"
M["algorithms"] = "0"
M["alice"] = "1"
M["alices"] = "2"
M["all"] = "0"
M["allowed"] = "0"
M["allv"] = "1"
M["alphabet"] = "0"
M["alphabetical"] = "0"
M["already"] = "0"
M["also"] = "1"
M["alternating"] = "0"
M["always"] = "0"
M["amazing"] = "0"
M["amount"] = "0"
M["an"] = "0"
M["and"] = "0"
M["announcements"] = "0"
M["another"] = "0"
M["ans"] = "0"
M["answer"] = "0"
M["any"] = "0"
M["application"] = "0"
M["apps"] = "0"
M["are"] = "0"
M["area"] = "0"
M["array"] = "1"
M["arrested"] = "0"
M["as"] = "1"
M["ask"] = "1"
M["asked"] = "0"
M["asks"] = "1"
M["asparagus"] = "1"
M["assured"] = "0"
M["at"] = "1"
M["attempt"] = "0"
M["attended"] = "0"
M["available"] = "0"
M["availablehome"] = "0"
M["b"] = "1"
M["ba"] = "1"
M["back"] = "2"
M["backtracking"] = "0"
M["bad"] = "0"
M["barbascos"] = "11"
M["based"] = "0"
M["basement"] = "0"
M["batbear"] = "1"
M["battle"] = "0"
M["battles"] = "1"
M["bb"] = "1"
M["bc"] = "1"
M["be"] = "1"
M["because"] = "2"
M["becoming"] = "0"
M["been"] = "0"
M["begins"] = "0"
M["being"] = "0"
M["below"] = "0"
M["besides"] = "0"
M["best"] = "0"
M["between"] = "0"
M["bin"] = "3"
M["binationalism"] = "10"
M["binombc"] = "0"
M["binomnnm"] = "0"
M["bison"] = "2"
M["bit"] = "0"
M["bitsigned"] = "0"
M["bitsstdch"] = "0"
M["black"] = "1"
M["blackgate"] = "1"
M["blog"] = "0"
M["blue"] = "0"
M["blues"] = "1"
M["bmaxn"] = "0"
M["board"] = "0"
M["bob"] = "1"
M["bobs"] = "1"
M["boody"] = "1"
M["book"] = "1"
M["books"] = "1"
M["bool"] = "0"
M["borrowed"] = "0"
M["both"] = "1"
M["bottom"] = "0"
M["brainteaser"] = "2"
M["brings"] = "0"
M["broccoli"] = "2"
M["bubbles"] = "0"
M["but"] = "0"
M["by"] = "1"
M["c"] = "1"
M["cacophonic"] = "12"
M["calculate"] = "2"
M["called"] = "0"
M["can"] = "1"
M["cannot"] = "0"
M["capacity"] = "0"
M["capture"] = "1"
M["case"] = "1"
M["cases"] = "3"
M["catastrophic"] = "0"
M["catch"] = "2"
M["cause"] = "2"
M["cbc"] = "1"
M["cc"] = "1"
M["cell"] = "0"
M["cells"] = "0"
M["challenge"] = "0"
M["challenged"] = "0"
M["character"] = "0"
M["characters"] = "0"
M["chat"] = "1"
M["cheer"] = "1"
M["chess"] = "2"
M["chinook"] = "3"
M["chocolate"] = "4"
M["choose"] = "2"
M["chooses"] = "6"
M["cin"] = "2"
M["cinnum"] = "0"
M["city"] = "0"
M["cj"] = "0"
M["class"] = "2"
M["classes"] = "4"
M["closeclose"] = "1"
M["cmath"] = "1"
M["cmaxn"] = "0"
M["cnt"] = "0"
M["collectively"] = "0"
M["column"] = "2"
M["columns"] = "2"
M["coming"] = "0"
M["competed"] = "0"
M["complete"] = "0"
M["composed"] = "0"
M["comprised"] = "0"
M["compute"] = "0"
M["computed"] = "0"
M["conclusion"] = "8"
M["connected"] = "0"
M["connection"] = "2"
M["consider"] = "0"
M["consist"] = "0"
M["consists"] = "4"
M["const"] = "0"
M["constraints"] = "0"
M["contain"] = "4"
M["containing"] = "0"
M["contains"] = "4"
M["contest"] = "0"
M["contests"] = "2"
M["context"] = "0"
M["continue"] = "0"
M["converts"] = "2"
M["conway"] = "0"
M["coordinates"] = "0"
M["corner"] = "2"
M["corrosion"] = "0"
M["cosmos"] = "7"
M["could"] = "0"
M["counted"] = "0"
M["cout"] = "0"
M["coutmostrando"] = "0"
M["coutvalor"] = "0"
M["coutvicharid"] = "0"
M["cover"] = "2"
M["covered"] = "0"
M["cpu"] = "2"
M["crc"] = "1"
M["creating"] = "0"
M["crew"] = "1"
M["criba"] = "1"
M["crosses"] = "4"
M["cstring"] = "0"
M["cubes"] = "4"
M["curious"] = "0"
M["current"] = "0"
M["custom"] = "0"
M["d"] = "0"
M["damage"] = "0"
M["dash"] = "0"
M["dd"] = "0"
M["dddimensional"] = "0"
M["ddhyper"] = "0"
M["ddtuple"] = "0"
M["decided"] = "0"
M["defeat"] = "0"
M["defeats"] = "0"
M["define"] = "0"
M["defined"] = "0"
M["defines"] = "0"
M["definite"] = "0"
M["denoting"] = "0"
M["describe"] = "0"
M["determined"] = "0"
M["devise"] = "0"
M["dfshijox"] = "0"
M["dfsint"] = "0"
M["di"] = "0"
M["diagonal"] = "0"
M["dictionary"] = "0"
M["did"] = "0"
M["different"] = "0"
M["diid"] = "0"
M["dim"] = "0"
M["dimdim"] = "0"
M["dimension"] = "0"
M["dimensions"] = "0"
M["dimi"] = "0"
M["dimil"] = "0"
M["distinct"] = "0"
M["distribution"] = "0"
M["divides"] = "0"
M["divisible"] = "0"
M["divisors"] = "0"
M["djid"] = "0"
M["dmaxnmaxn"] = "0"
M["do"] = "0"
M["does"] = "0"
M["doesnt"] = "0"
M["dont"] = "0"
M["down"] = "0"
M["dp"] = "0"
M["dqnd"] = "0"
M["dream"] = "0"
M["dreams"] = "0"
M["drops"] = "0"
M["duv"] = "0"
M["dvu"] = "0"
M["dynamicprogramming"] = "0"
M["e"] = "0"
M["each"] = "0"
M["edge"] = "0"
M["edges"] = "0"
M["either"] = "0"
M["elementary"] = "0"
M["elements"] = "0"
M["else"] = "0"
M["em"] = "0"
M["encint"] = "0"
M["endl"] = "0"
M["ends"] = "0"
M["enemy"] = "0"
M["english"] = "0"
M["enough"] = "0"
M["ensure"] = "0"
M["ensures"] = "0"
M["enter"] = "0"
M["equal"] = "0"
M["errorload"] = "0"
M["es"] = "1"
M["even"] = "0"
M["evening"] = "0"
M["every"] = "0"
M["examples"] = "0"
M["examplessubmit"] = "0"
M["exit"] = "0"
M["expression"] = "0"
M["f"] = "1"
M["facilitate"] = "1"
M["false"] = "1"
M["few"] = "1"
M["fewest"] = "0"
M["fight"] = "0"
M["filechoose"] = "0"
M["filecompilerun"] = "0"
M["find"] = "1"
M["finding"] = "0"
M["finite"] = "3"
M["first"] = "0"
M["fit"] = "0"
M["flush"] = "2"
M["fnin"] = "3"
M["follow"] = "0"
M["followed"] = "0"
M["following"] = "0"
M["for"] = "0"
M["forall"] = "0"
M["forest"] = "0"
M["forget"] = "0"
M["forint"] = "0"
M["forll"] = "0"
M["form"] = "0"
M["formally"] = "0"
M["forum"] = "0"
M["fought"] = "0"
M["fourth"] = "0"
M["fracbc"] = "2"
M["fracyx"] = "0"
M["friend"] = "0"
M["frog"] = "1"
M["from"] = "0"
M["function"] = "1"
M["functions"] = "1"
M["further"] = "0"
M["g"] = "0"
M["game"] = "0"
M["gang"] = "0"
M["generalize"] = "0"
M["geq"] = "0"
M["getlinecin"] = "2"
M["getting"] = "0"
M["gg"] = "0"
M["giclear"] = "0"
M["give"] = "0"
M["given"] = "0"
M["gives"] = "0"
M["giving"] = "0"
M["gmaxn"] = "0"
M["gmaxnmaxn"] = "0"
M["go"] = "0"
M["goal"] = "0"
M["good"] = "0"
M["got"] = "0"
M["gotham"] = "0"
M["gotta"] = "0"
M["graph"] = "0"
M["great"] = "0"
M["greedy"] = "0"
M["grid"] = "0"
M["group"] = "0"
M["groups"] = "0"
M["guaranteed"] = "0"
M["guarantees"] = "0"
M["gupushbackv"] = "0"
M["gvpushbacku"] = "0"
M["gxi"] = "0"
M["gym"] = "0"
M["h"] = "1"
M["had"] = "0"
M["hand"] = "0"
M["hard"] = "0"
M["harleyquin"] = "0"
M["has"] = "1"
M["hat"] = "1"
M["have"] = "0"
M["he"] = "1"
M["health"] = "1"
M["height"] = "0"
M["heights"] = "0"
M["help"] = "0"
M["helper"] = "0"
M["hence"] = "1"
M["her"] = "1"
M["heres"] = "2"
M["hh"] = "1"
M["hi"] = "1"
M["higher"] = "0"
M["highest"] = "0"
M["hijo"] = "0"
M["him"] = "0"
M["his"] = "1"
M["hit"] = "0"
M["home"] = "0"
M["horizontal"] = "0"
M["horton"] = "0"
M["hours"] = "0"
M["how"] = "2"
M["i"] = "1"
M["ia"] = "0"
M["iai"] = "0"
M["id"] = "0"
M["idbook"] = "1"
M["idbookend"] = "0"
M["idbookfindline"] = "1"
M["idbookline"] = "1"
M["ieeextreme"] = "0"
M["if"] = "1"
M["ifhijoy"] = "0"
M["ifmask"] = "1"
M["ifpri"] = "1"
M["ifvishijo"] = "0"
M["ify"] = "1"
M["iidi"] = "0"
M["iigxsizei"] = "0"
M["iii"] = "1"
M["iiini"] = "3"
M["iini"] = "3"
M["ijij"] = "0"
M["implementation"] = "0"
M["important"] = "0"
M["in"] = "2"
M["include"] = "0"
M["includealgorithm"] = "0"
M["includebitsstdch"] = "0"
M["includeclimits"] = "0"
M["includecmath"] = "0"
M["includecstdio"] = "0"
M["includecstring"] = "0"
M["includeiostream"] = "0"
M["includemap"] = "0"
M["includeset"] = "0"
M["includestring"] = "0"
M["includevector"] = "0"
M["inclusive"] = "0"
M["increase"] = "0"
M["increases"] = "2"
M["increasing"] = "0"
M["index"] = "0"
M["indices"] = "2"
M["information"] = "0"
M["initial"] = "3"
M["initially"] = "0"
M["inject"] = "0"
M["inosculate"] = "13"
M["input"] = "0"
M["inputoutput"] = "0"
M["inputoutputexplanation"] = "0"
M["inputoutputstderrcompilationexecutionexamplessubmission"] = "0"
M["inputrun"] = "0"
M["inputs"] = "5"
M["insist"] = "0"
M["int"] = "0"
M["integer"] = "0"
M["integers"] = "0"
M["interaction"] = "2"
M["interactor"] = "0"
M["interrogate"] = "0"
M["interrogations"] = "0"
M["interval"] = "0"
M["intervals"] = "0"
M["interviews"] = "0"
M["into"] = "0"
M["intsqrtb"] = "0"
M["intxsize"] = "0"
M["io"] = "1"
M["ioline"] = "1"
M["iostream"] = "0"
M["is"] = "1"
M["it"] = "0"
M["iterar"] = "0"
M["iterations"] = "1"
M["iterator"] = "0"
M["ithi"] = "1"
M["its"] = "1"
M["itself"] = "0"
M["j"] = "0"
M["ji"] = "0"
M["jiijnji"] = "0"
M["jjdj"] = "0"
M["job"] = "0"
M["john"] = "0"
M["joker"] = "0"
M["jokers"] = "0"
M["journey"] = "0"
M["just"] = "0"
M["k"] = "1"
M["kale"] = "0"
M["kbruntime"] = "0"
M["kind"] = "1"
M["king"] = "0"
M["kings"] = "0"
M["know"] = "2"
M["known"] = "2"
M["knows"] = "2"
M["laid"] = "0"
M["language"] = "0"
M["laparoscopy"] = "5"
M["large"] = "0"
M["last"] = "0"
M["latin"] = "1"
M["le"] = "0"
M["lead"] = "0"
M["leaderboard"] = "0"
M["learned"] = "0"
M["least"] = "0"
M["left"] = "0"
M["length"] = "0"
M["lengths"] = "0"
M["leq"] = "0"
M["less"] = "0"
M["lessons"] = "0"
M["lets"] = "0"
M["letters"] = "0"
M["lexicographical"] = "0"
M["li"] = "1"
M["library"] = "1"
M["life"] = "1"
M["like"] = "0"
M["limit"] = "0"
M["limpiar"] = "0"
M["line"] = "1"
M["lines"] = "2"
M["list"] = "0"
M["listed"] = "0"
M["ll"] = "0"
M["load"] = "0"
M["location"] = "0"
M["logout"] = "0"
M["long"] = "0"
M["longer"] = "0"
M["longii"] = "0"
M["looks"] = "0"
M["loop"] = "0"
M["loops"] = "0"
M["lose"] = "0"
M["loses"] = "0"
M["lower"] = "0"
M["lowest"] = "0"
M["lucky"] = "1"
M["m"] = "0"
M["main"] = "0"
M["make"] = "0"
M["makepair"] = "0"
M["makes"] = "0"
M["making"] = "0"
M["manages"] = "0"
M["map"] = "0"
M["mapint"] = "0"
M["marking"] = "0"
M["marks"] = "0"
M["mask"] = "0"
M["maskdmask"] = "0"
M["master"] = "0"
M["math"] = "0"
M["matrix"] = "0"
M["maxidistmaxn"] = "0"
M["maximise"] = "0"
M["maximum"] = "0"
M["maxn"] = "0"
M["maxs"] = "0"
M["mb"] = "0"
M["me"] = "0"
M["mea"] = "0"
M["mean"] = "0"
M["means"] = "0"
M["member"] = "0"
M["members"] = "0"
M["memory"] = "0"
M["memseta"] = "0"
M["memsetc"] = "0"
M["memsetd"] = "0"
M["memsetprsizeofpr"] = "0"
M["memsetvissizeofvis"] = "0"
M["messages"] = "0"
M["methodical"] = "0"
M["mid"] = "0"
M["middle"] = "0"
M["minimum"] = "0"
M["minute"] = "0"
M["minutes"] = "0"
M["missi"] = "0"
M["mm"] = "0"
M["mod"] = "0"
M["modelled"] = "0"
M["modify"] = "0"
M["modulo"] = "0"
M["moment"] = "0"
M["monkey"] = "0"
M["more"] = "1"
M["most"] = "0"
M["mostrarvectorint"] = "0"
M["move"] = "0"
M["moves"] = "1"
M["mp"] = "0"
M["mprueba"] = "0"
M["mrhammer"] = "0"
M["ms"] = "0"
M["muggs"] = "0"
M["multiple"] = "0"
M["multisetstring"] = "0"
M["multisetstringiterator"] = "0"
M["must"] = "0"
M["mvendl"] = "0"
M["my"] = "0"
M["n"] = "1"
M["nabn"] = "1"
M["name"] = "0"
M["names"] = "1"
M["namespace"] = "1"
M["need"] = "0"
M["needed"] = "0"
M["needs"] = "0"
M["negative"] = "0"
M["neighbours"] = "0"
M["network"] = "0"
M["never"] = "1"
M["new"] = "1"
M["next"] = "0"
M["night"] = "0"
M["nine"] = "2"
M["nm"] = "0"
M["nn"] = "1"
M["no"] = "2"
M["node"] = "0"
M["nomejalestino"] = "0"
M["nondecreasing"] = "0"
M["nonnegative"] = "0"
M["not"] = "0"
M["notation"] = "2"
M["note"] = "2"
M["notes"] = "2"
M["nothing"] = "0"
M["noticed"] = "0"
M["noughts"] = "0"
M["now"] = "2"
M["nuevo"] = "0"
M["num"] = "0"
M["number"] = "0"
M["numbered"] = "0"
M["numbers"] = "0"
M["of"] = "1"
M["officer"] = "1"
M["on"] = "1"
M["one"] = "1"
M["only"] = "0"
M["oo"] = "1"
M["oos"] = "2"
M["open"] = "0"
M["operation"] = "1"
M["opponent"] = "0"
M["opponents"] = "2"
M["or"] = "0"
M["order"] = "0"
M["our"] = "0"
M["out"] = "0"
M["output"] = "0"
M["outputs"] = "0"
M["outside"] = "0"
M["overcautiousness"] = "6"
M["own"] = "1"
M["pair"] = "1"
M["pairint"] = "0"
M["pairs"] = "1"
M["paola"] = "1"
M["paperandpencil"] = "0"
M["part"] = "0"
M["pass"] = "2"
M["path"] = "2"
M["paulie"] = "0"
M["pb"] = "2"
M["penitentiary"] = "0"
M["per"] = "1"
M["performed"] = "0"
M["performing"] = "0"
M["peru"] = "1"
M["peruvian"] = "0"
M["pii"] = "1"
M["pikachu"] = "1"
M["pikachus"] = "1"
M["place"] = "1"
M["placed"] = "0"
M["placement"] = "0"
M["places"] = "2"
M["placing"] = "0"
M["plan"] = "1"
M["play"] = "1"
M["player"] = "1"
M["players"] = "1"
M["please"] = "0"
M["pneumonoconiosis"] = "48"
M["point"] = "0"
M["pokmon"] = "2"
M["police"] = "2"
M["populated"] = "0"
M["portion"] = "0"
M["position"] = "5"
M["positive"] = "0"
M["possible"] = "0"
M["potell"] = "0"
M["potexymod"] = "0"
M["potion"] = "2"
M["potions"] = "2"
M["pr"] = "1"
M["preference"] = "1"
M["preferences"] = "2"
M["preferred"] = "0"
M["prepare"] = "1"
M["preparing"] = "0"
M["prevent"] = "0"
M["previous"] = "1"
M["prime"] = "0"
M["primo"] = "1"
M["primopushbacki"] = "4"
M["print"] = "0"
M["printed"] = "0"
M["prj"] = "0"
M["prn"] = "2"
M["probably"] = "0"
M["problem"] = "0"
M["proceeding"] = "0"
M["prodid"] = "0"
M["produce"] = "0"
M["profile"] = "0"
M["program"] = "0"
M["programmer"] = "0"
M["programming"] = "0"
M["provide"] = "0"
M["prueba"] = "0"
M["pruebapushback"] = "0"
M["puffin"] = "4"
M["pumps"] = "0"
M["pushback"] = "4"
M["q"] = "0"
M["qi"] = "0"
M["qid"] = "0"
M["qn"] = "0"
M["qq"] = "0"
M["quechua"] = "0"
M["queries"] = "0"
M["query"] = "0"
M["question"] = "0"
M["questions"] = "0"
M["quickly"] = "0"
M["quipu"] = "0"
M["r"] = "0"
M["rank"] = "1"
M["rb"] = "1"
M["rc"] = "0"
M["rcrc"] = "0"
M["rd"] = "0"
M["reach"] = "1"
M["read"] = "0"
M["readgraph"] = "0"
M["reads"] = "0"
M["really"] = "0"
M["rectangle"] = "0"
M["red"] = "0"
M["reduce"] = "0"
M["reflecting"] = "0"
M["remaining"] = "0"
M["represent"] = "0"
M["represented"] = "0"
M["representing"] = "0"
M["represents"] = "1"
M["required"] = "0"
M["requried"] = "0"
M["restore"] = "0"
M["result"] = "0"
M["results"] = "0"
M["return"] = "0"
M["rhinoceros"] = "6"
M["ri"] = "0"
M["right"] = "0"
M["rival"] = "0"
M["roster"] = "0"
M["row"] = "0"
M["rows"] = "0"
M["rr"] = "0"
M["s"] = "1"
M["same"] = "0"
M["sample"] = "0"
M["scanfd"] = "0"
M["scanfdd"] = "0"
M["scn"] = "3"
M["score"] = "3"
M["scoreboard"] = "0"
M["second"] = "2"
M["selecting"] = "0"
M["selects"] = "0"
M["selfloops"] = "0"
M["separated"] = "0"
M["server"] = "1"
M["settings"] = "0"
M["share"] = "0"
M["she"] = "1"
M["should"] = "0"
M["shows"] = "2"
M["sign"] = "0"
M["signed"] = "0"
M["simulate"] = "0"
M["since"] = "3"
M["single"] = "0"
M["size"] = "0"
M["sizeof"] = "0"
M["sizeofa"] = "0"
M["smaller"] = "0"
M["snow"] = "3"
M["so"] = "1"
M["some"] = "0"
M["sorted"] = "0"
M["space"] = "1"
M["spaces"] = "3"
M["special"] = "0"
M["specific"] = "0"
M["specifically"] = "0"
M["spinach"] = "3"
M["square"] = "0"
M["squares"] = "0"
M["ss"] = "1"
M["ssi"] = "2"
M["standard"] = "0"
M["start"] = "0"
M["starting"] = "0"
M["starts"] = "0"
M["state"] = "1"
M["statementsubmissions"] = "0"
M["std"] = "0"
M["step"] = "1"
M["still"] = "0"
M["stop"] = "0"
M["stopping"] = "0"
M["strategy"] = "0"
M["streetgang"] = "0"
M["strict"] = "0"
M["string"] = "0"
M["strings"] = "0"
M["stringstream"] = "0"
M["student"] = "0"
M["students"] = "0"
M["study"] = "0"
M["sub"] = "1"
M["subarray"] = "1"
M["subarrays"] = "1"
M["submissions"] = "0"
M["subset"] = "0"
M["succeeds"] = "0"
M["successful"] = "0"
M["such"] = "1"
M["sum"] = "0"
M["sumiab"] = "0"
M["summary"] = "0"
M["sums"] = "0"
M["sure"] = "1"
M["system"] = "0"
M["szx"] = "0"
M["t"] = "0"
M["take"] = "0"
M["takes"] = "1"
M["target"] = "0"
M["task"] = "1"
M["tasks"] = "1"
M["tasksplit"] = "0"
M["tb"] = "1"
M["tcint"] = "0"
M["teacher"] = "1"
M["teachers"] = "1"
M["teammate"] = "0"
M["tell"] = "0"
M["templateinteractive"] = "0"
M["templatesettings"] = "0"
M["test"] = "0"
M["tester"] = "0"
M["tests"] = "1"
M["textlengthsi"] = "0"
M["texttime"] = "0"
M["texttimetime"] = "0"
M["texttopic"] = "0"
M["texttopicitopic"] = "0"
M["texttopicntime"] = "0"
M["th"] = "1"
M["than"] = "0"
M["that"] = "1"
M["thats"] = "1"
M["the"] = "0"
M["their"] = "0"
M["thejoker"] = "0"
M["them"] = "0"
M["then"] = "0"
M["there"] = "0"
M["these"] = "0"
M["they"] = "0"
M["third"] = "0"
M["this"] = "1"
M["though"] = "0"
M["three"] = "0"
M["through"] = "0"
M["tictactoe"] = "0"
M["time"] = "0"
M["times"] = "0"
M["tired"] = "0"
M["tmaxn"] = "0"
M["to"] = "0"
M["today"] = "0"
M["tomorrow"] = "0"
M["top"] = "0"
M["topic"] = "0"
M["topics"] = "0"
M["torus"] = "0"
M["total"] = "0"
M["trainer"] = "0"
M["trains"] = "0"
M["transport"] = "0"
M["trapped"] = "0"
M["traps"] = "0"
M["travelling"] = "0"
M["true"] = "0"
M["try"] = "0"
M["tt"] = "0"
M["turn"] = "0"
M["turns"] = "0"
M["two"] = "0"
M["typedef"] = "0"
M["u"] = "1"
M["unable"] = "0"
M["underscore"] = "3"
M["undirected"] = "0"
M["unfortunately"] = "0"
M["unicorn"] = "4"
M["unless"] = "0"
M["unpopulated"] = "0"
M["until"] = "0"
M["unused"] = "0"
M["up"] = "1"
M["usa"] = "0"
M["usage"] = "0"
M["using"] = "0"
M["v"] = "1"
M["val"] = "1"
M["valid"] = "0"
M["value"] = "0"
M["values"] = "1"
M["vangelis"] = "0"
M["vbeginvend"] = "0"
M["vector"] = "0"
M["vectorint"] = "0"
M["vectors"] = "0"
M["vertex"] = "0"
M["vertical"] = "1"
M["vertices"] = "2"
M["viewworkspacefullscreen"] = "0"
M["viridian"] = "0"
M["vis"] = "1"
M["visx"] = "0"
M["void"] = "0"
M["want"] = "0"
M["wants"] = "0"
M["warning"] = "0"
M["was"] = "1"
M["water"] = "1"
M["way"] = "0"
M["wayne"] = "0"
M["we"] = "0"
M["were"] = "0"
M["what"] = "1"
M["where"] = "1"
M["whew"] = "1"
M["which"] = "1"
M["while"] = "0"
M["white"] = "1"
M["whitespace"] = "1"
M["who"] = "2"
M["wikipedia"] = "0"
M["will"] = "0"
M["win"] = "2"
M["winning"] = "0"
M["wins"] = "2"
M["wisconsin"] = "9"
M["with"] = "1"
M["won"] = "1"
M["word"] = "0"
M["words"] = "0"
M["work"] = "0"
M["workspace"] = "0"
M["world"] = "0"
M["worlds"] = "0"
M["worst"] = "0"
M["would"] = "0"
M["write"] = "0"
M["x"] = "0"
M["xint"] = "0"
M["xll"] = "0"
M["xmod"] = "0"
M["xs"] = "0"
M["xtreme"] = "0"
M["xx"] = "0"
M["xxs"] = "0"
M["xyxy"] = "0"
M["y"] = "1"
M["year"] = "0"
M["yll"] = "0"
M["you"] = "1"
M["your"] = "0"
M["yy"] = "1"
n = int(input())
for i in range(n):
print(M[input().strip()])
| 18.15533 | 66 | 0.450036 |
ee07597a6327492ae5bb579310e53d7f7351c126 | 307,820 | py | Python | ambari-server/src/test/python/TestAmbariServer.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | ["Apache-2.0", "MIT"] | null | null | null | ambari-server/src/test/python/TestAmbariServer.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | ["Apache-2.0", "MIT"] | null | null | null | ambari-server/src/test/python/TestAmbariServer.py | mkozinenko/ambari | 9cfe9559420a1f4af89a2d645af84b1ab20d6737 | ["Apache-2.0", "MIT"] | null | null | null |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
import sys
import os
import datetime
import errno
import json
from mock.mock import patch, MagicMock, create_autospec, call
import operator
from optparse import OptionParser
import platform
import re
import shutil
import signal
import stat
import StringIO
import tempfile
from unittest import TestCase
from only_for_platform import get_platform, not_for_platform, only_for_platform, os_distro_value, PLATFORM_LINUX, PLATFORM_WINDOWS
if get_platform() != PLATFORM_WINDOWS:
from pwd import getpwnam
# We have to use this import HACK because the filename contains a dash
with patch("platform.linux_distribution", return_value = os_distro_value):
with patch("os.symlink"):
with patch("__builtin__.open"):
with patch("glob.glob", return_value = ['/etc/init.d/postgresql-9.3']):
_ambari_server_ = __import__('ambari-server')
from ambari_commons.firewall import Firewall
from ambari_commons.os_check import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import get_verbose, set_verbose, get_silent, set_silent, get_debug_mode, \
print_info_msg, print_warning_msg, print_error_msg
from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions, remove_file, copy_file, \
is_valid_filepath
from ambari_server.dbConfiguration import DBMSConfigFactory, check_jdbc_drivers
from ambari_server.dbConfiguration_linux import PGConfig, LinuxDBMSConfig, OracleConfig
from ambari_server.properties import Properties
from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
from ambari_server.serverConfiguration import configDefaults, \
check_database_name_property, OS_FAMILY_PROPERTY, \
find_properties_file, get_ambari_classpath, get_ambari_jars, get_ambari_properties, get_JAVA_HOME, get_share_jars, \
parse_properties_file, read_ambari_user, update_ambari_properties, update_properties_2, write_property, find_jdk, \
AMBARI_CONF_VAR, AMBARI_SERVER_LIB, JDBC_DATABASE_PROPERTY, JDBC_RCA_PASSWORD_FILE_PROPERTY, \
PERSISTENCE_TYPE_PROPERTY, JDBC_URL_PROPERTY, get_conf_dir, JDBC_USER_NAME_PROPERTY, JDBC_PASSWORD_PROPERTY, \
JDBC_DATABASE_NAME_PROPERTY, OS_TYPE_PROPERTY, validate_jdk, JDBC_POSTGRES_SCHEMA_PROPERTY, \
RESOURCES_DIR_PROPERTY, JDBC_RCA_PASSWORD_ALIAS, JDBC_RCA_SCHEMA_PROPERTY, IS_LDAP_CONFIGURED, \
SSL_API, SSL_API_PORT, CLIENT_API_PORT_PROPERTY,\
JDBC_CONNECTION_POOL_TYPE, JDBC_CONNECTION_POOL_ACQUISITION_SIZE, \
JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL, JDBC_CONNECTION_POOL_MAX_AGE, JDBC_CONNECTION_POOL_MAX_IDLE_TIME, \
JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS,\
LDAP_MGR_PASSWORD_PROPERTY, LDAP_MGR_PASSWORD_ALIAS, JDBC_PASSWORD_FILENAME, NR_USER_PROPERTY, SECURITY_KEY_IS_PERSISTED, \
SSL_TRUSTSTORE_PASSWORD_PROPERTY, SECURITY_IS_ENCRYPTION_ENABLED, SSL_TRUSTSTORE_PASSWORD_ALIAS, \
SECURITY_MASTER_KEY_LOCATION, SECURITY_KEYS_DIR, LDAP_PRIMARY_URL_PROPERTY, store_password_file, \
get_pass_file_path, GET_FQDN_SERVICE_URL, JDBC_USE_INTEGRATED_AUTH_PROPERTY, SECURITY_KEY_ENV_VAR_NAME, \
JAVA_HOME_PROPERTY, JDK_NAME_PROPERTY, JCE_NAME_PROPERTY
from ambari_server.serverUtils import is_server_runing, refresh_stack_hash
from ambari_server.serverSetup import check_selinux, check_ambari_user, proceedJDBCProperties, SE_STATUS_DISABLED, SE_MODE_ENFORCING, configure_os_settings, \
download_and_install_jdk, prompt_db_properties, setup, \
AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file
from ambari_server.serverUpgrade import upgrade, upgrade_local_repo, change_objects_owner, upgrade_stack, \
run_stack_upgrade, run_metainfo_upgrade, run_schema_upgrade, move_user_custom_actions
from ambari_server.setupHttps import is_valid_https_port, setup_https, import_cert_and_key_action, get_fqdn, \
generate_random_string, get_cert_info, COMMON_NAME_ATTR, is_valid_cert_exp, NOT_AFTER_ATTR, NOT_BEFORE_ATTR, \
SSL_DATE_FORMAT, import_cert_and_key, is_valid_cert_host, setup_truststore, \
SRVR_ONE_WAY_SSL_PORT_PROPERTY, SRVR_TWO_WAY_SSL_PORT_PROPERTY, GANGLIA_HTTPS
from ambari_server.setupSecurity import adjust_directory_permissions, get_alias_string, get_ldap_event_spec_names, sync_ldap, LdapSyncOptions, \
configure_ldap_password, setup_ldap, REGEX_HOSTNAME_PORT, REGEX_TRUE_FALSE, REGEX_ANYTHING, setup_master_key, \
setup_ambari_krb5_jaas, ensure_can_start_under_current_user, generate_env
from ambari_server.userInput import get_YN_input, get_choice_string_input, get_validated_string_input, \
read_password
from ambari_server_main import get_ulimit_open_files, ULIMIT_OPEN_FILES_KEY, ULIMIT_OPEN_FILES_DEFAULT
CURR_AMBARI_VERSION = "2.0.0"
@patch("ambari_server.dbConfiguration_linux.get_postgre_hba_dir", new = MagicMock(return_value = "/var/lib/pgsql/data"))
@patch("ambari_server.dbConfiguration_linux.get_postgre_running_status", new = MagicMock(return_value = "running"))
class TestAmbariServer(TestCase):
def setUp(self):
out = StringIO.StringIO()
sys.stdout = out
def tearDown(self):
sys.stdout = sys.__stdout__
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_configure_pg_hba_ambaridb_users(self, run_os_command_method):
# Prepare mocks
run_os_command_method.return_value = (0, "", "")
database_username = "ffdf"
tf1 = tempfile.NamedTemporaryFile()
# Run test
PGConfig._configure_pg_hba_ambaridb_users(tf1.name, database_username)
# Check results
self.assertTrue(run_os_command_method.called)
string_expected = self.get_file_string(self.get_samples_dir("configure_pg_hba_ambaridb_users1"))
string_actual = self.get_file_string(tf1.name)
self.assertEquals(string_expected, string_actual)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("__builtin__.raw_input")
def test_servicename_regex(self, raw_input_method):
''' Test to make sure the service name can contain digits '''
set_silent(False)
raw_input_method.return_value = "OT100"
result = OracleConfig._get_validated_service_name("ambari", 1)
self.assertEqual("OT100", result, "Not accepting digits")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("__builtin__.raw_input")
def test_dbname_regex(self, raw_input_method):
''' Test to make sure the database name can contain digits '''
set_silent(False)
raw_input_method.return_value = "OT100"
result = LinuxDBMSConfig._get_validated_db_name("Database", "ambari")
self.assertEqual("OT100", result, "Not accepting digits")
pass
@not_for_platform(PLATFORM_WINDOWS)
def test_configure_pg_hba_postgres_user(self):
tf1 = tempfile.NamedTemporaryFile()
PGConfig.PG_HBA_CONF_FILE = tf1.name
with open(PGConfig.PG_HBA_CONF_FILE, 'w') as fout:
fout.write("\n")
fout.write("local all all md5\n")
fout.write("host all all 0.0.0.0/0 md5\n")
fout.write("host all all ::/0 md5\n")
PGConfig._configure_pg_hba_postgres_user()
expected = self.get_file_string(self.get_samples_dir(
"configure_pg_hba_ambaridb_users2"))
result = self.get_file_string(PGConfig.PG_HBA_CONF_FILE)
self.assertEqual(expected, result, "pg_hba_conf not processed")
mode = oct(os.stat(PGConfig.PG_HBA_CONF_FILE)[stat.ST_MODE])
str_mode = str(mode)[-4:]
self.assertEqual("0644", str_mode, "Wrong file permissions")
pass
@patch("__builtin__.raw_input")
def test_get_choice_string_input(self, raw_input_method):
prompt = "blablabla"
default = "default blablabla"
firstChoice = set(['yes', 'ye', 'y'])
secondChoice = set(['no', 'n'])
# test first input
raw_input_method.return_value = "Y"
result = get_choice_string_input(prompt, default,
firstChoice, secondChoice)
self.assertEquals(result, True)
raw_input_method.reset_mock()
# test second input
raw_input_method.return_value = "N"
result = get_choice_string_input(prompt, default,
firstChoice, secondChoice)
self.assertEquals(result, False)
raw_input_method.reset_mock()
# test enter pressed
raw_input_method.return_value = ""
result = get_choice_string_input(prompt, default,
firstChoice, secondChoice)
self.assertEquals(result, default)
raw_input_method.reset_mock()
# test wrong input
list_of_return_values = ['yes', 'dsad', 'fdsfds']
def side_effect(list):
return list_of_return_values.pop()
raw_input_method.side_effect = side_effect
result = get_choice_string_input(prompt, default,
firstChoice, secondChoice)
self.assertEquals(result, True)
self.assertEquals(raw_input_method.call_count, 3)
pass
@patch("re.search")
@patch("__builtin__.raw_input")
@patch("getpass.getpass")
def test_get_validated_string_input(self, get_pass_method,
raw_input_method, re_search_method):
prompt = "blabla"
default = "default_pass"
pattern = "pattern_pp"
description = "blabla2"
# check password input
self.assertFalse(False, get_silent())
is_pass = True
get_pass_method.return_value = "dfdsfdsfds"
result = get_validated_string_input(prompt, default,
pattern, description, is_pass)
self.assertEquals(get_pass_method.return_value, result)
get_pass_method.assure_called_once(prompt)
self.assertFalse(raw_input_method.called)
# check raw input
get_pass_method.reset_mock()
raw_input_method.reset_mock()
is_pass = False
raw_input_method.return_value = "dkf90ewuf0"
result = get_validated_string_input(prompt, default,
pattern, description, is_pass)
self.assertEquals(raw_input_method.return_value, result)
self.assertFalse(get_pass_method.called)
raw_input_method.assure_called_once(prompt)
pass
@not_for_platform(PLATFORM_WINDOWS)
def test_get_pass_file_path(self):
result = get_pass_file_path("/etc/ambari/conf_file", JDBC_PASSWORD_FILENAME)
self.assertEquals("/etc/ambari/password.dat", result)
pass
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup_security")
@patch("optparse.OptionParser")
def test_main_test_setup_security(self, OptionParserMock,
setup_security_method):
opm = OptionParserMock.return_value
options = MagicMock()
args = ["setup-security"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sid"
setup_security_method.return_value = None
_ambari_server_.mainBody()
_ambari_server_.mainBody()
self.assertTrue(setup_security_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
pass
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup_ambari_krb5_jaas")
@patch.object(_ambari_server_, "setup_master_key")
@patch.object(_ambari_server_, "setup_truststore")
@patch.object(_ambari_server_, "setup_https")
@patch.object(_ambari_server_, "get_validated_string_input")
def test_setup_security(self, get_validated_string_input_mock, setup_https_mock,
setup_truststore_mock, setup_master_key_mock,
setup_ambari_krb5_jaas_mock):
args = {}
get_validated_string_input_mock.return_value = '1'
_ambari_server_.setup_security(args)
self.assertTrue(setup_https_mock.called)
get_validated_string_input_mock.return_value = '2'
_ambari_server_.setup_security(args)
self.assertTrue(setup_master_key_mock.called)
get_validated_string_input_mock.return_value = '3'
_ambari_server_.setup_security(args)
self.assertTrue(setup_ambari_krb5_jaas_mock.called)
get_validated_string_input_mock.return_value = '4'
_ambari_server_.setup_security(args)
self.assertTrue(setup_truststore_mock.called)
get_validated_string_input_mock.return_value = '5'
_ambari_server_.setup_security(args)
self.assertTrue(setup_truststore_mock.called)
pass
@patch("re.sub")
@patch("fileinput.FileInput")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("os.path.exists")
def test_setup_ambari_krb5_jaas(self, exists_mock, search_mock,
get_validated_string_input_mock,
fileinput_mock, re_sub_mock):
search_mock.return_value = 'filepath'
exists_mock.return_value = False
# Negative case
try:
setup_ambari_krb5_jaas()
self.fail("Should throw exception")
except NonFatalException as fe:
# Expected
self.assertTrue("No jaas config file found at location" in fe.reason)
pass
# Positive case
exists_mock.reset_mock()
exists_mock.return_value = True
get_validated_string_input_mock.side_effect = ['aaa@aaa.cnn',
'pathtokeytab']
fileinput_mock.return_value = [ 'keyTab=xyz', 'principal=xyz' ]
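# The mocked FileInput yields a keyTab line and a principal line; both should be rewritten via re.sub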
setup_ambari_krb5_jaas()
self.assertTrue(fileinput_mock.called)
self.assertTrue(re_sub_mock.called)
self.assertTrue(re_sub_mock.call_args_list, [('aaa@aaa.cnn'),
('pathtokeytab')])
pass
@patch("sys.exit")
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
@patch("optparse.OptionParser")
def test_main_test_setup(self, OptionParserMock, reset_method, stop_method,
start_method, setup_method, exit_mock):
opm = OptionParserMock.return_value
options = MagicMock()
args = ["setup"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sid"
_ambari_server_.mainBody()
self.assertTrue(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
setup_method.reset_mock()
start_method.reset_mock()
stop_method.reset_mock()
reset_method.reset_mock()
exit_mock.reset_mock()
args = ["setup", "-v"]
options = MagicMock()
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sid"
setup_method.side_effect = Exception("Unexpected error")
try:
_ambari_server_.mainBody()
except Exception:
self.assertTrue(True)
self.assertTrue(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertTrue(get_verbose())
setup_method.reset_mock()
start_method.reset_mock()
stop_method.reset_mock()
reset_method.reset_mock()
exit_mock.reset_mock()
args = ["setup"]
options = MagicMock()
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sid"
options.verbose = False
setup_method.side_effect = Exception("Unexpected error")
_ambari_server_.mainBody()
self.assertTrue(exit_mock.called)
self.assertTrue(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(get_verbose())
pass
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
@patch("optparse.OptionParser")
def test_main_test_start(self, optionParserMock, reset_method, stop_method,
start_method, setup_method):
opm = optionParserMock.return_value
options = MagicMock()
args = ["setup"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sname"
_ambari_server_.mainBody()
self.assertTrue(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
def test_main_test_start_debug_short(self, reset_method, stop_method,
start_method, setup_method):
temp_args = sys.argv
try:
sys.argv = ["ambari-server", "start", "-g"]
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertTrue(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertTrue(get_debug_mode())
finally:
sys.argv = temp_args
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
def test_main_test_start_debug_short(self, reset_method, stop_method,
start_method, setup_method):
temp_args = sys.argv
try:
sys.argv = ["ambari-server", "pstart", "-g"]
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertTrue(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertTrue(get_debug_mode())
finally:
sys.argv = temp_args
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
def test_main_test_start_debug_long(self, reset_method, stop_method,
start_method, setup_method):
temp_args = sys.argv
try:
sys.argv = ["ambari-server", "start", "--debug"]
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertTrue(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertTrue(get_debug_mode())
finally:
sys.argv = temp_args
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
def test_main_test_start_debug_long(self, reset_method, stop_method,
start_method, setup_method):
temp_args = sys.argv
try:
sys.argv = ["ambari-server", "pstart", "--debug"]
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertTrue(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertTrue(get_debug_mode())
finally:
sys.argv = temp_args
pass
# Backup is not yet supported on Windows
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
@patch.object(_ambari_server_, "backup")
@patch.object(_ambari_server_, "restore")
@patch("optparse.OptionParser")
def test_main_test_backup(self, optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
start_method, setup_method):
opm = optionParserMock.return_value
options = MagicMock()
args = ["backup"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sname"
_ambari_server_.mainBody()
self.assertTrue(backup_mock.called)
self.assertFalse(restore_mock.called)
self.assertFalse(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
pass
# Restore is not yet supported on Windows
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
@patch.object(_ambari_server_, "backup")
@patch.object(_ambari_server_, "restore")
@patch("optparse.OptionParser")
def test_main_test_restore(self, optionParserMock, restore_mock, backup_mock, reset_method, stop_method,
start_method, setup_method):
opm = optionParserMock.return_value
options = MagicMock()
args = ["restore"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sname"
_ambari_server_.mainBody()
self.assertTrue(restore_mock.called)
self.assertFalse(backup_mock.called)
self.assertFalse(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "is_server_runing")
@patch.object(_ambari_server_, "reset")
@patch("optparse.OptionParser")
def test_main_test_stop(self, optionParserMock, reset_method, is_server_runing_method,
start_method, setup_method):
opm = optionParserMock.return_value
options = MagicMock()
del options.exit_message
args = ["stop"]
opm.parse_args.return_value = (options, args)
is_server_runing_method.return_value = (False, None)
options.dbms = None
options.sid_or_sname = "sid"
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertFalse(start_method.called)
self.assertTrue(is_server_runing_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
self.assertTrue(options.exit_message is None)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch("os_windows.win32serviceutil.WaitForServiceStatus")
@patch("os_windows.win32serviceutil.StopService")
@patch("os_windows.win32serviceutil.StopServiceWithDeps")
@patch.object(_ambari_server_, "reset")
def test_main_test_stop(self, reset_method, service_stop_w_deps_method,
service_stop_method, service_status_wait_method,
start_method, setup_method):
temp_args = sys.argv
try:
sys.argv = ["ambari-server", "stop"]
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertFalse(start_method.called)
self.assertTrue(service_stop_w_deps_method.called)
self.assertTrue(service_status_wait_method.called)
self.assertFalse(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
finally:
sys.argv = temp_args
pass
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
@patch.object(_ambari_server_, "start")
@patch.object(_ambari_server_, "stop")
@patch.object(_ambari_server_, "reset")
@patch("optparse.OptionParser")
def test_main_test_reset(self, optionParserMock, reset_method, stop_method,
start_method, setup_method):
opm = optionParserMock.return_value
options = MagicMock()
args = ["reset"]
opm.parse_args.return_value = (options, args)
options.dbms = None
options.sid_or_sname = "sid"
_ambari_server_.mainBody()
self.assertFalse(setup_method.called)
self.assertFalse(start_method.called)
self.assertFalse(stop_method.called)
self.assertTrue(reset_method.called)
self.assertFalse(False, get_verbose())
self.assertFalse(False, get_silent())
pass
@not_for_platform(PLATFORM_WINDOWS)
def test_configure_postgresql_conf(self):
tf1 = tempfile.NamedTemporaryFile()
PGConfig.POSTGRESQL_CONF_FILE = tf1.name
with open(PGConfig.POSTGRESQL_CONF_FILE, 'w') as f:
f.write("#listen_addresses = '127.0.0.1' #\n")
f.write("#listen_addresses = '127.0.0.1'")
PGConfig._configure_postgresql_conf()
expected = self.get_file_string(self.get_samples_dir(
"configure_postgresql_conf1"))
result = self.get_file_string(PGConfig.POSTGRESQL_CONF_FILE)
self.assertEqual(expected, result, "postgresql.conf not updated")
mode = oct(os.stat(PGConfig.POSTGRESQL_CONF_FILE)[stat.ST_MODE])
str_mode = str(mode)[-4:]
self.assertEqual("0644", str_mode, "Wrong file permissions")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(PGConfig, "_restart_postgres")
@patch.object(PGConfig, "_get_postgre_status")
@patch.object(PGConfig, "_configure_postgresql_conf")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_configure_postgres(self,
run_os_command_mock,
configure_postgresql_conf_mock,
get_postgre_status_mock,
restart_postgres_mock):
args = MagicMock()
properties = Properties()
args.database_index = 0
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.silent
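# Deleting these attributes makes the MagicMock raise AttributeError for them,
# emulating options that were not passed on the command line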
factory = DBMSConfigFactory()
dbConfig = factory.create(args, properties)
self.assertTrue(dbConfig.dbms, "postgres")
self.assertTrue(dbConfig.persistence_type, "local")
tf1 = tempfile.NamedTemporaryFile()
tf2 = tempfile.NamedTemporaryFile()
PGConfig.PG_HBA_CONF_FILE = tf1.name
PGConfig.PG_HBA_CONF_FILE_BACKUP = tf2.name
out = StringIO.StringIO()
sys.stdout = out
retcode, out1, err = dbConfig._configure_postgres()
sys.stdout = sys.__stdout__
self.assertEqual(0, retcode)
self.assertEqual("Backup for pg_hba found, reconfiguration not required\n",
out.getvalue())
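# Closing the backup tempfile deletes it, so the next _configure_postgres() call
# has to run the full reconfiguration path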
tf2.close()
get_postgre_status_mock.return_value = PGConfig.PG_STATUS_RUNNING, 0, "", ""
run_os_command_mock.return_value = 0, "", ""
restart_postgres_mock.return_value = 0, "", ""
rcode, out, err = dbConfig._configure_postgres()
self.assertTrue(os.path.isfile(PGConfig.PG_HBA_CONF_FILE_BACKUP),
"postgresql.conf backup not created")
self.assertTrue(run_os_command_mock.called)
mode = oct(os.stat(PGConfig.PG_HBA_CONF_FILE)[stat.ST_MODE])
str_mode = str(mode)[-4:]
self.assertEqual("0644", str_mode, "Wrong file permissions")
self.assertTrue(configure_postgresql_conf_mock.called)
self.assertEqual(0, rcode)
os.unlink(PGConfig.PG_HBA_CONF_FILE_BACKUP)
get_postgre_status_mock.return_value = "stopped", 0, "", ""
rcode, out, err = dbConfig._configure_postgres()
self.assertEqual(0, rcode)
os.unlink(PGConfig.PG_HBA_CONF_FILE_BACKUP)
sys.stdout = sys.__stdout__
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("time.sleep")
@patch("subprocess.Popen")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch.object(PGConfig, "_get_postgre_status")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
def test_restart_postgres(self, printInfoMsg_mock, get_postgre_status_mock,
run_os_command_mock, popenMock, sleepMock):
p = MagicMock()
p.poll.return_value = 0
popenMock.return_value = p
retcode, out, err = PGConfig._restart_postgres()
self.assertEqual(0, retcode)
p.poll.return_value = None
get_postgre_status_mock.return_value = "stopped", 0, "", ""
run_os_command_mock.return_value = (1, None, None)
retcode, out, err = PGConfig._restart_postgres()
self.assertEqual(1, retcode)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("shlex.split")
@patch("subprocess.Popen")
@patch("ambari_commons.os_linux.print_info_msg")
def test_run_os_command(self, printInfoMsg_mock, popenMock, splitMock):
p = MagicMock()
p.communicate.return_value = (None, None)
p.returncode = 3
popenMock.return_value = p
# with list arg
cmd = ["exec", "arg"]
run_os_command(cmd)
self.assertFalse(splitMock.called)
# with str arg
resp = run_os_command("runme")
self.assertEqual(3, resp[0])
self.assertTrue(splitMock.called)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("shlex.split")
@patch("subprocess.Popen")
@patch("ambari_commons.os_windows.print_info_msg")
def test_run_os_command(self, printInfoMsg_mock, popenMock, splitMock):
p = MagicMock()
p.communicate.return_value = (None, None)
p.returncode = 3
popenMock.return_value = p
# with list arg
cmd = ["exec", "arg"]
run_os_command(cmd)
self.assertFalse(splitMock.called)
# with str arg
resp = run_os_command("runme")
self.assertEqual(3, resp[0])
self.assertTrue(splitMock.called)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.search_file")
def test_write_property(self, search_file_mock, get_conf_dir_mock):
expected_content = "key1=val1\n"
tf1 = tempfile.NamedTemporaryFile()
search_file_mock.return_value = tf1.name
write_property("key1", "val1")
result = tf1.read()
self.assertTrue(expected_content in result)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.search_file")
def test_write_property(self, search_file_mock, get_conf_dir_mock):
expected_content = "key1=val1\n"
tf1 = tempfile.NamedTemporaryFile("r+b", delete=False)
search_file_mock.return_value = tf1.name
tf1.close()
write_property("key1", "val1")
hf1 = open(tf1.name, "r")
try:
result = hf1.read()
self.assertTrue(expected_content in result)
finally:
hf1.close()
os.unlink(tf1.name)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_setup_db(self, run_os_command_mock,
decrypt_password_for_alias_mock):
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
properties = Properties()
properties.process_pair(JDBC_PASSWORD_PROPERTY, get_alias_string("mypwdalias"))
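# Storing an alias (rather than a plain password) forces PGConfig to go through
# decrypt_password_for_alias, which is asserted below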
decrypt_password_for_alias_mock.return_value = "password"
dbms = PGConfig(args, properties, "local")
self.assertTrue(decrypt_password_for_alias_mock.called)
run_os_command_mock.return_value = (0, None, None)
result = dbms._setup_db()
self.assertTrue(run_os_command_mock.called)
self.assertEqual((0, None, None), result)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("time.sleep")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_setup_db_connect_attempts_fail(self, run_os_command_mock,
sleep_mock, decrypt_password_for_alias_mock):
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
properties = Properties()
decrypt_password_for_alias_mock.return_value = "password"
dbms = PGConfig(args, properties, "local")
run_os_command_mock.side_effect = [(1, "error", "error"), (1, "error", "error"),
(1, "error", "error")]
result = dbms._setup_db()
self.assertTrue(run_os_command_mock.called)
self.assertEqual((1, 'error', 'error') , result)
self.assertEqual(2, sleep_mock.call_count)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("time.sleep")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_setup_db_connect_attempts_success(self, run_os_command_mock,
sleep_mock, decrypt_password_for_alias_mock):
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
properties = Properties()
decrypt_password_for_alias_mock.return_value = "password"
dbms = PGConfig(args, properties, "local")
run_os_command_mock.side_effect = [(1, "error", "error"), (0, None, None),
(0, None, None)]
result = dbms._setup_db()
self.assertTrue(run_os_command_mock.called)
self.assertEqual((0, None, None) , result)
self.assertEqual(1, sleep_mock.call_count)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.run_os_command")
def test_check_selinux(self, run_os_command_mock, getYNInput_mock):
run_os_command_mock.return_value = (0, SE_STATUS_DISABLED,
None)
rcode = check_selinux()
self.assertEqual(0, rcode)
getYNInput_mock.return_value = True
run_os_command_mock.return_value = (0, "enabled "
+ SE_MODE_ENFORCING,
None)
rcode = check_selinux()
self.assertEqual(0, rcode)
self.assertTrue(run_os_command_mock.called)
self.assertTrue(getYNInput_mock.called)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_ambari_jars(self, printInfoMsg_mock):
env = "/ambari/jars"
os.environ[AMBARI_SERVER_LIB] = env
result = get_ambari_jars()
self.assertEqual(env, result)
del os.environ[AMBARI_SERVER_LIB]
result = get_ambari_jars()
self.assertEqual("/usr/lib/ambari-server", result)
self.assertTrue(printInfoMsg_mock.called)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_ambari_jars(self, printInfoMsg_mock):
env = "\\ambari\\jars"
os.environ[AMBARI_SERVER_LIB] = env
result = get_ambari_jars()
self.assertEqual(env, result)
del os.environ[AMBARI_SERVER_LIB]
result = get_ambari_jars()
self.assertEqual("lib", result)
self.assertTrue(printInfoMsg_mock.called)
pass
@patch("glob.glob")
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_share_jars(self, printInfoMsg_mock, globMock):
globMock.return_value = ["one", "two"]
expected = "one" + os.pathsep + "two" + os.pathsep + \
"one" + os.pathsep + "two" + os.pathsep + \
"one" + os.pathsep + "two"
result = get_share_jars()
self.assertEqual(expected, result)
globMock.return_value = []
expected = ""
result = get_share_jars()
self.assertEqual(expected, result)
pass
@patch("glob.glob")
@patch("ambari_server.serverConfiguration.print_info_msg")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
def test_get_ambari_classpath(self, get_ambari_properties_mock, printInfoMsg_mock, globMock):
globMock.return_value = ["one"]
result = get_ambari_classpath()
self.assertTrue(get_ambari_jars() in result)
self.assertTrue(get_share_jars() in result)
globMock.return_value = []
result = get_ambari_classpath()
self.assertTrue(get_ambari_jars() in result)
self.assertFalse(":" in result[2:])
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_conf_dir(self, printInfoMsg_mock):
env = "/dummy/ambari/conf"
os.environ[AMBARI_CONF_VAR] = env
result = get_conf_dir()
self.assertEqual(env, result)
del os.environ[AMBARI_CONF_VAR]
result = get_conf_dir()
self.assertEqual("/etc/ambari-server/conf", result)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.print_info_msg")
def test_get_conf_dir(self, printInfoMsg_mock):
env = "\\dummy\\ambari\\conf"
os.environ[AMBARI_CONF_VAR] = env
result = get_conf_dir()
self.assertEqual(env, result)
del os.environ[AMBARI_CONF_VAR]
result = get_conf_dir()
self.assertEqual("conf", result)
pass
def test_search_file(self):
path = os.path.dirname(__file__)
result = search_file(__file__, path)
expected = os.path.abspath(__file__)
self.assertEqual(expected, result)
result = search_file("non_existent_file", path)
self.assertEqual(None, result)
pass
@patch("ambari_server.serverConfiguration.search_file")
def test_find_properties_file(self, search_file_mock):
# Testing case when file is not found
search_file_mock.return_value = None
try:
find_properties_file()
self.fail("File not found'")
except FatalException:
# Expected
pass
self.assertTrue(search_file_mock.called)
# Testing case when file is found
value = MagicMock()
search_file_mock.return_value = value
result = find_properties_file()
self.assertTrue(result is value)
pass
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverConfiguration.Properties")
def test_read_ambari_user(self, properties_mock, get_ambari_properties_mock):
# Testing with defined user
properties_mock.__getitem__.return_value = "dummy_user"
get_ambari_properties_mock.return_value = properties_mock
user = read_ambari_user()
self.assertEquals(user, "dummy_user")
# Testing with undefined user
properties_mock.__getitem__.return_value = None
user = read_ambari_user()
self.assertEquals(user, None)
pass
@patch("ambari_server.setupSecurity.get_file_owner")
@patch("ambari_server.setupSecurity.get_ambari_repo_file_full_name")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.set_file_permissions")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.get_resources_location")
@patch("ambari_server.setupSecurity.get_value_from_properties")
@patch("os.mkdir")
@patch("shutil.rmtree")
@patch("ambari_commons.os_utils.print_info_msg")
@patch("ambari_server.setupSecurity.change_owner")
def test_adjust_directory_permissions(self, change_owner_mock, print_info_msg_mock, rmtree_mock, mkdir_mock,
get_value_from_properties_mock, get_resources_location_mock,
get_ambari_properties_mock, set_file_permissions_mock, exists_mock,
get_ambari_repo_file_full_name_mock, get_file_owner_mock):
# Testing bootstrap dir wipe
properties_mock = Properties()
properties_mock.process_pair(JDK_NAME_PROPERTY, "dummy_jdk")
properties_mock.process_pair(JCE_NAME_PROPERTY, "dummy_jce")
properties_mock.process_pair(JAVA_HOME_PROPERTY, "dummy_java_home")
get_ambari_properties_mock.return_value = properties_mock
get_value_from_properties_mock.return_value = "dummy_bootstrap_dir"
get_resources_location_mock.return_value = "dummy_resources_dir"
exists_mock.return_value = False
adjust_directory_permissions("user")
self.assertEquals(rmtree_mock.call_args_list[0][0][0], os.path.join(os.getcwd(), "dummy_bootstrap_dir"))
self.assertTrue(mkdir_mock.called)
set_file_permissions_mock.reset_mock()
change_owner_mock.reset_mock()
# Test recursive calls
old_adjust_owner_list = configDefaults.NR_ADJUST_OWNERSHIP_LIST
old_change_owner_list = configDefaults.NR_CHANGE_OWNERSHIP_LIST
try:
configDefaults.NR_ADJUST_OWNERSHIP_LIST = [
( "/etc/ambari-server/conf", "755", "{0}", True ),
( "/etc/ambari-server/conf/ambari.properties", "644", "{0}", False )
]
configDefaults.NR_CHANGE_OWNERSHIP_LIST = [
( "/etc/ambari-server", "{0}", True )
]
adjust_directory_permissions("user")
self.assertTrue(len(set_file_permissions_mock.call_args_list) ==
len(configDefaults.NR_ADJUST_OWNERSHIP_LIST))
self.assertEquals(set_file_permissions_mock.call_args_list[0][0][3], True)
self.assertEquals(set_file_permissions_mock.call_args_list[1][0][3], False)
self.assertTrue(len(change_owner_mock.call_args_list) ==
len(configDefaults.NR_CHANGE_OWNERSHIP_LIST))
self.assertEquals(change_owner_mock.call_args_list[0][0][2], True)
finally:
configDefaults.NR_ADJUST_OWNERSHIP_LIST = old_adjust_owner_list
configDefaults.NR_CHANGE_OWNERSHIP_LIST = old_change_owner_list
pass
#
# Test ambari repo file permission change call
#
# Reset the set_file_permissions() mock function
set_file_permissions_mock.reset_mock()
# Save the existing permissions list
old_adjust_owner_list = configDefaults.NR_ADJUST_OWNERSHIP_LIST
# Set up the mock function for os_utils.get_ambari_repo_file_full_name()
get_ambari_repo_file_full_name_mock.return_value = "ambari.dummy.repo"
# Set up the mock function for os_utils.get_file_owner()
get_file_owner_mock.return_value = "dummy.root"
try:
# Clear the list of files whose permissions are to be changed
configDefaults.NR_ADJUST_OWNERSHIP_LIST = [
]
# Call the function to be tested.
adjust_directory_permissions("dummy_user")
# Assert that set_file_permissions() was called
self.assertTrue(set_file_permissions_mock.called)
# One of the entries in NR_ADJUST_OWNERSHIP_LIST should be the full path to the ambari repo file.
# These are the expected values:
ambari_repo_file_entry = (
get_ambari_repo_file_full_name_mock(),
'644',
get_file_owner_mock(),
False
)
# Assert the arguments to the call set_file_permissions() - got from NR_ADJUST_OWNERSHIP_LIST
# Flag to ensure we found our entry in the set_file_permissions() call
entry_found = False
for args_entry in set_file_permissions_mock.call_args_list:
if args_entry[0][0] == ambari_repo_file_entry[0]: # File name
# ambari repo file name matched; assert the rest of the entries
self.assertEquals(args_entry[0][1], ambari_repo_file_entry[1]) # Permissions
self.assertEquals(args_entry[0][2], ambari_repo_file_entry[2]) # File owner
self.assertEquals(args_entry[0][3], ambari_repo_file_entry[3]) # Non-recursive
entry_found = True
break
# Ensure that the ambari repo file entry was found
self.assertTrue(entry_found)
finally:
# Restore the permissions list
configDefaults.NR_ADJUST_OWNERSHIP_LIST = old_adjust_owner_list
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("os.path.exists")
@patch("ambari_commons.os_linux.os_run_os_command")
@patch("ambari_commons.os_linux.print_warning_msg")
@patch("ambari_commons.os_utils.print_info_msg")
def test_set_file_permissions(self, print_info_msg_mock, print_warning_msg_mock,
run_os_command_mock, exists_mock):
# Testing not existent file scenario
exists_mock.return_value = False
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", False)
self.assertFalse(run_os_command_mock.called)
self.assertTrue(print_info_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
# Testing OK scenario
exists_mock.return_value = True
run_os_command_mock.side_effect = [(0, "", ""), (0, "", "")]
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", False)
self.assertTrue(len(run_os_command_mock.call_args_list) == 2)
self.assertFalse(print_warning_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
# Testing first command fail
run_os_command_mock.side_effect = [(1, "", ""), (0, "", "")]
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", False)
self.assertTrue(len(run_os_command_mock.call_args_list) == 2)
self.assertTrue(print_warning_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
# Testing second command fail
run_os_command_mock.side_effect = [(0, "", ""), (1, "", "")]
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", False)
self.assertTrue(len(run_os_command_mock.call_args_list) == 2)
self.assertTrue(print_warning_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
# Testing recursive operation
exists_mock.return_value = True
run_os_command_mock.side_effect = [(0, "", ""), (0, "", "")]
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", True)
self.assertTrue(len(run_os_command_mock.call_args_list) == 2)
self.assertTrue("-R" in run_os_command_mock.call_args_list[0][0][0])
self.assertTrue("-R" in run_os_command_mock.call_args_list[1][0][0])
self.assertFalse(print_warning_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
# Testing non-recursive operation
exists_mock.return_value = True
run_os_command_mock.side_effect = [(0, "", ""), (0, "", "")]
set_file_permissions("dummy-file", "dummy-mod",
"dummy-user", False)
self.assertTrue(len(run_os_command_mock.call_args_list) == 2)
self.assertFalse("-R" in run_os_command_mock.call_args_list[0][0][0])
self.assertFalse("-R" in run_os_command_mock.call_args_list[1][0][0])
self.assertFalse(print_warning_msg_mock.called)
run_os_command_mock.reset_mock()
print_warning_msg_mock.reset_mock()
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.get_validated_string_input")
@patch("ambari_server.serverSetup.print_info_msg")
@patch("ambari_server.serverSetup.print_warning_msg")
@patch("ambari_server.serverSetup.run_os_command")
def test_create_custom_user(self, run_os_command_mock, print_warning_msg_mock,
print_info_msg_mock, get_validated_string_input_mock):
options = MagicMock()
user = "dummy-user"
get_validated_string_input_mock.return_value = user
userChecks = AmbariUserChecks(options)
# Testing scenario: absent user
run_os_command_mock.side_effect = [(0, "", "")]
result = userChecks._create_custom_user()
self.assertFalse(print_warning_msg_mock.called)
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, user)
print_info_msg_mock.reset_mock()
print_warning_msg_mock.reset_mock()
run_os_command_mock.reset_mock()
# Testing scenario: existing user
run_os_command_mock.side_effect = [(9, "", "")]
result = userChecks._create_custom_user()
self.assertTrue("User dummy-user already exists" in str(print_info_msg_mock.call_args_list[1][0]))
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, user)
print_info_msg_mock.reset_mock()
print_warning_msg_mock.reset_mock()
run_os_command_mock.reset_mock()
# Testing scenario: os command fail
run_os_command_mock.side_effect = [(1, "", "")]
result = userChecks._create_custom_user()
self.assertTrue(print_warning_msg_mock.called)
self.assertEquals(result, 1)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("win32security.LsaAddAccountRights")
@patch("win32security.LookupAccountName")
@patch("win32net.NetUserAdd")
@patch("win32net.NetUserGetInfo")
@patch("win32security.LsaOpenPolicy")
@patch("win32net.NetGetDCName")
@patch("ambari_server.serverSetup.get_validated_string_input")
@patch("ambari_server.serverSetup.print_info_msg")
@patch("ambari_server.serverSetup.print_warning_msg")
def test_create_custom_user(self, print_warning_msg_mock,
print_info_msg_mock, get_validated_string_input_mock,
net_get_dc_name_mock, lsa_open_policy_mock,
net_user_get_info_mock, net_user_add_mock,
lookup_account_name_mock, lsa_add_account_rights_mock):
def _reset_mocks():
get_validated_string_input_mock.reset_mock()
print_info_msg_mock.reset_mock()
print_warning_msg_mock.reset_mock()
net_get_dc_name_mock.reset_mock()
net_user_get_info_mock.reset_mock()
net_user_add_mock.reset_mock()
lookup_account_name_mock.reset_mock()
lsa_add_account_rights_mock.reset_mock()
pass
options = MagicMock()
user = "dummy-user"
get_validated_string_input_mock.return_value = user
userChecks = AmbariUserChecks(options)
# Testing scenario: absent user
def user_not_found(*args, **keywargs):
import pywintypes
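# 2221 is the Windows NERR_UserNotFound error code, i.e. the account does not exist yet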
raise pywintypes.error(2221)
net_user_get_info_mock.side_effect = user_not_found
result = userChecks._create_custom_user()
self.assertTrue(print_warning_msg_mock.called)
self.assertTrue(net_user_add_mock.called)
self.assertEqual(str(net_user_add_mock.call_args_list[0][0]), str((None, 1, {'comment': 'Ambari user', 'password': 'dummy-user', 'flags': 513, 'name': 'dummy-user', 'priv': 1})))
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, ".\\" + user)
_reset_mocks()
# Testing scenario: existing user
net_user_get_info_mock.side_effect = None
net_user_get_info_mock.return_value = { "name":"dummy_user" }
#lookup_account_name_mock
#lsa_add_account_rights_mock
result = userChecks._create_custom_user()
self.assertTrue("User dummy-user already exists" in print_info_msg_mock.call_args_list[0][0][0])
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, ".\\" + user)
self.assertFalse(net_user_add_mock.called)
_reset_mocks()
# Testing scenario: new domain user
get_validated_string_input_mock.side_effect = ["dummy_domain\\dummy_user", "newpassword"]
net_get_dc_name_mock.return_value = "dummy_dc"
net_user_get_info_mock.side_effect = user_not_found
result = userChecks._create_custom_user()
self.assertTrue(net_get_dc_name_mock.called)
self.assertEqual(str(net_get_dc_name_mock.call_args_list[0][0]), str((None, "dummy_domain")))
self.assertTrue(net_user_add_mock.called)
self.assertEqual(str(net_user_add_mock.call_args_list[0][0]), str(('dummy_dc', 1, {'comment': 'Ambari user', 'password': 'newpassword', 'flags': 513, 'name': 'dummy_user', 'priv': 1})))
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, "dummy_domain\\dummy_user")
_reset_mocks()
# Testing scenario: existing domain user
get_validated_string_input_mock.side_effect = ["dummy_domain\\dummy_user", "newpassword"]
net_user_get_info_mock.side_effect = None
net_user_get_info_mock.return_value = { "name":"dummy_domain\\dummy_user" }
result = userChecks._create_custom_user()
self.assertTrue("User dummy_domain\\dummy_user already exists" in print_info_msg_mock.call_args_list[0][0][0])
self.assertTrue(net_get_dc_name_mock.called)
self.assertEqual(str(net_get_dc_name_mock.call_args_list[0][0]), str((None, "dummy_domain")))
self.assertFalse(net_user_add_mock.called)
self.assertEquals(result, 0)
self.assertEquals(userChecks.user, "dummy_domain\\dummy_user")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.read_ambari_user")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.get_validated_string_input")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
@patch("ambari_server.serverSetup.run_os_command")
@patch("ambari_server.serverSetup.print_error_msg")
@patch("ambari_server.serverSetup.print_warning_msg")
@patch("ambari_server.serverSetup.print_info_msg")
def test_check_ambari_user(self, print_info_msg_mock, print_warning_msg_mock, print_error_msg_mock,
run_os_command_mock, adjust_directory_permissions_mock,
get_validated_string_input_mock, get_YN_input_mock, read_ambari_user_mock):
def _reset_mocks():
get_YN_input_mock.reset_mock()
get_validated_string_input_mock.reset_mock()
run_os_command_mock.reset_mock()
adjust_directory_permissions_mock.reset_mock()
pass
options = MagicMock()
run_os_command_mock.return_value = (0, "", "")
# Scenario: user is already defined, user does not want to reconfigure it
read_ambari_user_mock.return_value = "dummy-user"
get_YN_input_mock.return_value = False
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertFalse(get_validated_string_input_mock.called)
self.assertFalse(run_os_command_mock.called)
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Scenario: user is already defined, but user wants to reconfigure it
read_ambari_user_mock.return_value = "dummy-user"
get_validated_string_input_mock.return_value = "new-dummy-user"
get_YN_input_mock.return_value = True
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(result[2] == "new-dummy-user")
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Negative scenario: user is already defined, but user wants
# to reconfigure it, user creation failed
read_ambari_user_mock.return_value = "dummy-user"
run_os_command_mock.return_value = (1, "", "")
get_YN_input_mock.return_value = True
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(run_os_command_mock.called)
self.assertFalse(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 1)
_reset_mocks()
# Scenario: user is not defined (setup process)
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
get_validated_string_input_mock.return_value = "dummy-user"
run_os_command_mock.return_value = (0, "", "")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(run_os_command_mock.called)
self.assertTrue(result[2] == "dummy-user")
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Scenario: user is not defined (setup process), user creation failed
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
run_os_command_mock.return_value = (1, "", "")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(run_os_command_mock.called)
self.assertFalse(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 1)
_reset_mocks()
# Negative scenario: user is not defined (setup process), user creation failed
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
run_os_command_mock.return_value = (1, "", "")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(run_os_command_mock.called)
self.assertFalse(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 1)
_reset_mocks()
# Scenario: user is not defined and left to be root
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = False
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertFalse(get_validated_string_input_mock.called)
self.assertFalse(run_os_command_mock.called)
self.assertTrue(result[2] == "root")
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.os_windows.UserHelper.add_user_privilege")
@patch("ambari_commons.os_windows.UserHelper.create_user")
@patch("ambari_commons.os_windows.UserHelper.find_user")
@patch("ambari_server.serverSetup.read_ambari_user")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.get_validated_string_input")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
@patch("ambari_server.serverSetup.run_os_command")
@patch("ambari_server.serverSetup.print_error_msg")
@patch("ambari_server.serverSetup.print_warning_msg")
@patch("ambari_server.serverSetup.print_info_msg")
def test_check_ambari_user(self, print_info_msg_mock, print_warning_msg_mock, print_error_msg_mock,
run_os_command_mock, adjust_directory_permissions_mock,
get_validated_string_input_mock, get_YN_input_mock, read_ambari_user_mock,
find_user_mock, create_user_mock, add_user_privilege_mock):
def _reset_mocks():
get_YN_input_mock.reset_mock()
get_validated_string_input_mock.reset_mock()
find_user_mock.reset_mock()
create_user_mock.reset_mock()
adjust_directory_permissions_mock.reset_mock()
pass
options = MagicMock()
options.svc_user = None
options.svc_password = None
run_os_command_mock.return_value = (0, "", "")
# Scenario: user is already defined, user does not want to reconfigure it
read_ambari_user_mock.return_value = "dummy-user"
get_YN_input_mock.return_value = False
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertFalse(get_validated_string_input_mock.called)
self.assertFalse(find_user_mock.called)
self.assertFalse(create_user_mock.called)
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Scenario: user is already defined, but user wants to reconfigure it
read_ambari_user_mock.return_value = "dummy-user"
get_validated_string_input_mock.side_effect = ["new-dummy-user", "new_password"]
get_YN_input_mock.return_value = True
find_user_mock.return_value = False
create_user_mock.return_value = (0, "User created")
add_user_privilege_mock.return_value = (0, "Privilege added")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertEqual(result[2], ".\\new-dummy-user")
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertTrue(find_user_mock.called)
self.assertTrue(create_user_mock.called)
self.assertTrue(add_user_privilege_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Negative scenario: user is already defined, but user wants
# to reconfigure it, user creation failed
read_ambari_user_mock.return_value = "dummy-user"
get_validated_string_input_mock.side_effect = ["new-dummy-user", "new_password"]
find_user_mock.return_value = False
create_user_mock.return_value = (-1, "Failed")
get_YN_input_mock.return_value = True
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(create_user_mock.called)
self.assertFalse(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], -1)
_reset_mocks()
# Scenario: user is not defined (setup process)
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
get_validated_string_input_mock.side_effect = ["dummy-user", "new_password"]
create_user_mock.return_value = (0, "User created")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(create_user_mock.called)
self.assertTrue(result[2] == ".\\dummy-user")
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Scenario: user is not defined, use system account (setup process)
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
get_validated_string_input_mock.side_effect = ["NT AUTHORITY\\SYSTEM"]
create_user_mock.return_value = (0, "User created")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertEqual(get_validated_string_input_mock.call_count, 1)
self.assertFalse(find_user_mock.called)
self.assertFalse(create_user_mock.called)
self.assertTrue(result[2] == "NT AUTHORITY\\SYSTEM")
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
_reset_mocks()
# Scenario: user is not defined (setup process), user creation failed
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = True
get_validated_string_input_mock.side_effect = ["new-dummy-user", "new_password"]
find_user_mock.return_value = False
create_user_mock.return_value = (-1, "Failed")
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertTrue(create_user_mock.called)
self.assertFalse(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], -1)
_reset_mocks()
# Scenario: user is not defined and left to be the default
read_ambari_user_mock.return_value = None
get_YN_input_mock.return_value = False
result = check_ambari_user(options)
self.assertTrue(get_YN_input_mock.called)
self.assertFalse(get_validated_string_input_mock.called)
self.assertFalse(run_os_command_mock.called)
self.assertTrue(result[2] == "NT AUTHORITY\\SYSTEM")
self.assertTrue(adjust_directory_permissions_mock.called)
self.assertEqual(result[0], 0)
pass
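# Verifies that store_password_file() writes the supplied password to the given file and applies file permissions through set_file_permissions.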
@patch("ambari_server.serverConfiguration.search_file")
@patch("__builtin__.open")
@patch("ambari_server.serverConfiguration.read_ambari_user")
@patch("ambari_server.serverConfiguration.set_file_permissions")
def test_store_password_file(self, set_file_permissions_mock,
read_ambari_user_mock, open_mock, search_file_mock):
search_file_mock.return_value = "/etc/ambari-server/conf/ambari.properties"
open_mock.return_value = MagicMock()
store_password_file("password", "passfile")
self.assertTrue(set_file_permissions_mock.called)
pass
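# Exercises Firewall().getFirewallObject() for Fedora, Ubuntu, SUSE and generic Red Hat, checking that check_firewall() reports active/inactive from the mocked command output and return code.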
@patch("subprocess.Popen")
@patch.object(OSCheck, "get_os_family")
@patch.object(OSCheck, "get_os_type")
@patch.object(OSCheck, "get_os_major_version")
def test_check_firewall_is_running(self, get_os_major_version_mock, get_os_type_mock, get_os_family_mock, popen_mock):
get_os_major_version_mock.return_value = 18
get_os_type_mock.return_value = OSConst.OS_FEDORA
get_os_family_mock.return_value = OSConst.REDHAT_FAMILY
firewall_obj = Firewall().getFirewallObject()
p = MagicMock()
p.communicate.return_value = ("active", "err")
p.returncode = 0
popen_mock.return_value = p
self.assertEqual("Fedora18FirewallChecks", firewall_obj.__class__.__name__)
self.assertTrue(firewall_obj.check_firewall())
p.communicate.return_value = ("", "err")
p.returncode = 3
self.assertFalse(firewall_obj.check_firewall())
self.assertEqual("err", firewall_obj.stderrdata)
get_os_type_mock.return_value = OSConst.OS_UBUNTU
get_os_family_mock.return_value = OSConst.UBUNTU_FAMILY
firewall_obj = Firewall().getFirewallObject()
p.communicate.return_value = ("Status: active", "err")
p.returncode = 0
self.assertEqual("UbuntuFirewallChecks", firewall_obj.__class__.__name__)
self.assertTrue(firewall_obj.check_firewall())
p.communicate.return_value = ("Status: inactive", "err")
p.returncode = 0
self.assertFalse(firewall_obj.check_firewall())
self.assertEqual("err", firewall_obj.stderrdata)
get_os_type_mock.return_value = ""
get_os_family_mock.return_value = OSConst.SUSE_FAMILY
firewall_obj = Firewall().getFirewallObject()
p.communicate.return_value = ("### iptables", "err")
p.returncode = 0
self.assertEqual("SuseFirewallChecks", firewall_obj.__class__.__name__)
self.assertTrue(firewall_obj.check_firewall())
p.communicate.return_value = ("SuSEfirewall2 not active", "err")
p.returncode = 0
self.assertFalse(firewall_obj.check_firewall())
self.assertEqual("err", firewall_obj.stderrdata)
get_os_type_mock.return_value = ""
get_os_family_mock.return_value = OSConst.REDHAT_FAMILY
firewall_obj = Firewall().getFirewallObject()
p.communicate.return_value = ("Table: filter", "err")
p.returncode = 0
self.assertEqual("FirewallChecks", firewall_obj.__class__.__name__)
self.assertTrue(firewall_obj.check_firewall())
p.communicate.return_value = ("", "err")
p.returncode = 3
self.assertFalse(firewall_obj.check_firewall())
self.assertEqual("err", firewall_obj.stderrdata)
pass
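# Covers setup_truststore(): silent mode is rejected, a missing JDK raises FatalException, and both the https-enabled and certificate-import paths update and store the truststore properties.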
@patch("ambari_server.setupHttps.get_validated_filepath_input")
@patch("ambari_server.setupHttps.get_validated_string_input")
@patch("ambari_server.setupHttps.run_os_command")
@patch("ambari_server.setupHttps.get_truststore_type")
@patch("__builtin__.open")
@patch("ambari_server.setupHttps.find_properties_file")
@patch("ambari_server.setupHttps.run_component_https_cmd")
@patch("ambari_server.setupHttps.get_delete_cert_command")
@patch("ambari_server.setupHttps.get_truststore_password")
@patch("ambari_server.setupHttps.get_truststore_path")
@patch("ambari_server.setupHttps.get_YN_input")
@patch("ambari_server.setupHttps.get_ambari_properties")
@patch("ambari_server.setupHttps.find_jdk")
def test_setup_truststore(self, find_jdk_mock, get_ambari_properties_mock, get_YN_input_mock,
get_truststore_path_mock, get_truststore_password_mock,
get_delete_cert_command_mock, run_component_https_cmd_mock,
find_properties_file_mock, open_mock,
get_truststore_type_mock, run_os_command_mock,
get_validated_string_input_mock,
get_validated_filepath_input_mock):
out = StringIO.StringIO()
sys.stdout = out
component = "component"
command = "command"
property = "use_ssl"
alias = "alias"
#Silent mode
set_silent(True)
setup_truststore()
self.assertEqual('setup-security is not enabled in silent mode.\n', out.getvalue())
sys.stdout = sys.__stdout__
#Verbose mode and jdk_path is None
set_silent(False)
p = get_ambari_properties_mock.return_value
# Don't disable ssl
get_YN_input_mock.side_effect = [False]
get_validated_string_input_mock.return_value = "alias"
setup_truststore()
self.assertTrue(get_YN_input_mock.called)
p.get_property.reset_mock()
get_YN_input_mock.reset_mock()
# Can't find jdk
find_jdk_mock.return_value = None
try:
setup_truststore()
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue('No JDK found, please run the "ambari-server setup" command to install a' +
' JDK automatically or install any JDK manually to ' in fe.reason)
pass
#Verbose mode and jdk_path is not None (use_https = true)
find_jdk_mock.return_value = "/jdk_path"
p.get_property.side_effect = ["true"]
get_YN_input_mock.side_effect = [True,True]
get_truststore_path_mock.return_value = "/truststore_path"
get_truststore_password_mock.return_value = "/truststore_password"
get_delete_cert_command_mock.return_value = "rm -f"
setup_truststore(True)
self.assertTrue(get_truststore_path_mock.called)
self.assertTrue(get_truststore_password_mock.called)
self.assertTrue(get_delete_cert_command_mock.called)
self.assertTrue(find_properties_file_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(p.store.called)
self.assertTrue(run_component_https_cmd_mock.called)
p.process_pair.reset_mock()
get_truststore_path_mock.reset_mock()
get_truststore_password_mock.reset_mock()
get_delete_cert_command_mock.reset_mock()
find_properties_file_mock.reset_mock()
open_mock.reset_mock()
p.store.reset_mock()
#Verbose mode and jdk_path is not None (use_https = false) and import cert
p.get_property.side_effect = ["false"]
get_YN_input_mock.side_effect = [True,True]
setup_truststore(True)
self.assertTrue(get_truststore_type_mock.called)
self.assertTrue(get_truststore_path_mock.called)
self.assertTrue(get_truststore_password_mock.called)
self.assertTrue(get_delete_cert_command_mock.called)
self.assertTrue(find_properties_file_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(p.store.called)
self.assertTrue(run_component_https_cmd_mock.called)
self.assertTrue(run_os_command_mock.called)
self.assertTrue(get_validated_filepath_input_mock.called)
p.process_pair.reset_mock()
get_truststore_type_mock.reset_mock()
get_truststore_path_mock.reset_mock()
get_truststore_password_mock.reset_mock()
get_delete_cert_command_mock.reset_mock()
find_properties_file_mock.reset_mock()
open_mock.reset_mock()
p.store.reset_mock()
run_os_command_mock.reset_mock()
get_validated_filepath_input_mock.reset_mock()
pass
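# Covers setup_https(): non-root rejection, enabling/disabling client SSL, importing certificates, silent mode, and a property read failure.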
@patch("ambari_server.setupHttps.adjust_directory_permissions")
@patch("ambari_server.setupHttps.read_ambari_user")
@patch("ambari_server.setupHttps.get_validated_string_input")
@patch("ambari_server.setupHttps.find_properties_file")
@patch("ambari_server.setupHttps.get_ambari_properties")
@patch("ambari_server.setupHttps.import_cert_and_key_action")
@patch("ambari_server.setupHttps.get_YN_input")
@patch("__builtin__.open")
@patch("ambari_server.setupHttps.is_root")
@patch("ambari_server.setupHttps.is_valid_cert_host")
@patch("ambari_server.setupHttps.is_valid_cert_exp")
def test_setup_https(self, is_valid_cert_exp_mock, is_valid_cert_host_mock, \
is_root_mock, open_Mock, get_YN_input_mock, \
import_cert_and_key_action_mock,
get_ambari_properties_mock, \
find_properties_file_mock, \
get_validated_string_input_mock, read_ambari_user_method, \
adjust_directory_permissions_mock):
is_valid_cert_exp_mock.return_value = True
is_valid_cert_host_mock.return_value = True
args = MagicMock()
open_Mock.return_value = file
p = get_ambari_properties_mock.return_value
# Testing call under non-root
is_root_mock.return_value = False
try:
setup_https(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
# Testing call under root
is_root_mock.return_value = True
read_ambari_user_method.return_value = "user"
#Case #1: if client ssl is on and user didn't choose to
#disable ssl option and chose to import certs and keys
p.get_property.side_effect = ["key_dir", "5555", "6666", "true"]
get_YN_input_mock.side_effect = [False, True]
get_validated_string_input_mock.side_effect = ["4444"]
get_property_expected = "[call('security.server.keys_dir'),\n" + \
" call('client.api.ssl.port'),\n" + \
" call('client.api.ssl.port'),\n call('api.ssl')]"
process_pair_expected = "[call('client.api.ssl.port', '4444')]"
set_silent(False)
setup_https(args)
self.assertTrue(p.process_pair.called)
self.assertTrue(p.get_property.call_count == 4)
self.assertEqual(str(p.get_property.call_args_list), get_property_expected)
self.assertEqual(str(p.process_pair.call_args_list), process_pair_expected)
self.assertTrue(p.store.called)
self.assertTrue(import_cert_and_key_action_mock.called)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #2: if client ssl is on and user chooses to disable the ssl option
p.get_property.side_effect = ["key_dir", "", "true"]
get_YN_input_mock.side_effect = [True]
get_validated_string_input_mock.side_effect = ["4444"]
get_property_expected = "[call('security.server.keys_dir'),\n" + \
" call('client.api.ssl.port'),\n call('api.ssl')]"
process_pair_expected = "[call('api.ssl', 'false')]"
setup_https(args)
self.assertTrue(p.process_pair.called)
self.assertTrue(p.get_property.call_count == 3)
self.assertEqual(str(p.get_property.call_args_list), get_property_expected)
self.assertEqual(str(p.process_pair.call_args_list), process_pair_expected)
self.assertTrue(p.store.called)
self.assertFalse(import_cert_and_key_action_mock.called)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #3: if client ssl is off and user chooses the option
#to import cert and keys
p.get_property.side_effect = ["key_dir", "", None]
get_YN_input_mock.side_effect = [True, True]
get_validated_string_input_mock.side_effect = ["4444"]
get_property_expected = "[call('security.server.keys_dir'),\n" + \
" call('client.api.ssl.port'),\n call('api.ssl')]"
process_pair_expected = "[call('client.api.ssl.port', '4444')]"
setup_https(args)
self.assertTrue(p.process_pair.called)
self.assertTrue(p.get_property.call_count == 3)
self.assertEqual(str(p.get_property.call_args_list), get_property_expected)
self.assertEqual(str(p.process_pair.call_args_list), process_pair_expected)
self.assertTrue(p.store.called)
self.assertTrue(import_cert_and_key_action_mock.called)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #4: if client ssl is off and
#user did not choose option to import cert and keys
p.get_property.side_effect = ["key_dir", "", None]
get_YN_input_mock.side_effect = [False]
get_validated_string_input_mock.side_effect = ["4444"]
get_property_expected = "[call('security.server.keys_dir'),\n" + \
" call('client.api.ssl.port'),\n call('api.ssl')]"
process_pair_expected = "[]"
setup_https(args)
self.assertFalse(p.process_pair.called)
self.assertTrue(p.get_property.call_count == 3)
self.assertEqual(str(p.get_property.call_args_list), get_property_expected)
self.assertEqual(str(p.process_pair.call_args_list), process_pair_expected)
self.assertFalse(p.store.called)
self.assertFalse(import_cert_and_key_action_mock.called)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #5: if cert must be imported but was not imported
p.get_property.side_effect = ["key_dir", "", "false"]
get_YN_input_mock.side_effect = [True]
import_cert_and_key_action_mock.side_effect = [False]
get_validated_string_input_mock.side_effect = ["4444"]
get_property_expected = "[call('security.server.keys_dir'),\n" + \
" call('client.api.ssl.port'),\n call('api.ssl')]"
process_pair_expected = "[call('client.api.ssl.port', '4444')]"
self.assertFalse(setup_https(args))
self.assertTrue(p.process_pair.called)
self.assertTrue(p.get_property.call_count == 3)
self.assertEqual(str(p.get_property.call_args_list), get_property_expected)
self.assertEqual(str(p.process_pair.call_args_list), process_pair_expected)
self.assertFalse(p.store.called)
self.assertTrue(import_cert_and_key_action_mock.called)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #6: if silent mode is enabled
set_silent(True)
try:
setup_https(args)
self.fail("Should throw exception")
except NonFatalException as fe:
self.assertTrue("setup-https is not enabled in silent mode" in fe.reason)
p.process_pair.reset_mock()
p.get_property.reset_mock()
p.store.reset_mock()
import_cert_and_key_action_mock.reset_mock()
#Case #7: reading a property throws an exception
set_silent(False)
find_properties_file_mock.return_value = "propertyFile"
p.get_property.side_effect = KeyError("Failed to read property")
try:
setup_https(args)
self.fail("Should throw exception")
except FatalException as fe:
self.assertTrue("Failed to read property" in fe.reason)
pass
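# Checks that import_cert_and_key_action() records the certificate and key file names and sets api.ssl to true in the properties.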
@patch("ambari_server.setupHttps.import_cert_and_key")
def test_import_cert_and_key_action(self, import_cert_and_key_mock):
import_cert_and_key_mock.return_value = True
properties = MagicMock()
properties.get_property.side_effect = ["key_dir", "5555", "6666", "true"]
properties.process_pair = MagicMock()
expect_process_pair = "[call('client.api.ssl.cert_name', 'https.crt'),\n" + \
" call('client.api.ssl.key_name', 'https.key'),\n" + \
" call('api.ssl', 'true')]"
import_cert_and_key_action("key_dir", properties)
self.assertEqual(str(properties.process_pair.call_args_list), \
expect_process_pair)
pass
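# Walks import_cert_and_key() through the interactive path: prompts for the certificate/key paths and a password, then imports the files into the keystore locations.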
@patch("ambari_server.setupHttps.remove_file")
@patch("ambari_server.setupHttps.copy_file")
@patch("ambari_server.setupHttps.read_ambari_user")
@patch("ambari_server.setupHttps.set_file_permissions")
@patch("ambari_server.setupHttps.import_file_to_keystore")
@patch("__builtin__.open")
@patch("ambari_server.setupHttps.run_os_command")
@patch("os.path.join")
@patch("os.path.isfile")
@patch("__builtin__.raw_input")
@patch("ambari_server.setupHttps.get_validated_string_input")
@patch("ambari_server.setupHttps.is_valid_cert_host")
@patch("ambari_server.setupHttps.is_valid_cert_exp")
def test_import_cert_and_key(self, is_valid_cert_exp_mock, \
is_valid_cert_host_mock, \
get_validated_string_input_mock, \
raw_input_mock, \
os_path_isfile_mock, \
os_path_join_mock, run_os_command_mock, \
open_mock, import_file_to_keystore_mock, \
set_file_permissions_mock, read_ambari_user_mock, copy_file_mock, \
remove_file_mock):
is_valid_cert_exp_mock.return_value = True
is_valid_cert_host_mock.return_value = True
os_path_isfile_mock.return_value = True
get_validated_string_input_mock.return_value = "password"
raw_input_mock.side_effect = \
["cert_file_path", "key_file_path"]
os_path_join_mock.side_effect = ["keystore_file_path", "keystore_file_path_tmp", \
"pass_file_path", "pass_file_path_tmp", \
"passin_file_path", "password_file_path", \
"keystore_cert_file_path", \
"keystore_cert_key_file_path", ]
run_os_command_mock.return_value = (0, "", "")
om = open_mock.return_value
expect_import_file_to_keystore = "[call('keystore_file_path_tmp'," + \
" 'keystore_file_path'),\n" + \
" call('pass_file_path_tmp'," + \
" 'pass_file_path'),\n" + \
" call('cert_file_path'," + \
" 'keystore_cert_file_path'),\n" + \
" call('key_file_path'," + \
" 'keystore_cert_key_file_path')]"
import_cert_and_key("key_dir")
self.assertTrue(raw_input_mock.call_count == 2)
self.assertTrue(get_validated_string_input_mock.called)
self.assertEqual(os_path_join_mock.call_count, 8)
self.assertTrue(set_file_permissions_mock.call_count == 1)
self.assertEqual(str(import_file_to_keystore_mock.call_args_list), \
expect_import_file_to_keystore)
pass
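# Same flow with an empty password: a random password is generated and the secured key file is imported instead of the original.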
@patch("ambari_server.setupHttps.remove_file")
@patch("ambari_server.setupHttps.copy_file")
@patch("ambari_server.setupHttps.generate_random_string")
@patch("ambari_server.setupHttps.read_ambari_user")
@patch("ambari_server.setupHttps.set_file_permissions")
@patch("ambari_server.setupHttps.import_file_to_keystore")
@patch("__builtin__.open")
@patch("ambari_server.setupHttps.run_os_command")
@patch("os.path.join")
@patch("ambari_server.setupHttps.get_validated_filepath_input")
@patch("ambari_server.setupHttps.get_validated_string_input")
@patch("ambari_server.setupHttps.is_valid_cert_host")
@patch("ambari_server.setupHttps.is_valid_cert_exp")
def test_import_cert_and_key_with_empty_password(self, \
is_valid_cert_exp_mock, is_valid_cert_host_mock,
get_validated_string_input_mock, get_validated_filepath_input_mock, \
os_path_join_mock, run_os_command_mock, open_mock, \
import_file_to_keystore_mock, set_file_permissions_mock,
read_ambari_user_mock, generate_random_string_mock, copy_file_mock, \
remove_file_mock):
is_valid_cert_exp_mock.return_value = True
is_valid_cert_host_mock.return_value = True
get_validated_string_input_mock.return_value = ""
get_validated_filepath_input_mock.side_effect = \
["cert_file_path", "key_file_path"]
os_path_join_mock.side_effect = ["keystore_file_path", "keystore_file_path_tmp", \
"pass_file_path", "pass_file_path_tmp", \
"passin_file_path", "password_file_path", \
"keystore_cert_file_path", \
"keystore_cert_key_file_path", ]
run_os_command_mock.return_value = (0, "", "")
expect_import_file_to_keystore = "[call('keystore_file_path_tmp'," + \
" 'keystore_file_path'),\n" + \
" call('pass_file_path_tmp'," + \
" 'pass_file_path'),\n" + \
" call('cert_file_path'," + \
" 'keystore_cert_file_path'),\n" + \
" call('key_file_path.secured'," + \
" 'keystore_cert_key_file_path')]"
import_cert_and_key("key_dir")
self.assertEquals(get_validated_filepath_input_mock.call_count, 2)
self.assertTrue(get_validated_string_input_mock.called)
self.assertEquals(os_path_join_mock.call_count, 8)
self.assertEquals(set_file_permissions_mock.call_count, 1)
self.assertEqual(str(import_file_to_keystore_mock.call_args_list), \
expect_import_file_to_keystore)
self.assertTrue(generate_random_string_mock.called)
pass
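# When openssl rejects the supplied password, both import_cert_and_key_action() and import_cert_and_key() must report failure.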
@patch("__builtin__.open")
@patch("ambari_server.setupHttps.copy_file")
@patch("ambari_server.setupHttps.is_root")
@patch("ambari_server.setupHttps.read_ambari_user")
@patch("ambari_server.setupHttps.set_file_permissions")
@patch("ambari_server.setupHttps.import_file_to_keystore")
@patch("ambari_server.setupHttps.run_os_command")
@patch("os.path.join")
@patch("ambari_server.setupHttps.get_validated_filepath_input")
@patch("ambari_server.setupHttps.get_validated_string_input")
def test_import_cert_and_key_with_incorrect_password(self,
get_validated_string_input_mock, \
get_validated_filepath_input_mock, \
os_path_join_mock, \
run_os_command_mock, \
import_file_to_keystore_mock, \
set_file_permissions_mock, \
read_ambari_user_mock, \
is_root_mock, \
copy_file_mock, \
open_mock):
get_validated_string_input_mock.return_value = "incorrect_password"
get_validated_filepath_input_mock.return_value = 'filename'
open_mock.return_value = MagicMock()
os_path_join_mock.return_value = ''
is_root_mock.return_value = True
#provided password doesn't match, openssl command returns an error
run_os_command_mock.return_value = (1, "", "Some error message")
self.assertFalse(import_cert_and_key_action(*["key_dir", None]))
self.assertFalse(import_cert_and_key("key_dir"))
pass
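# Validates certificate date checking: missing attributes, not-yet-valid, expired and currently valid certificates.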
def test_is_valid_cert_exp(self):
#No data in certInfo
certInfo = {}
is_valid = is_valid_cert_exp(certInfo)
self.assertFalse(is_valid)
#Issued in future
issuedOn = (datetime.datetime.now() + datetime.timedelta(hours=1000)).strftime(SSL_DATE_FORMAT)
expiresOn = (datetime.datetime.now() + datetime.timedelta(hours=2000)).strftime(SSL_DATE_FORMAT)
certInfo = {NOT_BEFORE_ATTR: issuedOn,
NOT_AFTER_ATTR: expiresOn}
is_valid = is_valid_cert_exp(certInfo)
self.assertFalse(is_valid)
#Already expired
issuedOn = (datetime.datetime.now() - datetime.timedelta(hours=2000)).strftime(SSL_DATE_FORMAT)
expiresOn = (datetime.datetime.now() - datetime.timedelta(hours=1000)).strftime(SSL_DATE_FORMAT)
certInfo = {NOT_BEFORE_ATTR: issuedOn,
NOT_AFTER_ATTR: expiresOn}
is_valid = is_valid_cert_exp(certInfo)
self.assertFalse(is_valid)
#Valid
issuedOn = (datetime.datetime.now() - datetime.timedelta(hours=2000)).strftime(SSL_DATE_FORMAT)
expiresOn = (datetime.datetime.now() + datetime.timedelta(hours=1000)).strftime(SSL_DATE_FORMAT)
certInfo = {NOT_BEFORE_ATTR: issuedOn,
NOT_AFTER_ATTR: expiresOn}
is_valid = is_valid_cert_exp(certInfo)
self.assertTrue(is_valid)
pass
@patch("ambari_server.setupHttps.get_fqdn")
def test_is_valid_cert_host(self, get_fqdn_mock):
#No data in certInfo
certInfo = {}
is_valid = is_valid_cert_host(certInfo)
self.assertFalse(is_valid)
#Failed to get FQDN
get_fqdn_mock.return_value = None
is_valid = is_valid_cert_host(certInfo)
self.assertFalse(is_valid)
#FQDN and Common Name in the certificate don't correspond
get_fqdn_mock.return_value = 'host1'
certInfo = {COMMON_NAME_ATTR: 'host2'}
is_valid = is_valid_cert_host(certInfo)
self.assertFalse(is_valid)
#FQDN and Common Name in the certificate correspond
get_fqdn_mock.return_value = 'host1'
certInfo = {COMMON_NAME_ATTR: 'host1'}
is_valid = is_valid_cert_host(certInfo)
self.assertTrue(is_valid)
pass
@patch("ambari_server.setupHttps.get_ambari_properties")
def test_is_valid_https_port(self, get_ambari_properties_mock):
#No ambari.properties
get_ambari_properties_mock.return_value = -1
is_valid = is_valid_https_port(1111)
self.assertEqual(is_valid, False)
#User entered port used by one way auth
portOneWay = "1111"
portTwoWay = "2222"
validPort = "3333"
get_ambari_properties_mock.return_value = {SRVR_ONE_WAY_SSL_PORT_PROPERTY: portOneWay,
SRVR_TWO_WAY_SSL_PORT_PROPERTY: portTwoWay}
is_valid = is_valid_https_port(portOneWay)
self.assertEqual(is_valid, False)
#User entered port used by two way auth
is_valid = is_valid_https_port(portTwoWay)
self.assertEqual(is_valid, False)
#User entered valid port
get_ambari_properties_mock.return_value = {SRVR_ONE_WAY_SSL_PORT_PROPERTY: portOneWay,
SRVR_TWO_WAY_SSL_PORT_PROPERTY: portTwoWay}
is_valid = is_valid_https_port(validPort)
self.assertEqual(is_valid, True)
pass
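# get_fqdn() returns None without ambari.properties, reads the FQDN from the configured service URL, and falls back to socket.getfqdn() when the service read fails.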
@patch("socket.getfqdn")
@patch("urllib2.urlopen")
@patch("ambari_server.setupHttps.get_ambari_properties")
def test_get_fqdn(self, get_ambari_properties_mock, url_open_mock, getfqdn_mock):
#No ambari.properties
get_ambari_properties_mock.return_value = -1
fqdn = get_fqdn()
self.assertEqual(fqdn, None)
#Check ambari_server.GET_FQDN_SERVICE_URL property name (AMBARI-2612)
#property name should be server.fqdn.service.url
self.assertEqual(GET_FQDN_SERVICE_URL, "server.fqdn.service.url")
#Read FQDN from service
p = MagicMock()
p[GET_FQDN_SERVICE_URL] = 'someurl'
get_ambari_properties_mock.return_value = p
u = MagicMock()
host = 'host1.domain.com'
u.read.return_value = host
url_open_mock.return_value = u
fqdn = get_fqdn()
self.assertEqual(fqdn, host)
#Failed to read FQDN from service, getting from socket
u.reset_mock()
u.side_effect = Exception("Failed to read FQDN from service")
getfqdn_mock.return_value = host
fqdn = get_fqdn()
self.assertEqual(fqdn, host)
pass
def test_get_ulimit_open_files(self):
# 1 - No ambari.properties
p = Properties()
open_files = get_ulimit_open_files(p)
self.assertEqual(open_files, ULIMIT_OPEN_FILES_DEFAULT)
# 2 - With ambari.properties - ok
prop_value = 65000
p.process_pair(ULIMIT_OPEN_FILES_KEY, str(prop_value))
open_files = get_ulimit_open_files(p)
self.assertEqual(open_files, 65000)
# 3 - With ambari.properties - falls back to default
tf1 = tempfile.NamedTemporaryFile()
prop_value = 0
p.process_pair(ULIMIT_OPEN_FILES_KEY, str(prop_value))
open_files = get_ulimit_open_files(p)
self.assertEqual(open_files, ULIMIT_OPEN_FILES_DEFAULT)
pass
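# get_cert_info() returns None when openssl fails or prints nothing, otherwise parses notAfter, notBefore and the subject attributes.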
@patch("ambari_server.setupHttps.run_os_command")
def test_get_cert_info(self, run_os_command_mock):
# Error running openssl command
path = 'path/to/certificate'
run_os_command_mock.return_value = -1, None, None
cert_info = get_cert_info(path)
self.assertEqual(cert_info, None)
#Empty result of openssl command
run_os_command_mock.return_value = 0, None, None
cert_info = get_cert_info(path)
self.assertEqual(cert_info, None)
#Positive scenario
notAfter = 'Jul 3 14:12:57 2014 GMT'
notBefore = 'Jul 3 14:12:57 2013 GMT'
attr1_key = 'A'
attr1_value = 'foo'
attr2_key = 'B'
attr2_value = 'bar'
attr3_key = 'CN'
attr3_value = 'host.domain.com'
subject_pattern = '/{attr1_key}={attr1_value}/{attr2_key}={attr2_value}/{attr3_key}={attr3_value}'
subject = subject_pattern.format(attr1_key=attr1_key, attr1_value=attr1_value,
attr2_key=attr2_key, attr2_value=attr2_value,
attr3_key=attr3_key, attr3_value=attr3_value)
out_pattern = \
"notAfter={notAfter}" + os.linesep + \
"notBefore={notBefore}" + os.linesep + \
"subject={subject}" + os.linesep + \
"-----BEGIN CERTIFICATE-----" + os.linesep + \
"MIIFHjCCAwYCCQDpHKOBI+Lt0zANBgkqhkiG9w0BAQUFADBRMQswCQYDVQQGEwJV" + os.linesep + \
"..." + os.linesep + \
"5lqd8XxOGSYoMOf+70BLN2sB" + os.linesep + \
"-----END CERTIFICATE-----" + os.linesep + \
""
out = out_pattern.format(notAfter=notAfter, notBefore=notBefore, subject=subject)
run_os_command_mock.return_value = 0, out, None
cert_info = get_cert_info(path)
self.assertEqual(cert_info['notAfter'], notAfter)
self.assertEqual(cert_info['notBefore'], notBefore)
self.assertEqual(cert_info['subject'], subject)
self.assertEqual(cert_info[attr1_key], attr1_value)
self.assertEqual(cert_info[attr2_key], attr2_value)
self.assertEqual(cert_info[attr3_key], attr3_value)
pass
@patch("__builtin__.raw_input")
def test_get_validated_string_input(self, raw_input_mock):
prompt = 'prompt'
default_value = 'default'
description = 'desc'
validator = MagicMock()
validator.return_value = True
inputed_value1 = 'val1'
inputed_value2 = 'val2'
raw_input_mock.return_value = inputed_value1
input = get_validated_string_input(prompt, default_value, None,
description, False, False, validator)
self.assertTrue(validator.called)
self.assertEqual(inputed_value1, input)
validator.side_effect = [False, True]
raw_input_mock.side_effect = [inputed_value1, inputed_value2]
input = get_validated_string_input(prompt, default_value, None,
description, False, False, validator)
self.assertEqual(inputed_value2, input)
pass
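# is_server_runing() reads the pid file: a readable pid with a successful process check means running, a missing pid file means stopped.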
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverUtils.run_os_command")
@patch("__builtin__.open")
@patch("os.path.exists")
def test_is_server_runing(self, os_path_exists_mock, open_mock, \
run_os_command_mock):
os_path_exists_mock.return_value = True
f = open_mock.return_value
f.readline.return_value = "111"
run_os_command_mock.return_value = 0, "", ""
status, pid = is_server_runing()
self.assertTrue(status)
self.assertEqual(111, pid)
os_path_exists_mock.return_value = False
status, pid = is_server_runing()
self.assertFalse(status)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("os_windows.win32serviceutil.QueryServiceStatus")
def test_is_server_runing(self, query_service_status_mock):
query_service_status_mock.return_value = ("", 4)
status, desc = is_server_runing()
self.assertTrue(status)
self.assertEqual("", desc)
query_service_status_mock.return_value = ("", 1)
status, desc = is_server_runing()
self.assertFalse(status)
self.assertEqual("stopped", desc)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverUtils.run_os_command")
@patch("__builtin__.open")
@patch("os.path.exists")
def test_is_server_runing_bad_file(self, os_path_exists_mock, open_mock, \
run_os_command_mock):
os_path_exists_mock.return_value = True
f = open_mock.return_value
f.readline.return_value = "" # empty file content
run_os_command_mock.return_value = 0, "", ""
self.assertRaises(NonFatalException, is_server_runing)
open_mock.side_effect = IOError('[Errno 13] Permission denied: /var/run/ambari-server/ambari-server.pid')
self.assertRaises(FatalException, is_server_runing)
pass
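# _install_jdk() must raise FatalException when the install command returns a non-zero code.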
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("os.path.exists")
@patch("os.makedirs")
@patch("os.chdir")
@patch("ambari_server.serverSetup.run_os_command")
def test_install_jdk(self, run_os_command_mock, os_chdir_mock, os_makedirs_mock, os_path_exists_mock):
run_os_command_mock.return_value = 1, "", ""
os_path_exists_mock.return_value = False
failed = False
try:
jdkSetup = JDKSetup()
jdkSetup._install_jdk(MagicMock(), MagicMock())
self.fail("Exception was not rised!")
except FatalException:
failed = True
self.assertTrue(failed)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("os.path.exists")
@patch("os.makedirs")
@patch("os.chdir")
@patch("ambari_server.serverSetup.run_os_command")
def test_install_jdk(self, run_os_command_mock, os_chdir_mock, os_makedirs_mock, os_path_exists_mock):
jdk_cfg = MagicMock()
jdk_cfg.inst_dir = "java_home_dir"
run_os_command_mock.return_value = 1, "", ""
os_path_exists_mock.return_value = False
failed = False
try:
jdkSetup = JDKSetup()
jdkSetup._install_jdk("jdk.exe", jdk_cfg)
self.fail("Exception was not rised!")
except FatalException:
failed = True
self.assertTrue(failed)
pass
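# Broad scenario test for download_and_install_jdk(): missing ambari.properties, pre-installed JDK, explicit java home, download/extraction failures, license rejection and custom JDK selection.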
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.read_ambari_user")
@patch("os.stat")
@patch("os.path.isfile")
@patch("os.path.exists")
@patch("os.chdir")
@patch("os.makedirs")
@patch("ambari_server.serverSetup.JDKSetupLinux.adjust_jce_permissions")
@patch("ambari_server.serverSetup.expand_jce_zip_file")
@patch("ambari_server.serverSetup.force_download_file")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.run_os_command")
@patch("ambari_server.serverSetup.update_properties")
@patch("ambari_server.serverSetup.get_validated_string_input")
@patch("ambari_server.serverSetup.print_info_msg")
@patch("ambari_server.serverSetup.validate_jdk")
@patch("ambari_server.serverSetup.get_JAVA_HOME")
@patch("ambari_server.serverSetup.get_resources_location")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("shutil.copyfile")
@patch("sys.exit")
def test_download_jdk(self, exit_mock, copyfile_mock, get_ambari_properties_mock, get_resources_location_mock, get_JAVA_HOME_mock, \
validate_jdk_mock, print_info_msg_mock, get_validated_string_input_mock, update_properties_mock, \
run_os_command_mock, get_YN_input_mock, force_download_file_mock, expand_jce_zip_file_mock,
adjust_jce_permissions_mock, os_makedirs_mock,
os_chdir_mock, path_existsMock, path_isfileMock, statMock, read_ambari_user_mock):
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def _init_test_jdk_mocks():
jdk1_url = "http://somewhere/myjdk.exe"
res_location = "resources"
p = Properties()
p.process_pair("java.releases", "jdk1")
p.process_pair("jdk1.desc", "JDK name")
p.process_pair("jdk1.url", "http://somewhere/myjdk.exe")
p.process_pair("jdk1.dest-file", "myjdk.exe")
p.process_pair("jdk1.jcpol-url", "http://somewhere/some-jcpol.zip")
p.process_pair("jdk1.jcpol-file", "some-jcpol.zip")
p.process_pair("jdk1.home", "C:\\jdk1")
p.process_pair("jdk1.re", "(jdk.*)/jre")
pem_side_effect1 = [False, True, False]
return p, jdk1_url, res_location, pem_side_effect1
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def _init_test_jdk_mocks():
jdk1_url = "http://somewhere/somewhere.tar.gz"
res_location = MagicMock()
p = Properties()
p.process_pair("java.releases", "jdk1")
p.process_pair("jdk1.desc", "JDK name")
p.process_pair("jdk1.url", jdk1_url)
p.process_pair("jdk1.dest-file", "somewhere.tar.gz")
p.process_pair("jdk1.jcpol-url", "http://somewhere/some-jcpol.tar.gz")
p.process_pair("jdk1.jcpol-file", "some-jcpol.tar.gz")
p.process_pair("jdk1.home", "/jdk1")
p.process_pair("jdk1.re", "(jdk.*)/jre")
pem_side_effect1 = [True, False, True, False]
return p, jdk1_url, res_location, pem_side_effect1
args = MagicMock()
args.java_home = "somewhere"
args.silent = False
p, jdk1_url, res_location, pem_side_effect1 = _init_test_jdk_mocks()
validate_jdk_mock.return_value = False
path_existsMock.return_value = False
get_resources_location_mock.return_value = res_location
get_JAVA_HOME_mock.return_value = False
read_ambari_user_mock.return_value = "ambari"
get_ambari_properties_mock.return_value = p
# Test case: ambari.properties not found
try:
download_and_install_jdk(args)
self.fail("Should throw exception because of not found ambari.properties")
except FatalException:
# Expected
self.assertTrue(get_ambari_properties_mock.called)
pass
# Test case: JDK already exists
args.java_home = None
args.jdk_location = None
get_JAVA_HOME_mock.return_value = "some_jdk"
validate_jdk_mock.return_value = True
get_YN_input_mock.return_value = False
path_existsMock.return_value = False
run_os_command_mock.return_value = 0, "", ""
rcode = download_and_install_jdk(args)
self.assertEqual(0, rcode)
# Test case: java home setup
args.java_home = "somewhere"
validate_jdk_mock.return_value = True
path_existsMock.return_value = False
get_JAVA_HOME_mock.return_value = None
rcode = download_and_install_jdk(args)
self.assertEqual(0, rcode)
self.assertTrue(update_properties_mock.called)
# Test case: JDK file does not exist, property not defined
validate_jdk_mock.return_value = False
path_existsMock.return_value = False
get_ambari_properties_mock.return_value = p
p.removeProp("jdk1.url")
try:
download_and_install_jdk(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
# Test case: JDK file does not exist, HTTP response does not
# contain Content-Length
p.process_pair("jdk1.url", jdk1_url)
validate_jdk_mock.return_value = False
path_existsMock.return_value = False
get_YN_input_mock.return_value = True
get_validated_string_input_mock.return_value = "1"
run_os_command_mock.return_value = (0, "Wrong out", None)
try:
download_and_install_jdk(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
# Successful JDK download
args.java_home = None
validate_jdk_mock.return_value = False
path_existsMock.reset_mock()
path_existsMock.side_effect = [False, False, False]
path_isfileMock.return_value = False
args.jdk_location = None
run_os_command_mock.return_value = (0, "Creating jdk1/jre", None)
statResult = MagicMock()
statResult.st_size = 32000
statMock.return_value = statResult
try:
rcode = download_and_install_jdk(args)
except Exception, e:
raise
self.assertEqual(0, rcode)
# Test case: not accept the license"
get_YN_input_mock.return_value = False
path_existsMock.reset_mock()
path_existsMock.side_effect = [False, False, True, False, True, False]
download_and_install_jdk(args)
self.assertTrue(exit_mock.called)
# Test case: jdk is already installed, ensure that JCE check is skipped if -j option is not supplied.
args.jdk_location = None
get_JAVA_HOME_mock.return_value = "some_jdk"
validate_jdk_mock.return_value = True
get_YN_input_mock.return_value = False
path_existsMock.reset_mock()
path_existsMock.side_effect = pem_side_effect1
force_download_file_mock.reset_mock()
with patch("ambari_server.serverSetup.JDKSetup._download_jce_policy") as download_jce_policy_mock:
rcode = download_and_install_jdk(args)
self.assertFalse(download_jce_policy_mock.called)
self.assertFalse(force_download_file_mock.called)
# Test case: Update JAVA_HOME location using command: ambari-server setup -j %NEW_LOCATION%
update_properties_mock.reset_mock()
args.java_home = "somewhere"
validate_jdk_mock.return_value = True
path_existsMock.reset_mock()
path_existsMock.side_effect = pem_side_effect1
get_JAVA_HOME_mock.return_value = "some_jdk"
path_isfileMock.return_value = True
download_and_install_jdk(args)
self.assertTrue(update_properties_mock.call_count == 1)
# Test case: Negative test case JAVA_HOME location should not be updated if -j option is supplied and
# jce_policy file already exists in resources dir.
#write_property_mock.reset_mock()
#args.java_home = "somewhere"
#path_existsMock.side_effect = None
#path_existsMock.return_value = True
#get_JAVA_HOME_mock.return_value = "some_jdk"
#try:
# download_and_install_jdk(args)
# self.fail("Should throw exception")
#except FatalException as fe:
# Expected
# self.assertFalse(write_property_mock.called)
# Test case: Setup ambari-server first time, Custom JDK selected, JDK exists
args.java_home = None
args.jdk_location = None
validate_jdk_mock.return_value = False
update_properties_mock.reset_mock()
path_existsMock.reset_mock()
path_existsMock.side_effect = [True, True, True, True]
get_validated_string_input_mock.return_value = "2"
get_JAVA_HOME_mock.return_value = None
rcode = download_and_install_jdk(args)
self.assertEqual(0, rcode)
self.assertTrue(update_properties_mock.called)
# Test case: Setup ambari-server first time, Custom JDK selected, JDK not exists
update_properties_mock.reset_mock()
validate_jdk_mock.return_value = False
path_existsMock.reset_mock()
path_existsMock.side_effect = pem_side_effect1
get_validated_string_input_mock.return_value = "2"
get_JAVA_HOME_mock.return_value = None
try:
download_and_install_jdk(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
pass
# Test when custom java home exists but java binary file doesn't exist
args.java_home = None
validate_jdk_mock.return_value = False
path_isfileMock.return_value = False
update_properties_mock.reset_mock()
path_existsMock.reset_mock()
path_existsMock.side_effect = pem_side_effect1
get_validated_string_input_mock.return_value = "2"
get_JAVA_HOME_mock.return_value = None
flag = False
try:
download_and_install_jdk(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
flag = True
pass
self.assertTrue(flag)
#Test case: Setup ambari-server with java home passed. Path to java home doesn't exist
args.java_home = "somewhere"
validate_jdk_mock.return_value = False
path_existsMock.reset_mock()
path_existsMock.side_effect = pem_side_effect1
try:
download_and_install_jdk(args)
self.fail("Should throw exception")
except FatalException as fe:
self.assertTrue("Path to java home somewhere or java binary file does not exists" in fe.reason)
pass
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration_linux.run_os_command")
def test_get_postgre_status(self, run_os_command_mock):
run_os_command_mock.return_value = (1, "running", None)
pg_status, retcode, out, err = PGConfig._get_postgre_status()
self.assertEqual("running", pg_status)
run_os_command_mock.return_value = (1, "wrong", None)
pg_status, retcode, out, err = PGConfig._get_postgre_status()
self.assertEqual(None, pg_status)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("time.sleep")
@patch("subprocess.Popen")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch.object(PGConfig, "_get_postgre_status")
def test_check_postgre_up(self, get_postgre_status_mock, run_os_command_mock,
popen_mock, sleep_mock):
from ambari_server import serverConfiguration
p = MagicMock()
p.communicate.return_value = (None, None)
p.returncode = 0
popen_mock.return_value = p
get_postgre_status_mock.return_value = "running", 0, "", ""
serverConfiguration.OS_TYPE = OSConst.OS_REDHAT
p.poll.return_value = 0
run_os_command_mock.return_value = (0, None, None)
pg_status, retcode, out, err = PGConfig._check_postgre_up()
self.assertEqual(0, retcode)
serverConfiguration.OS_TYPE = OSConst.OS_SUSE
run_os_command_mock.return_value = (0, None, None)
p.poll.return_value = 0
get_postgre_status_mock.return_value = "stopped", 0, "", ""
pg_status, retcode, out, err = PGConfig._check_postgre_up()
self.assertEqual(0, retcode)
pass
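# configure_os_settings() fails without properties, is a no-op when the OS type is already set, and otherwise writes server.os_family and server.os_type.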
@patch("platform.linux_distribution")
@patch("platform.system")
@patch("ambari_commons.logging_utils.print_info_msg")
@patch("ambari_commons.logging_utils.print_error_msg")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.write_property")
@patch("ambari_server.serverConfiguration.get_conf_dir")
def test_configure_os_settings(self, get_conf_dir_mock, write_property_mock, get_ambari_properties_mock,
print_error_msg_mock, print_info_msg_mock,
systemMock, distMock):
get_ambari_properties_mock.return_value = -1
rcode = configure_os_settings()
self.assertEqual(-1, rcode)
p = MagicMock()
p[OS_TYPE_PROPERTY] = 'somevalue'
get_ambari_properties_mock.return_value = p
rcode = configure_os_settings()
self.assertEqual(0, rcode)
p.__getitem__.return_value = ""
rcode = configure_os_settings()
self.assertEqual(0, rcode)
self.assertTrue(write_property_mock.called)
self.assertEqual(2, write_property_mock.call_count)
self.assertEquals(write_property_mock.call_args_list[0][0][0], "server.os_family")
self.assertEquals(write_property_mock.call_args_list[1][0][0], "server.os_type")
pass
@patch("__builtin__.open")
@patch("ambari_server.serverConfiguration.Properties")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.serverConfiguration.get_conf_dir")
def test_get_JAVA_HOME(self, get_conf_dir_mock, search_file_mock,
Properties_mock, openMock):
openMock.side_effect = Exception("exception")
result = get_JAVA_HOME()
self.assertEqual(None, result)
expected = os.path.dirname(__file__)
p = MagicMock()
p.__getitem__.return_value = expected
openMock.side_effect = None
Properties_mock.return_value = p
result = get_JAVA_HOME()
self.assertEqual(expected, result)
pass
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
def test_prompt_db_properties_default(self):
args = MagicMock()
args.must_set_database_options = False
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
prompt_db_properties(args)
self.assertEqual(args.database_index, 0)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(LinuxDBMSConfig, "_setup_remote_server")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.serverSetup.get_YN_input")
def test_prompt_db_properties_oracle_sname(self, gyni_mock, gvsi_mock, gvsi_2_mock, rp_mock, print_info_msg_mock, srs_mock):
gyni_mock.return_value = True
list_of_return_values = ["ambari-server", "ambari", "1", "1521", "localhost", "2"]
def side_effect(*args, **kwargs):
return list_of_return_values.pop()
gvsi_mock.side_effect = side_effect
gvsi_2_mock.side_effect = side_effect
rp_mock.return_value = "password"
args = MagicMock()
args.must_set_database_options = True
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.sid_or_sname
del args.jdbc_url
set_silent(False)
prompt_db_properties(args)
self.assertEqual(args.database_index, 1)
props = Properties()
factory = DBMSConfigFactory()
dbmsConfig = factory.create(args, props)
self.assertEqual(dbmsConfig.dbms, "oracle")
self.assertEqual(dbmsConfig.database_port, "1521")
self.assertEqual(dbmsConfig.database_host, "localhost")
self.assertEqual(dbmsConfig.database_name, "ambari")
self.assertEqual(dbmsConfig.database_username, "ambari")
self.assertEqual(dbmsConfig.database_password, "bigdata")
self.assertEqual(dbmsConfig.sid_or_sname, "sid")
dbmsConfig.configure_database(props)
self.assertEqual(dbmsConfig.database_username, "ambari-server")
self.assertEqual(dbmsConfig.sid_or_sname, "sname")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(LinuxDBMSConfig, "_setup_remote_server")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.serverSetup.get_YN_input")
def test_prompt_db_properties_oracle_sid(self, gyni_mock, gvsi_mock, gvsi_2_mock, rp_mock, print_info_msg_mock, srs_mock):
gyni_mock.return_value = True
list_of_return_values = ["ambari-server", "ambari", "2", "1521", "localhost", "2"]
def side_effect(*args, **kwargs):
return list_of_return_values.pop()
gvsi_mock.side_effect = side_effect
gvsi_2_mock.side_effect = side_effect
rp_mock.return_value = "password"
args = MagicMock()
args.must_set_database_options = True
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.sid_or_sname
del args.jdbc_url
set_silent(False)
prompt_db_properties(args)
self.assertEqual(args.database_index, 1)
props = Properties()
factory = DBMSConfigFactory()
dbmsConfig = factory.create(args, props)
self.assertEqual(dbmsConfig.dbms, "oracle")
self.assertEqual(dbmsConfig.database_port, "1521")
self.assertEqual(dbmsConfig.database_host, "localhost")
self.assertEqual(dbmsConfig.database_name, "ambari")
self.assertEqual(dbmsConfig.database_username, "ambari")
self.assertEqual(dbmsConfig.database_password, "bigdata")
dbmsConfig.configure_database(props)
self.assertEqual(dbmsConfig.database_username, "ambari-server")
self.assertEqual(dbmsConfig.database_password, "password")
self.assertEqual(dbmsConfig.sid_or_sname, "sid")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(PGConfig, "_setup_local_server")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.serverSetup.get_YN_input")
def test_prompt_db_properties_postgre_adv(self, gyni_mock, gvsi_mock, gvsi_2_mock, rp_mock, print_info_msg_mock, sls_mock):
gyni_mock.return_value = True
list_of_return_values = ["ambari-server", "ambari", "ambari", "1"]
def side_effect(*args, **kwargs):
return list_of_return_values.pop()
gvsi_mock.side_effect = side_effect
gvsi_2_mock.side_effect = side_effect
rp_mock.return_value = "password"
args = MagicMock()
args.must_set_database_options = True
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
set_silent(False)
prompt_db_properties(args)
self.assertEqual(args.database_index, 0)
props = Properties()
factory = DBMSConfigFactory()
dbmsConfig = factory.create(args, props)
self.assertEqual(dbmsConfig.dbms, "postgres")
self.assertEqual(dbmsConfig.database_port, "5432")
self.assertEqual(dbmsConfig.database_host, "localhost")
self.assertEqual(dbmsConfig.database_name, "ambari")
self.assertEqual(dbmsConfig.database_username, "ambari")
self.assertEqual(dbmsConfig.database_password, "bigdata")
dbmsConfig.configure_database(props)
self.assertEqual(dbmsConfig.database_username, "ambari-server")
self.assertEqual(dbmsConfig.database_password, "password")
self.assertEqual(dbmsConfig.sid_or_sname, "sid")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_linux.store_password_file")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration_linux.get_YN_input")
def test_prompt_db_properties_for_each_database_type(self, gyni_mock, gvsi_mock, rp_mock, spf_mock):
"""
:return: Validates that installation for each database type correctly stores the database type, database name,
and optionally the postgres schema name.
"""
from ambari_server import serverConfiguration
gyni_mock.return_value = True
rp_mock.return_value = "password"
spf_mock.return_value = "encrypted password"
# Values to use while installing several database types
hostname = "localhost"
db_name = "db_ambari"
postgres_schema = "sc_ambari"
port = "1234"
oracle_service = "1"
oracle_service_name = "ambari"
user_name = "ambari"
# Input values
postgres_embedded_values = [db_name, postgres_schema, hostname]
oracle_values = [hostname, port, oracle_service, oracle_service_name, user_name]
mysql_values = [hostname, port, db_name, user_name]
postgres_external_values = [hostname, port, db_name, postgres_schema, user_name]
mssql_values = [hostname, port, db_name, user_name]
list_of_return_values = postgres_embedded_values + oracle_values + mysql_values + postgres_external_values + mssql_values
list_of_return_values = list_of_return_values[::-1] # Reverse the list since the input will be popped
def side_effect(*args, **kwargs):
return list_of_return_values.pop()
gvsi_mock.side_effect = side_effect
if AMBARI_CONF_VAR in os.environ:
del os.environ[AMBARI_CONF_VAR]
tempdir = tempfile.gettempdir()
os.environ[AMBARI_CONF_VAR] = tempdir
prop_file = os.path.join(tempdir, "ambari.properties")
for i in range(0, 5):
# Use the expected path of the ambari.properties file to delete it if it exists, and then create a new one
# during each use case.
if os.path.exists(prop_file):
os.remove(prop_file)
with open(prop_file, "w") as f:
f.write("server.jdbc.database_name=oldDBName")
f.close()
serverConfiguration.AMBARI_PROPERTIES_FILE = prop_file
args = MagicMock()
properties = Properties()
args.database_index = i
args.silent = False
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.sid_or_sname
del args.jdbc_url
factory = DBMSConfigFactory()
dbConfig = factory.create(args, properties)
dbConfig._prompt_db_properties()
if dbConfig._is_local_database():
dbConfig._setup_local_server(properties)
else:
dbConfig._setup_remote_server(properties)
if i == 0:
# Postgres Embedded
self.assertEqual(properties[JDBC_DATABASE_PROPERTY], "postgres")
self.assertEqual(properties[JDBC_DATABASE_NAME_PROPERTY], db_name)
self.assertEqual(properties[JDBC_POSTGRES_SCHEMA_PROPERTY], postgres_schema)
self.assertEqual(properties[PERSISTENCE_TYPE_PROPERTY], "local")
elif i == 1:
# Oracle
self.assertEqual(properties[JDBC_DATABASE_PROPERTY], "oracle")
self.assertFalse(JDBC_POSTGRES_SCHEMA_PROPERTY in properties.propertyNames())
self.assertEqual(properties[PERSISTENCE_TYPE_PROPERTY], "remote")
elif i == 2:
# MySQL
self.assertEqual(properties[JDBC_DATABASE_PROPERTY], "mysql")
self.assertFalse(JDBC_POSTGRES_SCHEMA_PROPERTY in properties.propertyNames())
self.assertEqual(properties[PERSISTENCE_TYPE_PROPERTY], "remote")
elif i == 3:
# Postgres External
self.assertEqual(properties[JDBC_DATABASE_PROPERTY], "postgres")
self.assertEqual(properties[JDBC_DATABASE_NAME_PROPERTY], db_name)
self.assertEqual(properties[JDBC_POSTGRES_SCHEMA_PROPERTY], postgres_schema)
self.assertEqual(properties[PERSISTENCE_TYPE_PROPERTY], "remote")
elif i == 4:
# MSSQL
self.assertEqual(properties[JDBC_DATABASE_PROPERTY], "mssql")
self.assertFalse(JDBC_POSTGRES_SCHEMA_PROPERTY in properties.propertyNames())
self.assertEqual(properties[PERSISTENCE_TYPE_PROPERTY], "remote")
pass
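# validate_jdk() returns False unless the JDK path and the java binary both exist and the binary is a regular file.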
@patch.object(os.path, "exists")
@patch.object(os.path, "isfile")
def test_validate_jdk(self, isfile_mock, exists_mock):
exists_mock.side_effect = [False]
result = validate_jdk("path")
self.assertFalse(result)
exists_mock.side_effect = [True, False]
result = validate_jdk("path")
self.assertFalse(result)
exists_mock.side_effect = [True, True]
isfile_mock.return_value = False
result = validate_jdk("path")
self.assertFalse(result)
exists_mock.side_effect = [True, True]
isfile_mock.return_value = True
result = validate_jdk("path")
self.assertTrue(result)
pass
@patch("glob.glob")
@patch("ambari_server.serverConfiguration.get_JAVA_HOME")
@patch("ambari_server.serverConfiguration.validate_jdk")
def test_find_jdk(self, validate_jdk_mock, get_JAVA_HOME_mock, globMock):
get_JAVA_HOME_mock.return_value = "somewhere"
validate_jdk_mock.return_value = True
result = find_jdk()
self.assertEqual("somewhere", result)
get_JAVA_HOME_mock.return_value = None
globMock.return_value = []
result = find_jdk()
self.assertEqual(None, result)
globMock.return_value = ["one", "two"]
result = find_jdk()
self.assertNotEqual(None, result)
globMock.return_value = ["one", "two"]
validate_jdk_mock.side_effect = [False, True]
result = find_jdk()
self.assertEqual(result, "one")
pass
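# expand_jce_zip_file() handles archives with and without a top-level folder and raises FatalException for missing paths or extraction errors.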
@patch("os.path.exists")
@patch("zipfile.ZipFile")
@patch("os.path.split")
@patch("os.listdir")
@patch("ambari_server.serverSetup.copy_files")
@patch("shutil.rmtree")
def test_unpack_jce_policy(self, rmtree_mock, copy_files_mock, os_listdir_mock, os_path_split_mock, zipfile_mock, exists_mock):
# Testing the case when the zip file doesn't contain any folder
exists_mock.return_value = True
zipfile = MagicMock()
zipfile_mock.return_value = zipfile
zip_members = ["US_export_policy.jar", "local_policy.jar", "README.txt"]
zipfile.namelist.return_value = zip_members
os_path_split_mock.return_value = [""]
expand_jce_zip_file("", "")
self.assertTrue(exists_mock.called)
self.assertTrue(zipfile_mock.called)
self.assertTrue(os_path_split_mock.called)
# Testing the case when the zip file contains a folder
unziped_jce_path = "jce"
os_path_split_mock.return_value = unziped_jce_path
expand_jce_zip_file("", "")
self.assertTrue(exists_mock.called)
self.assertTrue(zipfile_mock.called)
self.assertTrue(os_listdir_mock.called)
self.assertTrue(copy_files_mock.called)
self.assertTrue(rmtree_mock.called)
# Testing when the jdk_security_path or jce_zip_path doesn't exist
exists_mock.return_value = False
try:
expand_jce_zip_file("", "")
except FatalException:
self.assertTrue(True)
exists_mock.return_value = True
# Testing when zipfile fails with an error
zipfile_mock.side_effect = FatalException(1,"Extract error")
try:
expand_jce_zip_file("", "")
except FatalException:
self.assertTrue(True)
@patch("os.path.exists")
@patch("shutil.copy")
@patch("os.path.split")
@patch("ambari_server.serverSetup.update_properties")
@patch.object(JDKSetup, "unpack_jce_policy")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_commons.os_utils.search_file")
@patch("__builtin__.open")
def test_setup_jce_policy(self, open_mock, search_file_mock, get_ambari_properties_mock, unpack_jce_policy_mock,
update_properties_mock, path_split_mock, shutil_copy_mock, exists_mock):
exists_mock.return_value = True
properties = Properties()
properties.process_pair(JAVA_HOME_PROPERTY, "/java_home")
unpack_jce_policy_mock.return_value = 0
get_ambari_properties_mock.return_value = properties
conf_file = 'etc/ambari-server/conf/ambari.properties'
search_file_mock.return_value = conf_file
path_split_mock.return_value = ["/path/to", "JCEPolicy.zip"]
args = ['setup-jce', '/path/to/JCEPolicy.zip']
setup_jce_policy(args)
shutil_copy_mock.assert_called_with(args[1], configDefaults.SERVER_RESOURCES_DIR)
self.assertTrue(unpack_jce_policy_mock.called)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(update_properties_mock.called)
# Testing that the file is not copied when the source and the destination are the same
path_split_mock.return_value = [configDefaults.SERVER_RESOURCES_DIR, "JCEPolicy.zip"]
shutil_copy_mock.reset_mock()
setup_jce_policy(args)
self.assertFalse(shutil_copy_mock.called)
self.assertTrue(unpack_jce_policy_mock.called)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(update_properties_mock.called)
path_split_mock.return_value = ["/path/to", "JCEPolicy.zip"]
# Testing with bad path
exists_mock.return_value = False
try:
setup_jce_policy(args)
except FatalException:
self.assertTrue(True)
exists_mock.return_value = True
# Testing with an error produced by shutil.copy
shutil_copy_mock.reset_mock()
shutil_copy_mock.side_effect = FatalException(1, "Error trying to copy the file.")
try:
setup_jce_policy(args)
except FatalException:
self.assertTrue(True)
# Testing with an error produced by Properties.store function
update_properties_mock.side_effect = Exception("Invalid file.")
try:
setup_jce_policy(args)
except Exception:
self.assertTrue(True)
update_properties_mock.reset_mock()
# Testing with an error produced by unpack_jce_policy
unpack_jce_policy_mock.side_effect = FatalException(1, "Can not install JCE policy")
try:
setup_jce_policy(args)
except FatalException:
self.assertTrue(True)
pass
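# End-to-end setup() flow on Linux: rejects non-root callers, then exercises the remote-DB default,
# embedded Postgres, changed JDBC user, aborted remote setup and pre-supplied JDBC properties branches.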
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.firewall.run_os_command")
@patch("os.path.exists")
@patch("os.path.isfile")
@patch("ambari_commons.os_utils.remove_file")
@patch("ambari_server.dbConfiguration_linux.LinuxDBMSConfig.ensure_jdbc_driver_installed")
@patch("ambari_server.dbConfiguration_linux.get_YN_input")
@patch("ambari_server.serverSetup.update_properties")
@patch("ambari_server.dbConfiguration_linux.get_ambari_properties")
@patch("ambari_server.dbConfiguration_linux.store_password_file")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.dbConfiguration_linux.PGConfig._configure_postgres")
@patch("ambari_server.dbConfiguration_linux.PGConfig._check_postgre_up")
@patch("ambari_server.dbConfiguration_linux.PGConfig._is_jdbc_user_changed")
@patch("ambari_server.serverSetup.verify_setup_allowed")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.configure_os_settings")
@patch("ambari_server.serverSetup.download_and_install_jdk")
@patch("ambari_server.serverSetup.check_ambari_user")
@patch("ambari_server.serverSetup.check_jdbc_drivers")
@patch("ambari_server.serverSetup.disable_security_enhancements")
@patch("ambari_server.serverSetup.is_root")
@patch("ambari_server.serverSetup.proceedJDBCProperties")
@patch("ambari_server.serverSetup.extract_views")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
@patch("ambari_server.serverSetup.service_setup")
@patch("ambari_server.serverSetup.read_ambari_user")
@patch("ambari_server.serverSetup.expand_jce_zip_file")
def test_setup(self, expand_jce_zip_file_mock, read_ambari_user_mock,
service_setup_mock, adjust_dirs_mock, extract_views_mock, proceedJDBCProperties_mock, is_root_mock,
disable_security_enhancements_mock, check_jdbc_drivers_mock, check_ambari_user_mock,
download_jdk_mock, configure_os_settings_mock, get_ambari_properties_mock,
get_YN_input_mock, gvsi_mock, gvsi_1_mock,
read_password_mock, verify_setup_allowed_method, is_jdbc_user_changed_mock, check_postgre_up_mock,
configure_postgres_mock, run_os_command_1_mock,
store_password_file_mock, get_ambari_properties_1_mock, update_properties_mock,
get_YN_input_1_mock, ensure_jdbc_driver_installed_mock,
remove_file_mock, isfile_mock, exists_mock,
run_os_command_mock):
hostname = "localhost"
db_name = "db_ambari"
postgres_schema = "sc_ambari"
port = "1234"
oracle_service = "1"
oracle_service_name = "ambari"
user_name = "ambari"
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
args.silent = False
failed = False
properties = Properties()
get_YN_input_mock.return_value = False
isfile_mock.return_value = False
verify_setup_allowed_method.return_value = 0
exists_mock.return_value = False
remove_file_mock.return_value = 0
run_os_command_mock.return_value = (3, "", "")
extract_views_mock.return_value = 0
read_ambari_user_mock.return_value = "ambari"
read_password_mock.return_value = "bigdata2"
get_ambari_properties_mock.return_value = properties
get_ambari_properties_1_mock.return_value = properties
store_password_file_mock.return_value = "encrypted_bigdata2"
ensure_jdbc_driver_installed_mock.return_value = True
check_postgre_up_mock.return_value = (PGConfig.PG_STATUS_RUNNING, 0, "", "")
configure_postgres_mock.return_value = (0, "", "")
run_os_command_1_mock.return_value = (0, "", "")
expand_jce_zip_file_mock.return_value = 0
def reset_mocks():
is_jdbc_user_changed_mock.reset_mock()
is_root_mock.reset_mock()
disable_security_enhancements_mock.reset_mock()
check_jdbc_drivers_mock.reset_mock()
check_ambari_user_mock.reset_mock()
run_os_command_mock.reset_mock()
configure_os_settings_mock.reset_mock()
run_os_command_1_mock.reset_mock()
get_YN_input_1_mock.reset_mock()
update_properties_mock.reset_mock()
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.sid_or_sname
del args.jdbc_url
args.jdbc_driver= None
args.jdbc_db = None
args.silent = False
return args
# Testing call under non-root
is_root_mock.return_value = False
try:
setup(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
args = reset_mocks()
# Testing calls under root
# remote case
is_root_mock.return_value = True
disable_security_enhancements_mock.return_value = (0, "")
check_ambari_user_mock.return_value = (0, False, 'user', None)
check_jdbc_drivers_mock.return_value = 0
download_jdk_mock.return_value = 0
configure_os_settings_mock.return_value = 0
result = setup(args)
self.assertEqual(None, result)
self.assertTrue(check_ambari_user_mock.called)
self.assertEqual(1, run_os_command_mock.call_count)
# Local case
args = reset_mocks()
# Input values
db_selection_values = ["1"]
postgres_values = [db_name, postgres_schema, hostname]
postgres_values = postgres_values[::-1] # Reverse the list since the input will be popped
def side_effect(*args, **kwargs):
return db_selection_values.pop()
gvsi_mock.side_effect = side_effect
def side_effect_1(*args, **kwargs):
return postgres_values.pop()
gvsi_1_mock.side_effect = side_effect_1
get_YN_input_mock.return_value = True
# is_local_database_mock.return_value = True
is_jdbc_user_changed_mock.return_value = False
try:
result = setup(args)
except FatalException:
self.fail("Setup should be successful")
self.assertEqual(None, result)
self.assertTrue(is_jdbc_user_changed_mock.called)
self.assertTrue(update_properties_mock.called)
self.assertTrue(run_os_command_1_mock.called)
self.assertFalse(remove_file_mock.called)
self.assertTrue("Ambari-DDL-Postgres-EMBEDDED-CREATE.sql" in run_os_command_1_mock.call_args[0][0][3])
# If the DB user name was changed
args = reset_mocks()
# is_local_database_mock.return_value = True
is_jdbc_user_changed_mock.return_value = True
db_selection_values = ["1"]
postgres_values = [db_name, postgres_schema, hostname]
postgres_values = postgres_values[::-1] # Reverse the list since the input will be popped
try:
result = setup(args)
except FatalException:
self.fail("Setup should be successful")
self.assertEqual(None, result)
self.assertTrue(is_jdbc_user_changed_mock.called)
self.assertTrue(update_properties_mock.called)
self.assertTrue(run_os_command_1_mock.called)
self.assertFalse(remove_file_mock.called)
# Negative case
args = reset_mocks()
# Use remote database
get_YN_input_1_mock.return_value = False
db_selection_values = ["4"]
postgres_values = [hostname, port, db_name, postgres_schema, user_name]
postgres_values = postgres_values[::-1] # Reverse the list since the input will be popped
try:
result = setup(args)
self.fail("Should throw exception")
except NonFatalException as fe:
self.assertTrue("Remote database setup aborted." in fe.reason)
self.assertFalse(run_os_command_1_mock.called)
# Test that the full setup flow is skipped when ambari-server setup is run with only JDBC properties
args = reset_mocks()
# is_server_runing_mock.return_value = (False, 1)
args.jdbc_driver= "path/to/driver"
args.jdbc_db = "test_db_name"
setup(args)
self.assertTrue(proceedJDBCProperties_mock.called)
self.assertFalse(disable_security_enhancements_mock.called)
self.assertFalse(check_ambari_user_mock.called)
pass
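# Windows counterpart of the setup() flow: requires administrator rights and covers MSSQL with
# Windows authentication, SQL Server authentication and the pre-supplied JDBC properties shortcut.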
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.firewall.run_os_command")
@patch("os.path.exists")
@patch("os.path.isfile")
@patch("ambari_commons.os_utils.remove_file")
@patch("ambari_server.dbConfiguration_windows.MSSQLConfig.ensure_jdbc_driver_installed")
@patch("ambari_server.serverSetup.update_properties")
@patch("ambari_server.dbConfiguration_windows.store_password_file")
@patch("ambari_server.dbConfiguration_windows.run_os_command")
@patch("ambari_server.serverSetup.verify_setup_allowed")
@patch("ambari_server.dbConfiguration_windows.get_validated_string_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.configure_os_settings")
@patch("ambari_server.serverSetup.download_and_install_jdk")
@patch("ambari_server.serverSetup.check_firewall")
@patch("ambari_server.serverSetup.check_ambari_user")
@patch("ambari_server.serverSetup.check_jdbc_drivers")
@patch("ambari_server.serverSetup.disable_security_enhancements")
@patch("ambari_server.serverSetup.is_root")
@patch("ambari_server.serverSetup.proceedJDBCProperties")
@patch("ambari_server.serverSetup.extract_views")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
@patch("ambari_server.serverSetup.service_setup")
@patch("ambari_server.serverSetup.read_ambari_user")
@patch("ambari_server.serverSetup.expand_jce_zip_file")
def test_setup(self, expand_jce_zip_file_mock, read_ambari_user_mock,
service_setup_mock, adjust_dirs_mock, extract_views_mock, proceedJDBCProperties_mock, is_root_mock,
disable_security_enhancements_mock, check_jdbc_drivers_mock, check_ambari_user_mock, check_firewall_mock,
download_jdk_mock, configure_os_settings_mock, get_ambari_properties_mock,
get_YN_input_mock, gvsi_mock, gvsi_1_mock,
verify_setup_allowed_method, run_os_command_1_mock,
store_password_file_mock, update_properties_mock,
ensure_jdbc_driver_installed_mock,
remove_file_mock, isfile_mock, exists_mock,
run_os_command_mock):
hostname = "localhost"
db_name = "db_ambari"
port = "1433"
user_name = "ambari"
password = "bigdata2"
failed = False
properties = Properties()
get_YN_input_mock.return_value = False
isfile_mock.return_value = False
verify_setup_allowed_method.return_value = 0
exists_mock.return_value = False
remove_file_mock.return_value = 0
run_os_command_mock.return_value = (3, "", "")
extract_views_mock.return_value = 0
read_ambari_user_mock.return_value = "ambari"
#read_password_mock.return_value = "bigdata2"
get_ambari_properties_mock.return_value = properties
store_password_file_mock.return_value = "encrypted_bigdata2"
ensure_jdbc_driver_installed_mock.return_value = True
run_os_command_1_mock.return_value = (0, "", "")
expand_jce_zip_file_mock.return_value = 0
def reset_mocks():
is_root_mock.reset_mock()
disable_security_enhancements_mock.reset_mock()
check_jdbc_drivers_mock.reset_mock()
check_ambari_user_mock.reset_mock()
run_os_command_mock.reset_mock()
configure_os_settings_mock.reset_mock()
run_os_command_1_mock.reset_mock()
update_properties_mock.reset_mock()
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.default_database_host
del args.persistence_type
del args.init_db_script_file
del args.cleanup_db_script_file
del args.sid_or_sname
del args.jdbc_url
args.jdbc_driver= None
args.jdbc_db = None
args.silent = False
args.must_set_database_options = True
return args
args = reset_mocks()
# Testing call under non-root
is_root_mock.return_value = False
try:
setup(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("administrator-level" in fe.reason)
pass
args = reset_mocks()
# Testing calls under root
is_root_mock.return_value = True
disable_security_enhancements_mock.return_value = (0, "")
check_ambari_user_mock.return_value = (0, False, 'user', None)
check_jdbc_drivers_mock.return_value = 0
download_jdk_mock.return_value = 0
configure_os_settings_mock.return_value = 0
result = setup(args)
self.assertEqual(None, result)
self.assertTrue(check_ambari_user_mock.called)
self.assertEqual(2, run_os_command_1_mock.call_count)
# Negative case
args = reset_mocks()
# Use Windows authentication
get_YN_input_mock.return_value = True
gvsi_1_mock.side_effect = [hostname, "1"]
try:
result = setup(args)
except Exception:
self.fail("Shouldn't throw exception")
self.assertTrue(run_os_command_1_mock.called)
# Use SQL Server authentication
get_YN_input_mock.return_value = True
gvsi_1_mock.side_effect = [hostname, "2", user_name, password]
try:
result = setup(args)
except Exception:
self.fail("Shouldn't throw exception")
self.assertTrue(run_os_command_1_mock.called)
# Test that the full setup flow is skipped when ambari-server setup is run with only JDBC properties
args = reset_mocks()
args.jdbc_driver= "path/to/driver"
args.jdbc_db = "test_db_name"
setup(args)
self.assertTrue(proceedJDBCProperties_mock.called)
self.assertFalse(disable_security_enhancements_mock.called)
self.assertFalse(check_ambari_user_mock.called)
pass
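# reset() on Linux: requires root and confirmation, refuses to run while the server is up,
# re-runs the local database setup, and reports remote databases via NonFatalException.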
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(OracleConfig, "_get_remote_script_line")
@patch("ambari_server.serverSetup.is_server_runing")
@patch("ambari_server.dbConfiguration_linux.get_YN_input")
@patch("ambari_server.serverSetup.get_YN_input")
@patch.object(PGConfig, "_setup_db")
@patch("ambari_server.dbConfiguration_linux.print_warning_msg")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.is_root")
def test_reset(self, is_root_mock, get_ambari_properties_mock, decrypt_password_for_alias_mock,
run_os_command_mock, print_info_msg_mock, print_warning_msg_mock,
setup_db_mock, get_YN_input_mock, get_YN_input_2_mock, is_server_running_mock,
get_remote_script_line_mock):
def reset_mocks():
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.init_script_file
del args.drop_script_file
del args.sid_or_sname
del args.jdbc_url
return args
properties = Properties()
get_ambari_properties_mock.return_value = properties
args = reset_mocks()
args.persistence_type = "local"
get_YN_input_mock.return_value = False
decrypt_password_for_alias_mock.return_value = "password"
is_server_running_mock.return_value = (False, 0)
setup_db_mock.side_effect = [(0,None, None),(0,None, "ERROR: database 'ambari' is being accessed by other users"), (0, None, "ERROR: user 'mapred' already exist")]
# Testing call under non-root
is_root_mock.return_value = False
try:
reset(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
# Testing calls under root
is_root_mock.return_value = True
try:
reset(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertFalse("root-level" in fe.reason)
pass
get_YN_input_mock.return_value = True
get_YN_input_2_mock.return_value = True
run_os_command_mock.return_value = (1, None, None)
try:
reset(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
run_os_command_mock.return_value = (0, None, None)
reset(args)
self.assertTrue(setup_db_mock.called)
# Database errors cases
is_server_running_mock.side_effect = [(True, 123), (False, 0), (False, 0), (False, 0), (False, 0)]
try:
reset(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
try:
reset(args)
self.fail("Should throw exception")
except NonFatalException:
# Expected
pass
args = reset_mocks()
args.dbms = "postgres"
#get_remote_script_line_mock.return_value = None
try:
# Remote DB case
reset(args)
self.fail("Should throw exception")
except NonFatalException:
# Expected
pass
args = reset_mocks()
args.dbms = "oracle"
print_warning_msg_mock.reset_mock()
get_remote_script_line_mock.reset_mock()
get_remote_script_line_mock.side_effect = ["drop", "create"]
try:
# Remote DB case (not Postgres)
rcode = reset(args)
self.fail("Should throw exception")
except NonFatalException:
# Expected
self.assertTrue(get_remote_script_line_mock.called)
self.assertTrue(print_warning_msg_mock.called)
pass
pass
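# reset() on Windows: requires administrator rights and confirmation, refuses to run while the
# server is up, and issues two OS commands to re-initialize the local database.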
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.is_server_runing")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.dbConfiguration_windows.print_warning_msg")
@patch("ambari_server.dbConfiguration_windows.print_info_msg")
@patch("ambari_server.dbConfiguration_windows.run_os_command")
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.is_root")
def test_reset(self, is_root_mock, get_ambari_properties_mock, decrypt_password_for_alias_mock,
run_os_command_mock, print_info_msg_mock, print_warning_msg_mock,
get_YN_input_mock, is_server_running_mock):
def reset_mocks():
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.default_database_host
del args.persistence_type
del args.init_db_script_file
del args.cleanup_db_script_file
del args.sid_or_sname
del args.jdbc_url
return args
properties = Properties()
get_ambari_properties_mock.return_value = properties
args = reset_mocks()
args.persistence_type = "local"
get_YN_input_mock.return_value = False
decrypt_password_for_alias_mock.return_value = "password"
is_server_running_mock.return_value = (False, 0)
# Testing call under non-root
is_root_mock.return_value = False
try:
reset(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("administrator-level" in fe.reason)
pass
# Testing calls under root
is_root_mock.return_value = True
try:
reset(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertFalse("administrator-level" in fe.reason)
pass
get_YN_input_mock.return_value = True
run_os_command_mock.return_value = (1, None, None)
try:
reset(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
run_os_command_mock.reset_mock()
run_os_command_mock.return_value = (0, None, None)
reset(args)
self.assertTrue(run_os_command_mock.called)
self.assertEqual(run_os_command_mock.call_count, 2)
# Database errors cases
is_server_running_mock.side_effect = [(True, 123), (False, 0)]
try:
reset(args)
self.fail("Should throw exception")
except FatalException:
# Expected
pass
try:
reset(args)
except NonFatalException:
self.fail("Shouldn't throw exception")
pass
pass
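# reset() without confirmation must abort with a FatalException carrying exit code 1.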
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.get_YN_input")
@patch("__builtin__.raw_input")
@patch("ambari_server.serverSetup.is_root")
def test_reset_default(self, is_root_mock, raw_input_mock, get_YN_inputMock):
is_root_mock.return_value=True
get_YN_inputMock.return_value = False
raw_input_mock.return_value=""
args = MagicMock()
try:
reset(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue(fe.code == 1)
pass
pass
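# Silent reset on Linux must finish without prompting; the SIGALRM handler fails the test if
# reset() blocks waiting for input.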
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(PGConfig, "_setup_db")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
#@patch("ambari_server.serverSetup.parse_properties_file")
@patch("ambari_server.serverSetup.is_root")
#@patch("ambari_server.serverSetup.check_database_name_property")
@patch("ambari_server.serverSetup.is_server_runing")
def test_silent_reset(self, is_server_runing_mock, #check_database_name_property_mock,
is_root_mock, #parse_properties_file_mock,
run_os_command_mock, print_info_msg_mock,
setup_db_mock):
is_root_mock.return_value = True
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
set_silent(True)
self.assertTrue(get_silent())
setup_db_mock.return_value = (0, None, None)
run_os_command_mock.return_value = (0, None, None)
is_server_runing_mock.return_value = (False, 0)
def signal_handler(signum, frame):
self.fail("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
try:
signal.alarm(5)
rcode = reset(args)
signal.alarm(0)
self.assertEqual(None, rcode)
self.assertTrue(setup_db_mock.called)
finally:
signal.signal(signal.SIGALRM, signal.SIG_IGN)
pass
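# Silent reset on Windows runs the MSSQL database scripts (two _execute_db_script calls)
# without prompting.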
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_windows.MSSQLConfig._execute_db_script")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverSetup.is_root")
@patch("ambari_server.serverSetup.is_server_runing")
def test_silent_reset(self, is_server_runing_mock,
is_root_mock, get_ambari_properties_mock,
execute_db_script_mock):
is_root_mock.return_value = True
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.default_database_host
del args.persistence_type
del args.init_db_script_file
del args.cleanup_db_script_file
set_silent(True)
self.assertTrue(get_silent())
properties = Properties()
get_ambari_properties_mock.return_value = properties
is_server_runing_mock.return_value = (False, 0)
rcode = reset(args)
self.assertEqual(None, rcode)
self.assertEqual(execute_db_script_mock.call_count, 2)
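# _ambari_server_.start() walkthrough: already-running server, missing Ambari user, wrong current
# user, missing JDK, missing remote JDBC driver, resource-housekeeping failure, local Postgres
# startup, java process death and master-key environment handling.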
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("sys.stdout.flush")
@patch("sys.stdout.write")
@patch("ambari_server_main.looking_for_pid")
@patch("ambari_server_main.wait_for_pid")
@patch("ambari_server_main.save_main_pid_ex")
@patch("ambari_server_main.check_exitcode")
@patch("os.makedirs")
@patch("ambari_server_main.locate_file")
@patch.object(_ambari_server_, "is_server_runing")
@patch("os.chown")
@patch("ambari_server.setupSecurity.get_master_key_location")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server_main.get_is_persisted")
@patch("ambari_server_main.get_is_secure")
@patch('os.chmod', autospec=True)
@patch("ambari_server.serverConfiguration.write_property")
@patch("ambari_server.serverConfiguration.get_validated_string_input")
@patch("os.environ")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server_main.get_ambari_properties")
@patch("os.path.exists")
@patch("__builtin__.open")
@patch("subprocess.Popen")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server_main.check_database_name_property")
@patch("ambari_server_main.find_jdk")
@patch("ambari_server_main.print_warning_msg")
@patch("ambari_server_main.print_info_msg")
@patch.object(PGConfig, "_check_postgre_up")
@patch("ambari_server_main.read_ambari_user")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.dbConfiguration_linux.is_root")
@patch("ambari_server_main.is_root")
@patch.object(LinuxDBMSConfig, "_find_jdbc_driver")
@patch("getpass.getuser")
@patch("os.chdir")
@patch.object(ResourceFilesKeeper, "perform_housekeeping")
def test_start(self, perform_housekeeping_mock, chdir_mock, getuser_mock, find_jdbc_driver_mock,
is_root_mock, is_root_2_mock, is_root_3_mock, read_ambari_user_mock,
check_postgre_up_mock, print_info_msg_mock, print_warning_msg_mock,
find_jdk_mock, check_database_name_property_mock, search_file_mock,
popenMock, openMock, pexistsMock,
get_ambari_properties_mock, get_ambari_properties_2_mock, get_ambari_properties_3_mock,
get_ambari_properties_4_mock, os_environ_mock,
get_validated_string_input_method, write_property_method,
os_chmod_method, get_is_secure_mock, get_is_persisted_mock,
save_master_key_method, get_master_key_location_method,
os_chown_mock, is_server_running_mock, locate_file_mock,
os_makedirs_mock, check_exitcode_mock, save_main_pid_ex_mock,
wait_for_pid_mock, looking_for_pid_mock, stdout_write_mock, stdout_flush_mock):
def reset_mocks():
pexistsMock.reset_mock()
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.sid_or_sname
del args.jdbc_url
del args.debug
del args.suspend_start
return args
args = reset_mocks()
locate_file_mock.side_effect = lambda *args: '/bin/su' if args[0] == 'su' else '/bin/sh'
f = MagicMock()
f.readline.return_value = '42'
openMock.return_value = f
looking_for_pid_mock.return_value = [{
"pid": "777",
"exe": "/test",
"cmd": "test arg"
}]
wait_for_pid_mock.return_value = 1
check_exitcode_mock.return_value = 0
p = Properties()
p.process_pair(SECURITY_IS_ENCRYPTION_ENABLED, 'False')
get_ambari_properties_4_mock.return_value = \
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = p
get_is_secure_mock.return_value = False
get_is_persisted_mock.return_value = (False, None)
search_file_mock.return_value = None
is_server_running_mock.return_value = (True, 123)
os_chown_mock.return_value = None
# Checking "server is running"
pexistsMock.return_value = True
if get_platform() != PLATFORM_WINDOWS:
with patch("pwd.getpwnam") as getpwnam_mock:
pw = MagicMock()
pw.pw_uid = 0
pw.pw_gid = 0
getpwnam_mock.return_value = pw
try:
_ambari_server_.start(args)
self.fail("Should fail with 'Server is running'")
except FatalException as e:
# Expected
self.assertTrue('Ambari Server is already running.' in e.reason)
args = reset_mocks()
is_server_running_mock.return_value = (False, 0)
pexistsMock.return_value = False
# Checking situation when ambari user is not set up
read_ambari_user_mock.return_value = None
try:
_ambari_server_.start(args)
self.fail("Should fail with 'Can not detect a system user for Ambari'")
except FatalException as e:
# Expected
self.assertTrue('Unable to detect a system user for Ambari Server.' in e.reason)
# Checking start from non-root when current user is not the same as a
# custom user
args = reset_mocks()
read_ambari_user_mock.return_value = "dummy-user"
getuser_mock.return_value = "non_custom_user"
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = False
try:
_ambari_server_.start(args)
self.fail("Should fail with 'Can not start ambari-server as user...'")
except FatalException as e:
# Expected
self.assertTrue('Unable to start Ambari Server as user' in e.reason)
#self.assertFalse(parse_properties_file_mock.called)
# Checking "jdk not found"
args = reset_mocks()
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = True
find_jdk_mock.return_value = None
try:
_ambari_server_.start(args)
self.fail("Should fail with 'No JDK found'")
except FatalException as e:
# Expected
self.assertTrue('No JDK found' in e.reason)
args = reset_mocks()
find_jdk_mock.return_value = "somewhere"
## Testing workflow under root
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = True
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'oracle')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
# Case when jdbc driver is not used
find_jdbc_driver_mock.return_value = -1
try:
_ambari_server_.start(args)
self.fail("Should fail with exception")
except FatalException as e:
self.assertTrue('Before starting Ambari Server' in e.reason)
args = reset_mocks()
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'oracle')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
find_jdbc_driver_mock.reset_mock()
find_jdbc_driver_mock.return_value = -1
try:
_ambari_server_.start(args)
except FatalException as e:
# Ignored
pass
args = reset_mocks()
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'oracle')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
find_jdbc_driver_mock.reset_mock()
find_jdbc_driver_mock.return_value = 0
# Test exception handling on resource files housekeeping
perform_housekeeping_mock.reset_mock()
perform_housekeeping_mock.side_effect = KeeperException("some_reason")
pexistsMock.return_value = True
try:
_ambari_server_.start(args)
self.fail("Should fail with exception")
except FatalException as e:
self.assertTrue('some_reason' in e.reason)
self.assertTrue(perform_housekeeping_mock.called)
perform_housekeeping_mock.side_effect = lambda *v, **kv : None
perform_housekeeping_mock.reset_mock()
self.assertFalse('Unable to start PostgreSQL server' in e.reason)
self.assertFalse(check_postgre_up_mock.called)
args = reset_mocks()
# Local DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
check_postgre_up_mock.reset_mock()
# case: postgres failed to start
check_postgre_up_mock.return_value = None, 1, "Unable to start PostgreSQL serv", "error"
try:
_ambari_server_.start(args)
self.fail("Should fail with 'Unable to start PostgreSQL server'")
except FatalException as e:
# Expected
self.assertTrue('Unable to start PostgreSQL server' in e.reason)
self.assertTrue(check_postgre_up_mock.called)
args = reset_mocks()
# Local DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
check_postgre_up_mock.return_value = "running", 0, "success", ""
# Case: custom user is "root"
read_ambari_user_mock.return_value = "root"
# Java failed to start
proc = MagicMock()
proc.pid = -186
popenMock.return_value = proc
try:
_ambari_server_.start(args)
except FatalException as e:
# Expected
self.assertTrue(popenMock.called)
self.assertTrue('Ambari Server java process died' in e.reason)
self.assertTrue(perform_housekeeping_mock.called)
args = reset_mocks()
# Java OK
proc.pid = 186
popenMock.reset_mock()
_ambari_server_.start(args)
self.assertTrue(popenMock.called)
popen_arg = popenMock.call_args[0][0]
self.assertTrue(popen_arg[0] == "/bin/sh")
self.assertTrue(perform_housekeeping_mock.called)
args = reset_mocks()
# Local DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
perform_housekeeping_mock.reset_mock()
popenMock.reset_mock()
# Case: custom user is not "root"
read_ambari_user_mock.return_value = "not-root-user"
_ambari_server_.start(args)
self.assertTrue(chdir_mock.called)
self.assertTrue(popenMock.called)
popen_arg = popenMock.call_args_list[0][0][0]
self.assertTrue("; /bin/su" in popen_arg[2])
self.assertTrue(perform_housekeeping_mock.called)
args = reset_mocks()
# Local DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
check_postgre_up_mock.reset_mock()
popenMock.reset_mock()
## Testing workflow under non-root
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = False
read_ambari_user_mock.return_value = "not-root-user"
getuser_mock.return_value = read_ambari_user_mock.return_value
_ambari_server_.start(args)
self.assertFalse(check_postgre_up_mock.called)
args = reset_mocks()
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
_ambari_server_.start(args)
self.assertFalse(check_postgre_up_mock.called)
args = reset_mocks()
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
# Checking call
_ambari_server_.start(args)
self.assertTrue(popenMock.called)
popen_arg = popenMock.call_args[0][0]
self.assertTrue(popen_arg[0] == "/bin/sh")
args = reset_mocks()
# Remote DB
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'remote')
# Test start under wrong user
read_ambari_user_mock.return_value = "not-root-user"
getuser_mock.return_value = "non_custom_user"
try:
_ambari_server_.start(args)
self.fail("Can not start ambari-server as user non_custom_user.")
except FatalException as e:
# Expected
self.assertTrue('Unable to start Ambari Server as user' in e.reason)
args = reset_mocks()
# Check environ master key is set
popenMock.reset_mock()
os_environ_mock.copy.return_value = {"a": "b",
SECURITY_KEY_ENV_VAR_NAME: "masterkey"}
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
read_ambari_user_mock.return_value = "root"
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = True
_ambari_server_.start(args)
self.assertFalse(get_validated_string_input_method.called)
self.assertFalse(save_master_key_method.called)
popen_arg = popenMock.call_args[1]['env']
self.assertEquals(os_environ_mock.copy.return_value, popen_arg)
args = reset_mocks()
# Check environ master key is not set
popenMock.reset_mock()
os_environ_mock.reset_mock()
p.process_pair(SECURITY_IS_ENCRYPTION_ENABLED, 'True')
os_environ_mock.copy.return_value = {"a": "b"}
p.process_pair(JDBC_DATABASE_PROPERTY, 'postgres')
p.process_pair(PERSISTENCE_TYPE_PROPERTY, 'local')
read_ambari_user_mock.return_value = "root"
is_root_3_mock.return_value = \
is_root_2_mock.return_value = \
is_root_mock.return_value = True
get_validated_string_input_method.return_value = "masterkey"
os_chmod_method.return_value = None
get_is_secure_mock.return_value = True
_ambari_server_.start(args)
self.assertTrue(get_validated_string_input_method.called)
self.assertTrue(save_master_key_method.called)
popen_arg = popenMock.call_args[1]['env']
self.assertEquals(os_environ_mock.copy.return_value, popen_arg)
pass
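# stop() on Linux kills the recorded server process group and removes the pid file when the
# server is running.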
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "is_server_runing")
@patch("os.remove")
@patch("os.killpg")
@patch("os.getpgid")
@patch.object(_ambari_server_, "print_info_msg")
def test_stop(self, print_info_msg_mock, gpidMock, removeMock,
killMock, isServerRuningMock):
isServerRuningMock.return_value = (True, 123)
_ambari_server_.stop(None)
self.assertTrue(killMock.called)
self.assertTrue(removeMock.called)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("win32serviceutil.StopServiceWithDeps")
@patch("win32serviceutil.StopService")
@patch("win32serviceutil.WaitForServiceStatus")
def test_stop(self, WaitForServiceStatusMock, StopServiceMock, StopServiceWithDepsMock):
_ambari_server_.stop()
self.assertTrue(StopServiceWithDepsMock.called)
self.assertFalse(StopServiceMock.called)
self.assertTrue(WaitForServiceStatusMock.called)
pass
@patch.object(_ambari_server_, "BackupRestore_main")
def test_backup(self, bkrestore_mock):
args = ["", "/some/path/file.zip"]
_ambari_server_.backup(args)
self.assertTrue(bkrestore_mock.called)
pass
@patch.object(_ambari_server_, "BackupRestore_main")
def test_backup_no_path(self, bkrestore_mock):
args = [""]
_ambari_server_.backup(args)
self.assertTrue(bkrestore_mock.called)
pass
@patch.object(_ambari_server_, "BackupRestore_main")
def test_restore(self, bkrestore_mock):
args = ["", "/some/path/file.zip"]
_ambari_server_.restore(args)
self.assertTrue(bkrestore_mock.called)
pass
@patch.object(_ambari_server_, "BackupRestore_main")
def test_restore_no_path(self, bkrestore_mock):
args = [""]
_ambari_server_.restore(args)
self.assertTrue(bkrestore_mock.called)
pass
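# upgrade_stack() requires root and delegates to run_stack_upgrade() with the parsed stack name
# and version.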
@patch("ambari_server.serverUpgrade.is_root")
@patch("ambari_server.serverUpgrade.check_database_name_property")
@patch("ambari_server.serverUpgrade.run_stack_upgrade")
def test_upgrade_stack(self, run_stack_upgrade_mock,
check_database_name_property_mock, is_root_mock):
# Testing call under non-root
is_root_mock.return_value = False
args = ['', 'HDP-2.0']
try:
upgrade_stack(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
# Testing calls under root
is_root_mock.return_value = True
run_stack_upgrade_mock.return_value = 0
upgrade_stack(args)
self.assertTrue(run_stack_upgrade_mock.called)
run_stack_upgrade_mock.assert_called_with("HDP", "2.0", None, None)
pass
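# move_user_custom_actions() relocates only regular *.py files from the custom_actions directory
# into custom_actions/scripts.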
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("os.listdir")
@patch("os.path.isfile")
@patch("shutil.move")
def test_move_user_custom_actions(self, shutil_move_mock, os_path_isfile_mock, os_listdir_mock, get_ambari_properties_mock):
properties = Properties()
properties.process_pair(RESOURCES_DIR_PROPERTY, 'some/test/fake/resources/dir/path')
get_ambari_properties_mock.return_value = properties
os_listdir_mock.return_value = ['sometestdir', 'sometestfile.md', 'sometestfile.py', 'sometestfile2.java', 'sometestfile2.py', 'sometestdir2.py']
os_path_isfile_mock.side_effect = [False, True, True, True, True, False]
move_user_custom_actions()
custom_actions_scripts_dir = os.path.join('some/test/fake/resources/dir/path', 'custom_actions', 'scripts')
shutil_move_mock.assert_has_calls([call(os.path.join('some/test/fake/resources/dir/path', 'custom_actions', 'sometestfile.py'), custom_actions_scripts_dir),
call(os.path.join('some/test/fake/resources/dir/path', 'custom_actions', 'sometestfile2.py'), custom_actions_scripts_dir)])
self.assertEqual(shutil_move_mock.call_count, 2)
pass
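# run_stack_upgrade() assembles the java command line for StackUpgradeHelper updateStackId,
# passing the stack map as a JSON argument.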
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.get_ambari_classpath")
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
def test_run_stack_upgrade(self, java_exe_path_mock, run_os_command_mock,
get_ambari_classpath_mock, get_conf_dir_mock):
java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
run_os_command_mock.return_value = (0, None, None)
get_ambari_classpath_mock.return_value = 'test:path12'
get_conf_dir_mock.return_value = '/etc/conf'
stackIdMap = {'HDP' : '2.0', 'repo_url' : 'http://test.com'}
run_stack_upgrade('HDP', '2.0', 'http://test.com', None)
self.assertTrue(java_exe_path_mock.called)
self.assertTrue(get_ambari_classpath_mock.called)
self.assertTrue(get_conf_dir_mock.called)
self.assertTrue(run_os_command_mock.called)
run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp /etc/conf' + os.pathsep + 'test:path12 '
'org.apache.ambari.server.upgrade.StackUpgradeHelper '
'updateStackId ' + "'" + json.dumps(stackIdMap) + "'" +
' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' + os.sep +
'ambari-server.out 2>&1')
pass
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.get_ambari_classpath")
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
def test_run_stack_upgrade_with_url_os(self, java_exe_path_mock, run_os_command_mock,
get_ambari_classpath_mock, get_conf_dir_mock):
java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
run_os_command_mock.return_value = (0, None, None)
get_ambari_classpath_mock.return_value = 'test:path12'
get_conf_dir_mock.return_value = '/etc/conf'
stackIdMap = {'HDP' : '2.0', 'repo_url': 'http://test.com', 'repo_url_os': 'centos5,centos6'}
run_stack_upgrade('HDP', '2.0', 'http://test.com', 'centos5,centos6')
self.assertTrue(java_exe_path_mock.called)
self.assertTrue(get_ambari_classpath_mock.called)
self.assertTrue(get_conf_dir_mock.called)
self.assertTrue(run_os_command_mock.called)
run_os_command_mock.assert_called_with('/usr/lib/java/bin/java -cp /etc/conf' + os.pathsep + 'test:path12 '
'org.apache.ambari.server.upgrade.StackUpgradeHelper '
'updateStackId ' + "'" + json.dumps(stackIdMap) + "'" +
' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' + os.sep +
'ambari-server.out 2>&1')
pass
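# run_schema_upgrade() launches SchemaUpgradeHelper with the environment produced by generate_env()
# after resolving which user the server may start under.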
@patch("ambari_server.serverUpgrade.ensure_can_start_under_current_user")
@patch("ambari_server.serverUpgrade.generate_env")
@patch("ambari_server.serverUpgrade.read_ambari_user")
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.get_ambari_classpath")
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
def test_run_schema_upgrade(self, java_exe_path_mock, run_os_command_mock,
get_ambari_classpath_mock, get_conf_dir_mock,
read_ambari_user_mock, generate_env_mock,
ensure_can_start_under_current_user_mock):
java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
run_os_command_mock.return_value = (0, None, None)
get_ambari_classpath_mock.return_value = 'test' + os.pathsep + 'path12'
get_conf_dir_mock.return_value = '/etc/conf'
command = '/usr/lib/java/bin/java -cp /etc/conf' + os.pathsep + 'test' + os.pathsep + 'path12 ' \
'org.apache.ambari.server.upgrade.SchemaUpgradeHelper ' \
'> ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' + os.sep + 'ambari-server.out 2>&1'
environ = {}
generate_env_mock.return_value = environ
ensure_can_start_under_current_user_mock.return_value = "root"
read_ambari_user_mock.return_value = "ambari"
run_schema_upgrade()
self.assertTrue(java_exe_path_mock.called)
self.assertTrue(ensure_can_start_under_current_user_mock.called)
self.assertTrue(generate_env_mock.called)
self.assertTrue(read_ambari_user_mock.called)
self.assertTrue(get_ambari_classpath_mock.called)
self.assertTrue(get_conf_dir_mock.called)
self.assertTrue(run_os_command_mock.called)
run_os_command_mock.assert_called_with(command, env=environ)
pass
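# run_metainfo_upgrade() invokes StackUpgradeHelper updateMetaInfo with the JSON-serialized URL map.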
@patch("ambari_server.serverConfiguration.get_conf_dir")
@patch("ambari_server.serverConfiguration.get_ambari_classpath")
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
def test_run_metainfo_upgrade(self, java_exe_path_mock, run_os_command_mock,
get_ambari_classpath_mock, get_conf_dir_mock):
java_exe_path_mock.return_value = "/usr/lib/java/bin/java"
run_os_command_mock.return_value = (0, None, None)
get_ambari_classpath_mock.return_value = 'test' + os.pathsep + 'path12'
get_conf_dir_mock.return_value = '/etc/conf'
json_map = {'a': 'http://newurl'}
run_metainfo_upgrade(json_map)
self.assertTrue(java_exe_path_mock.called)
self.assertTrue(get_ambari_classpath_mock.called)
self.assertTrue(get_conf_dir_mock.called)
self.assertTrue(run_os_command_mock.called)
run_os_command_mock.assert_called_with('/usr/lib/java/bin/java '
'-cp /etc/conf' + os.pathsep + 'test' + os.pathsep + 'path12 '
'org.apache.ambari.server.upgrade.StackUpgradeHelper updateMetaInfo ' +
"'" + json.dumps(json_map) + "'" +
' > ' + os.sep + 'var' + os.sep + 'log' + os.sep + 'ambari-server' +
os.sep + 'ambari-server.out 2>&1')
pass
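# proceedJDBCProperties() validation on Linux: a missing driver file, an unsupported database name,
# unreadable ambari.properties and a missing resources directory all raise FatalException; a
# successful run copies the driver and refreshes the <db>-jdbc-driver.jar symlink.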
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("os.path.isfile")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("os.path.exists")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
@patch("shutil.copy")
def test_proceedJDBCProperties(self, copy_mock, os_symlink_mock, os_remove_mock, lexists_mock, exists_mock,
get_ambari_properties_mock, isfile_mock):
args = MagicMock()
# test incorrect path to jdbc-driver
isfile_mock.return_value = False
args.jdbc_driver = "test jdbc"
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("File test jdbc does not exist!", e.reason)
fail = True
self.assertTrue(fail)
# test incorrect jdbc-db
isfile_mock.return_value = True
args.jdbc_db = "incorrect db"
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("Unsupported database name incorrect db. Please see help for more information.", e.reason)
fail = True
self.assertTrue(fail)
# test getAmbariProperties failed
args.jdbc_db = "mysql"
get_ambari_properties_mock.return_value = -1
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("Error getting ambari properties", e.reason)
fail = True
self.assertTrue(fail)
# test getAmbariProperties failed
args.jdbc_db = "mssql"
get_ambari_properties_mock.return_value = -1
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("Error getting ambari properties", e.reason)
fail = True
self.assertTrue(fail)
# test get resource dir param failed
args.jdbc_db = "oracle"
p = MagicMock()
get_ambari_properties_mock.return_value = p
p.__getitem__.side_effect = KeyError("test exception")
exists_mock.return_value = False
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
fail = True
self.assertTrue(fail)
# test copy jdbc failed and symlink exists
lexists_mock.return_value = True
args.jdbc_db = "postgres"
get_ambari_properties_mock.return_value = MagicMock()
isfile_mock.side_effect = [True, False]
exists_mock.return_value = True
fail = False
def side_effect(*args, **kwargs):
raise Exception(-1, "Failed to copy!")
copy_mock.side_effect = side_effect
try:
proceedJDBCProperties(args)
except FatalException as e:
fail = True
self.assertTrue(fail)
self.assertTrue(os_remove_mock.called)
# test successful symlink creation
get_ambari_properties_mock.reset_mock()
os_remove_mock.reset_mock()
p = MagicMock()
get_ambari_properties_mock.return_value = p
p.__getitem__.side_effect = None
p.__getitem__.return_value = "somewhere"
copy_mock.reset_mock()
copy_mock.side_effect = None
isfile_mock.side_effect = [True, False]
proceedJDBCProperties(args)
self.assertTrue(os_remove_mock.called)
self.assertTrue(os_symlink_mock.called)
self.assertTrue(copy_mock.called)
self.assertEquals(os_symlink_mock.call_args_list[0][0][0], os.path.join("somewhere","test jdbc"))
self.assertEquals(os_symlink_mock.call_args_list[0][0][1], os.path.join("somewhere","postgres-jdbc-driver.jar"))
pass
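# Windows variant of the same checks; the mssql branch is expected to finish without raising even
# when ambari.properties cannot be read.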
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("os.path.isfile")
@patch("ambari_server.serverSetup.get_ambari_properties")
@patch("os.path.exists")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
@patch("shutil.copy")
def test_proceedJDBCProperties(self, copy_mock, os_symlink_mock, os_remove_mock, lexists_mock, exists_mock,
get_ambari_properties_mock, isfile_mock):
args = MagicMock()
# test incorrect path to jdbc-driver
isfile_mock.return_value = False
args.jdbc_driver = "test jdbc"
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("File test jdbc does not exist!", e.reason)
fail = True
self.assertTrue(fail)
# test incorrect jdbc-db
isfile_mock.return_value = True
args.jdbc_db = "incorrect db"
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("Unsupported database name incorrect db. Please see help for more information.", e.reason)
fail = True
self.assertTrue(fail)
# test that the mssql case completes without error even when get_ambari_properties fails
args.jdbc_db = "mssql"
get_ambari_properties_mock.return_value = -1
fail = False
try:
proceedJDBCProperties(args)
except FatalException as e:
self.assertEquals("Error getting ambari properties", e.reason)
fail = True
self.assertFalse(fail)
pass
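# Upgrade from Ambari 1.6.1: the legacy server.jdbc.database/server.jdbc.schema layout is rewritten
# into the new database-type plus database-name properties for local Postgres, external Postgres and
# external MySQL, deriving the type from the JDBC URL when it is absent.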
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("__builtin__.open")
@patch("os.path.isfile")
@patch("os.path.lexists")
@patch("os.path.exists")
@patch("os.remove")
@patch("os.symlink")
@patch.object(Properties, "store")
@patch("ambari_server.serverUpgrade.adjust_directory_permissions")
@patch("ambari_server.serverUpgrade.print_warning_msg")
@patch("ambari_server.serverUpgrade.read_ambari_user")
@patch("ambari_server.serverUpgrade.run_schema_upgrade")
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.serverConfiguration.find_properties_file")
@patch("ambari_server.serverUpgrade.update_ambari_properties")
@patch("ambari_server.serverUpgrade.is_root")
@patch("ambari_server.serverConfiguration.write_property")
@patch("ambari_server.serverConfiguration.get_ambari_version")
@patch("ambari_server.dbConfiguration.get_ambari_properties")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.upgrade_local_repo")
@patch("ambari_server.serverUpgrade.move_user_custom_actions")
@patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
def test_upgrade_from_161(self, update_krb_jaas_login_properties_mock, move_user_custom_actions_mock, upgrade_local_repo_mock, get_ambari_properties_mock,
get_ambari_properties_2_mock, get_ambari_properties_3_mock, get_ambari_version_mock, write_property_mock,
is_root_mock, update_ambari_properties_mock, find_properties_file_mock, run_os_command_mock,
run_schema_upgrade_mock, read_ambari_user_mock, print_warning_msg_mock,
adjust_directory_permissions_mock, properties_store_mock,
os_symlink_mock, os_remove_mock, exists_mock, lexists_mock, isfile_mock, open_mock):
def reset_mocks():
run_os_command_mock.reset_mock()
write_property_mock.reset_mock()
isfile_mock.reset_mock()
lexists_mock.reset_mock()
os_symlink_mock.reset_mock()
lexists_mock.return_value = False
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.database_windows_auth
del args.default_database_host
del args.init_db_script_file
del args.cleanup_db_script_file
del args.must_set_database_options
del args.sid_or_sname
del args.jdbc_url
args.jdbc_driver= None
args.jdbc_db = None
args.silent = False
return args
args = reset_mocks()
args.dbms = "postgres"
is_root_mock.return_value = True
update_ambari_properties_mock.return_value = 0
get_ambari_version_mock.return_value = "1.7.0"
move_user_custom_actions_mock.return_value = None
update_krb_jaas_login_properties_mock.return_value = -2
# Local Postgres
# In Ambari 1.6.1 for an embedded postgres database, the "server.jdbc.database" property stored the DB name,
# and the DB type was assumed to be "postgres" if the "server.persistence.type" property was "local"
properties = Properties()
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "local")
properties.process_pair(JDBC_DATABASE_PROPERTY, "ambari")
properties.process_pair(RESOURCES_DIR_PROPERTY, "/tmp")
get_ambari_properties_mock.return_value = properties
properties2 = Properties()
properties2.process_pair(PERSISTENCE_TYPE_PROPERTY, "local")
properties2.process_pair(JDBC_DATABASE_NAME_PROPERTY, "ambari")
properties2.process_pair(JDBC_DATABASE_PROPERTY, "postgres")
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
run_schema_upgrade_mock.return_value = 0
read_ambari_user_mock.return_value = "custom_user"
run_os_command_mock.return_value = (0, "", "")
isfile_mock.return_value = False
try:
upgrade(args)
except FatalException as fe:
self.fail("Did not expect failure: " + str(fe))
else:
self.assertTrue(write_property_mock.called)
self.assertEquals(write_property_mock.call_args_list[0][0][0], JDBC_DATABASE_NAME_PROPERTY)
self.assertEquals(write_property_mock.call_args_list[0][0][1], "ambari")
self.assertEquals(write_property_mock.call_args_list[1][0][0], JDBC_DATABASE_PROPERTY)
self.assertEquals(write_property_mock.call_args_list[1][0][1], "postgres")
self.assertTrue(run_os_command_mock.called)
self.assertFalse(move_user_custom_actions_mock.called)
args = reset_mocks()
# External Postgres
# In Ambari 1.6.1 for an external postgres database, the "server.jdbc.database" property stored the
# DB type ("postgres"), and the "server.jdbc.schema" property stored the DB name.
properties = Properties()
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties.process_pair(JDBC_DATABASE_PROPERTY, "postgres")
properties.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties.process_pair(JDBC_URL_PROPERTY, "jdbc:postgresql://c6410.ambari.apache.org:5432/ambari")
properties2 = Properties()
properties2.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties2.process_pair(JDBC_DATABASE_NAME_PROPERTY, "ambari")
properties2.process_pair(JDBC_DATABASE_PROPERTY, "postgres")
properties2.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties2.process_pair(JDBC_URL_PROPERTY, "jdbc:postgresql://c6410.ambari.apache.org:5432/ambari")
get_ambari_properties_mock.return_value = properties
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
exists_mock.return_value = True
try:
upgrade(args)
except FatalException as fe:
self.fail("Did not expect failure: " + str(fe))
else:
self.assertTrue(write_property_mock.called)
self.assertFalse(run_os_command_mock.called)
self.assertFalse(move_user_custom_actions_mock.called)
args = reset_mocks()
# External Postgres missing DB type, so it should be set based on the JDBC URL.
properties = Properties()
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties.process_pair(JDBC_URL_PROPERTY, "jdbc:postgresql://c6410.ambari.apache.org:5432/ambari")
get_ambari_properties_mock.return_value = properties
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
try:
upgrade(args)
except FatalException as fe:
self.fail("Did not expect failure: " + str(fe))
else:
self.assertTrue(write_property_mock.call_count == 2)
self.assertFalse(move_user_custom_actions_mock.called)
args = reset_mocks()
# External MySQL
# In Ambari 1.6.1 for an external MySQL database, the "server.jdbc.database" property stored the DB type ("mysql"),
# And the "server.jdbc.schema" property stored the DB name.
properties = Properties()
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties.process_pair(JDBC_DATABASE_PROPERTY, "mysql")
properties.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties.process_pair(JDBC_URL_PROPERTY, "jdbc:mysql://c6409.ambari.apache.org:3306/ambari")
properties2 = Properties()
properties2.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties2.process_pair(JDBC_DATABASE_PROPERTY, "mysql")
properties2.process_pair(JDBC_DATABASE_NAME_PROPERTY, "ambari")
properties2.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties2.process_pair(JDBC_URL_PROPERTY, "jdbc:mysql://c6409.ambari.apache.org:3306/ambari")
get_ambari_properties_mock.return_value = properties
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
isfile_mock.side_effect = [False, True, False]
try:
upgrade(args)
except FatalException as fe:
self.fail("Did not expect failure: " + str(fe))
else:
self.assertTrue(write_property_mock.called)
self.assertFalse(move_user_custom_actions_mock.called)
self.assertTrue(os_symlink_mock.called)
self.assertTrue(os_symlink_mock.call_args_list[0][0][0] == "/var/lib/ambari-server/resources/mysql-connector-java.jar")
self.assertTrue(os_symlink_mock.call_args_list[0][0][1] == "/var/lib/ambari-server/resources/mysql-jdbc-driver.jar")
args = reset_mocks()
# External MySQL missing DB type, so it should be set based on the JDBC URL.
properties = Properties()
properties.process_pair(PERSISTENCE_TYPE_PROPERTY, "remote")
properties.process_pair(JDBC_RCA_SCHEMA_PROPERTY, "ambari")
properties.process_pair(JDBC_URL_PROPERTY, "jdbc:mysql://c6409.ambari.apache.org:3306/ambari")
get_ambari_properties_mock.return_value = properties
get_ambari_properties_3_mock.side_effect = get_ambari_properties_2_mock.side_effect = [properties, properties2, properties2]
isfile_mock.side_effect = None
try:
upgrade(args)
except FatalException as fe:
self.fail("Did not expect failure: " + str(fe))
else:
self.assertTrue(write_property_mock.call_count == 2)
self.assertFalse(move_user_custom_actions_mock.called)
pass
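# Linux variant of the upgrade test: verifies the root-privilege check, schema upgrade, custom-ambari-user warning,
# directory permission adjustment, version-dependent migration of custom actions, and JDBC driver symlinking.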
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("__builtin__.open")
@patch("os.path.isfile")
@patch("os.path.exists")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
@patch.object(Properties, "store")
@patch.object(PGConfig, "_change_db_files_owner")
@patch("ambari_server.serverConfiguration.find_properties_file")
@patch("ambari_server.serverUpgrade.adjust_directory_permissions")
@patch("ambari_server.serverUpgrade.print_warning_msg")
@patch("ambari_server.serverUpgrade.read_ambari_user")
@patch("ambari_server.serverUpgrade.run_schema_upgrade")
@patch("ambari_server.serverUpgrade.update_ambari_properties")
@patch("ambari_server.serverUpgrade.parse_properties_file")
@patch("ambari_server.serverUpgrade.get_ambari_version")
@patch("ambari_server.serverConfiguration.get_ambari_version")
@patch("ambari_server.serverUpgrade.is_root")
@patch("ambari_server.dbConfiguration.get_ambari_properties")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.upgrade_local_repo")
@patch("ambari_server.serverUpgrade.move_user_custom_actions")
@patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
def test_upgrade(self, update_krb_jaas_login_properties_mock, move_user_custom_actions, upgrade_local_repo_mock,
get_ambari_properties_mock, get_ambari_properties_2_mock, get_ambari_properties_3_mock,
is_root_mock, get_ambari_version_mock, get_ambari_version_2_mock,
parse_properties_file_mock,
update_ambari_properties_mock, run_schema_upgrade_mock,
read_ambari_user_mock, print_warning_msg_mock,
adjust_directory_permissions_mock,
find_properties_file_mock, change_db_files_owner_mock, properties_store_mock,
os_symlink_mock, os_remove_mock, lexists_mock, exists_mock, isfile_mock, open_mock):
def reset_mocks():
isfile_mock.reset_mock()
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
del args.sid_or_sname
del args.jdbc_url
args.must_set_database_options = True
return args
args = reset_mocks()
properties = Properties()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = properties
update_ambari_properties_mock.return_value = 0
run_schema_upgrade_mock.return_value = 0
isfile_mock.return_value = False
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = CURR_AMBARI_VERSION
move_user_custom_actions.return_value = None
update_krb_jaas_login_properties_mock.return_value = -2
# Testing call under non-root
is_root_mock.return_value = False
try:
upgrade(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
args = reset_mocks()
# Testing calls under root
is_root_mock.return_value = True
# Testing with undefined custom user
read_ambari_user_mock.return_value = None
run_schema_upgrade_mock.return_value = 0
change_db_files_owner_mock.return_value = 0
exists_mock.return_value = True
upgrade(args)
self.assertTrue(print_warning_msg_mock.called)
warning_args = print_warning_msg_mock.call_args[0][0]
self.assertTrue("custom ambari user" in warning_args)
self.assertTrue(upgrade_local_repo_mock.called)
self.assertTrue(move_user_custom_actions.called)
args = reset_mocks()
# Testing with defined custom user
read_ambari_user_mock.return_value = "ambari-custom-user"
upgrade(args)
self.assertTrue(adjust_directory_permissions_mock.called)
args = reset_mocks()
run_schema_upgrade_mock.return_value = 0
parse_properties_file_mock.called = False
move_user_custom_actions.called = False
retcode = upgrade(args)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(get_ambari_properties_2_mock.called)
self.assertNotEqual(-1, retcode)
self.assertTrue(parse_properties_file_mock.called)
self.assertTrue(run_schema_upgrade_mock.called)
self.assertTrue(move_user_custom_actions.called)
# Assert that move_user_custom_actions is called on upgrade to Ambari == 2.0.0
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = '2.0.0'
move_user_custom_actions.called = False
upgrade(args)
self.assertTrue(move_user_custom_actions.called)
# Assert that move_user_custom_actions is not called on upgrade to Ambari < 2.0.0
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = '1.6.0'
move_user_custom_actions.called = False
upgrade(args)
self.assertFalse(move_user_custom_actions.called)
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = CURR_AMBARI_VERSION
# test getAmbariProperties failed
args = reset_mocks()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = -1
fail = False
try:
upgrade(args)
except FatalException as e:
self.assertEquals("Error getting ambari properties", e.reason)
fail = True
self.assertTrue(fail)
# test get resource dir param failed
args = reset_mocks()
p = MagicMock()
get_ambari_properties_mock.reset_mock()
get_ambari_properties_2_mock.reset_mock()
get_ambari_properties_3_mock.reset_mock()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = p
p.__getitem__.side_effect = ["something", "something", "something", "something", KeyError("test exception")]
exists_mock.return_value = False
fail = False
try:
upgrade(args)
except FatalException as e:
fail = True
self.assertTrue(fail)
# test if some drivers are available in resources, and symlink available too
args = reset_mocks()
props = Properties()
props.process_pair(JDBC_DATABASE_NAME_PROPERTY, "something")
props.process_pair(RESOURCES_DIR_PROPERTY, "resources")
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = props
exists_mock.return_value = True
lexists_mock.return_value = True
isfile_mock.side_effect = [True, False, False]
upgrade(args)
self.assertTrue(os_remove_mock.called)
self.assertEquals(os_remove_mock.call_count, 1)
self.assertEquals(os_remove_mock.call_args[0][0], os.path.join("resources", "oracle-jdbc-driver.jar"))
self.assertEquals(os_symlink_mock.call_count, 1)
self.assertEquals(os_symlink_mock.call_args[0][0], os.path.join("resources", "ojdbc6.jar"))
self.assertEquals(os_symlink_mock.call_args[0][1], os.path.join("resources", "oracle-jdbc-driver.jar"))
pass
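# Windows variant of the upgrade test: same flow as the Linux case, but non-privileged runs must fail with an
# "administrator-level" message and the driver symlink assertions are skipped.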
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("__builtin__.open")
@patch("os.path.isfile")
@patch("os.path.exists")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
@patch.object(Properties, "store")
@patch("ambari_server.serverConfiguration.find_properties_file")
@patch("ambari_server.serverUpgrade.adjust_directory_permissions")
@patch("ambari_server.serverUpgrade.print_warning_msg")
@patch("ambari_server.serverUpgrade.read_ambari_user")
@patch("ambari_server.serverUpgrade.run_schema_upgrade")
@patch("ambari_server.serverUpgrade.update_ambari_properties")
@patch("ambari_server.serverUpgrade.parse_properties_file")
@patch("ambari_server.serverUpgrade.get_ambari_version")
@patch("ambari_server.serverConfiguration.get_ambari_version")
@patch("ambari_server.serverUpgrade.is_root")
@patch("ambari_server.dbConfiguration.get_ambari_properties")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.upgrade_local_repo")
@patch("ambari_server.serverUpgrade.move_user_custom_actions")
@patch("ambari_server.serverUpgrade.update_krb_jaas_login_properties")
def test_upgrade(self, update_krb_jaas_login_properties_mock, move_user_custom_actions, upgrade_local_repo_mock,
get_ambari_properties_mock, get_ambari_properties_2_mock, get_ambari_properties_3_mock,
is_root_mock, get_ambari_version_mock, get_ambari_version_2_mock,
parse_properties_file_mock,
update_ambari_properties_mock, run_schema_upgrade_mock,
read_ambari_user_mock, print_warning_msg_mock,
adjust_directory_permissions_mock,
find_properties_file_mock, properties_store_mock,
os_symlink_mock, os_remove_mock, lexists_mock, exists_mock, isfile_mock, open_mock):
def reset_mocks():
isfile_mock.reset_mock()
args = MagicMock()
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.default_database_host
del args.persistence_type
del args.init_db_script_file
del args.cleanup_db_script_file
del args.sid_or_sname
del args.jdbc_url
args.must_set_database_options = True
return args
args = reset_mocks()
properties = Properties()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = properties
update_ambari_properties_mock.return_value = 0
run_schema_upgrade_mock.return_value = 0
isfile_mock.return_value = False
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = CURR_AMBARI_VERSION
move_user_custom_actions.return_value = None
update_krb_jaas_login_properties_mock.return_value = -2
# Testing call under non-root
is_root_mock.return_value = False
try:
upgrade(args)
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("administrator-level" in fe.reason)
pass
args = reset_mocks()
# Testing calls under root
is_root_mock.return_value = True
# Testing with undefined custom user
read_ambari_user_mock.return_value = None
run_schema_upgrade_mock.return_value = 0
exists_mock.return_value = True
upgrade(args)
self.assertTrue(print_warning_msg_mock.called)
warning_args = print_warning_msg_mock.call_args[0][0]
self.assertTrue("custom ambari user" in warning_args)
self.assertTrue(upgrade_local_repo_mock.called)
self.assertTrue(move_user_custom_actions.called)
args = reset_mocks()
# Testing with defined custom user
read_ambari_user_mock.return_value = "ambari-custom-user"
upgrade(args)
self.assertTrue(adjust_directory_permissions_mock.called)
args = reset_mocks()
run_schema_upgrade_mock.return_value = 0
parse_properties_file_mock.called = False
move_user_custom_actions.called = False
retcode = upgrade(args)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(get_ambari_properties_2_mock.called)
self.assertNotEqual(-1, retcode)
self.assertTrue(parse_properties_file_mock.called)
self.assertTrue(run_schema_upgrade_mock.called)
self.assertTrue(move_user_custom_actions.called)
# Assert that move_user_custom_actions is called on upgrade to Ambari == 2.0.0
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = '2.0.0'
move_user_custom_actions.called = False
upgrade(args)
self.assertTrue(move_user_custom_actions.called)
# Assert that move_user_custom_actions is not called on upgrade to Ambari < 2.0.0
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = '1.6.0'
move_user_custom_actions.called = False
upgrade(args)
self.assertFalse(move_user_custom_actions.called)
get_ambari_version_2_mock.return_value = get_ambari_version_mock.return_value = CURR_AMBARI_VERSION
# test getAmbariProperties failed
args = reset_mocks()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = -1
fail = False
try:
upgrade(args)
except FatalException as e:
self.assertEquals("Error getting ambari properties", e.reason)
fail = True
self.assertTrue(fail)
# test get resource dir param failed
args = reset_mocks()
p = MagicMock()
get_ambari_properties_mock.reset_mock()
get_ambari_properties_2_mock.reset_mock()
get_ambari_properties_3_mock.reset_mock()
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = p
p.__getitem__.side_effect = ["something", "something", "something", "something", KeyError("test exception")]
exists_mock.return_value = False
fail = False
try:
upgrade(args)
except FatalException as e:
fail = True
self.assertTrue(fail)
# test if some drivers are available in resources, and symlink available too
args = reset_mocks()
props = Properties()
props.process_pair(JDBC_DATABASE_NAME_PROPERTY, "something")
props.process_pair(RESOURCES_DIR_PROPERTY, "resources")
get_ambari_properties_3_mock.return_value = get_ambari_properties_2_mock.return_value = \
get_ambari_properties_mock.return_value = props
exists_mock.return_value = True
lexists_mock.return_value = True
isfile_mock.side_effect = [True, False, False]
pass
def test_print_info_msg(self):
out = StringIO.StringIO()
sys.stdout = out
set_verbose(True)
print_info_msg("msg")
self.assertNotEqual("", out.getvalue())
sys.stdout = sys.__stdout__
pass
def test_print_error_msg(self):
out = StringIO.StringIO()
sys.stdout = out
set_verbose(True)
print_error_msg("msg")
self.assertNotEqual("", out.getvalue())
sys.stdout = sys.__stdout__
pass
def test_print_warning_msg(self):
out = StringIO.StringIO()
sys.stdout = out
set_verbose(True)
print_warning_msg("msg")
self.assertNotEqual("", out.getvalue())
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.userInput.get_choice_string_input")
def test_get_YN_input(self, get_choice_string_input_mock):
get_YN_input("prompt", "default")
self.assertTrue(get_choice_string_input_mock.called)
self.assertEqual(4, len(get_choice_string_input_mock.call_args_list[0][0]))
pass
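# Command-line parsing of database options for "ambari-server setup" on Linux: no arguments, an embedded database,
# a full set of remote options, incomplete options, an unknown database type, and an invalid port.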
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
def test_main_db_options(self, setup_mock):
base_args = ["ambari-server.py", "setup"]
db_args = ["--database", "postgres", "--databasehost", "somehost.net", "--databaseport", "12345",
"--databasename", "ambari", "--databaseusername", "ambari", "--databasepassword", "bigdata"]
#test no args
failed = False
sys.argv = list(base_args)
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(failed)
self.assertTrue(setup_mock.called)
self.assertTrue(setup_mock.call_args_list[0][0][0].must_set_database_options)
setup_mock.reset_mock()
# test embedded option
failed = False
sys.argv = list(base_args)
sys.argv.extend(db_args[-10:])
sys.argv.extend(["--database", "embedded"])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(failed)
self.assertTrue(setup_mock.called)
setup_mock.reset_mock()
#test full args
sys.argv = list(base_args)
sys.argv.extend(db_args)
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(failed)
self.assertTrue(setup_mock.called)
self.assertFalse(setup_mock.call_args_list[0][0][0].must_set_database_options)
setup_mock.reset_mock()
#test not full args
sys.argv = list(base_args)
sys.argv.extend(["--database", "postgres"])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(setup_mock.called)
self.assertTrue(failed)
setup_mock.reset_mock()
#test wrong database
failed = False
sys.argv = list(base_args)
sys.argv.extend(["--database", "unknown"])
sys.argv.extend(db_args[2:])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertTrue(failed)
self.assertFalse(setup_mock.called)
setup_mock.reset_mock()
#test wrong port check
failed = False
sys.argv = list(base_args)
sys.argv.extend(["--databaseport", "unknown"])
sys.argv.extend(db_args[:4])
sys.argv.extend(db_args[6:])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertTrue(failed)
self.assertFalse(setup_mock.called)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(_ambari_server_, "setup")
def test_main_db_options(self, setup_mock):
base_args = ["ambari-server.py", "setup"]
db_args = ["--databasehost", "somehost.net", "--databaseport", "12345",
"--databasename", "ambari", "--databaseusername", "ambari", "--databasepassword", "bigdata"]
#test no args
failed = False
sys.argv = list(base_args)
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(failed)
self.assertTrue(setup_mock.called)
self.assertTrue(setup_mock.call_args_list[0][0][0].must_set_database_options)
setup_mock.reset_mock()
#test full args
sys.argv = list(base_args)
sys.argv.extend(db_args)
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(failed)
self.assertTrue(setup_mock.called)
self.assertFalse(setup_mock.call_args_list[0][0][0].must_set_database_options)
setup_mock.reset_mock()
#test not full args
sys.argv = list(base_args)
sys.argv.extend(["--databasehost", "somehost.net"])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertFalse(setup_mock.called)
self.assertTrue(failed)
setup_mock.reset_mock()
#test wrong port check
failed = False
sys.argv = list(base_args)
sys.argv.extend(["--databaseport", "unknown"])
sys.argv.extend(db_args[:2])
sys.argv.extend(db_args[6:])
try:
_ambari_server_.mainBody()
except SystemExit:
failed = True
pass
self.assertTrue(failed)
self.assertFalse(setup_mock.called)
pass
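# prompt_db_properties: no prompting when must_set_database_options is False, only the Y/N question when advanced
# setup is declined, and the chosen database index is recorded when it is accepted.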
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.dbConfiguration_linux.print_info_msg")
def test_prompt_db_properties(self, print_info_msg_mock,
get_validated_string_input_mock, get_YN_input_mock):
def reset_mocks():
get_validated_string_input_mock.reset_mock()
get_YN_input_mock.reset_mock()
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
return args
args = reset_mocks()
set_silent(False)
#test not prompt
args.must_set_database_options = False
prompt_db_properties(args)
self.assertFalse(get_validated_string_input_mock.called)
self.assertFalse(get_YN_input_mock.called)
args = reset_mocks()
#test prompt
args.must_set_database_options = True
get_YN_input_mock.return_value = False
prompt_db_properties(args)
self.assertTrue(get_YN_input_mock.called)
self.assertFalse(get_validated_string_input_mock.called)
args = reset_mocks()
#test prompt advanced
args.must_set_database_options = True
get_YN_input_mock.return_value = True
get_validated_string_input_mock.return_value = "4"
prompt_db_properties(args)
self.assertTrue(get_YN_input_mock.called)
self.assertTrue(get_validated_string_input_mock.called)
self.assertEquals(args.database_index, 3)
pass
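# update_ambari_properties: values from the backup file are merged into ambari.properties, new properties are kept,
# conflicting values are replaced, the backup is renamed with a timestamp suffix, a missing backup file is not an
# error, and a missing ambari.properties file is.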
@patch("ambari_server.serverConfiguration.get_conf_dir")
def test_update_ambari_properties(self, get_conf_dir_mock):
from ambari_server import serverConfiguration # need to modify constants inside the module
properties = ["server.jdbc.user.name=ambari-server\n",
"server.jdbc.user.passwd=/etc/ambari-server/conf/password.dat\n",
"java.home=/usr/jdk64/jdk1.6.0_31\n",
"server.jdbc.database_name=ambari\n",
"ambari-server.user=ambari\n",
"agent.fqdn.service.url=URL\n"]
NEW_PROPERTY = 'some_new_property=some_value\n'
CHANGED_VALUE_PROPERTY = 'server.jdbc.database_name=should_not_overwrite_value\n'
get_conf_dir_mock.return_value = '/etc/ambari-server/conf'
(tf1, fn1) = tempfile.mkstemp()
(tf2, fn2) = tempfile.mkstemp()
configDefaults.AMBARI_PROPERTIES_BACKUP_FILE = fn1
os.close(tf1)
serverConfiguration.AMBARI_PROPERTIES_FILE = fn2
os.close(tf2)
with open(serverConfiguration.AMBARI_PROPERTIES_FILE, "w") as f:
f.write(NEW_PROPERTY)
f.write(CHANGED_VALUE_PROPERTY)
f.close()
with open(configDefaults.AMBARI_PROPERTIES_BACKUP_FILE, 'w') as f:
for line in properties:
f.write(line)
f.close()
#Call tested method
update_ambari_properties()
timestamp = datetime.datetime.now()
#RPMSAVE_FILE wasn't found
self.assertFalse(os.path.exists(configDefaults.AMBARI_PROPERTIES_BACKUP_FILE))
#Renamed RPMSAVE_FILE exists
self.assertTrue(os.path.exists(configDefaults.AMBARI_PROPERTIES_BACKUP_FILE
+ '.' + timestamp.strftime('%Y%m%d%H%M%S')))
with open(serverConfiguration.AMBARI_PROPERTIES_FILE, 'r') as f:
ambari_properties_content = f.readlines()
for line in properties:
if (line == "agent.fqdn.service.url=URL\n"):
if (not GET_FQDN_SERVICE_URL + "=URL\n" in ambari_properties_content) and (
line in ambari_properties_content):
self.fail()
else:
if not line in ambari_properties_content:
self.fail()
if not NEW_PROPERTY in ambari_properties_content:
self.fail()
if CHANGED_VALUE_PROPERTY in ambari_properties_content:
self.fail()
# Command should not fail if *.rpmsave file is missing
result = update_ambari_properties()
self.assertEquals(result, 0)
os.unlink(fn2)
# If the ambari.properties file is absent, "ambari-server upgrade" should fail.
(tf, fn) = tempfile.mkstemp()
configDefaults.AMBARI_PROPERTIES_BACKUP_FILE = fn
result = update_ambari_properties()
self.assertNotEquals(result, 0)
pass
@patch("ambari_server.properties.Properties.__init__")
@patch("ambari_server.serverConfiguration.search_file")
def test_update_ambari_properties_negative_case(self, search_file_mock, properties_mock):
search_file_mock.return_value = None
#Call tested method
self.assertEquals(0, update_ambari_properties())
self.assertFalse(properties_mock.called)
search_file_mock.return_value = False
#Call tested method
self.assertEquals(0, update_ambari_properties())
self.assertFalse(properties_mock.called)
search_file_mock.return_value = ''
#Call tested method
self.assertEquals(0, update_ambari_properties())
self.assertFalse(properties_mock.called)
pass
@patch("ambari_server.serverConfiguration.get_conf_dir")
def test_update_ambari_properties_without_some_properties(self, get_conf_dir_mock):
'''
Checks that update_ambari_properties adds the ambari-server.user property when it is absent.
'''
from ambari_server import serverConfiguration # need to modify constants inside the module
properties = ["server.jdbc.user.name=ambari-server\n",
"server.jdbc.user.passwd=/etc/ambari-server/conf/password.dat\n",
"java.home=/usr/jdk64/jdk1.6.0_31\n",
"server.os_type=redhat6\n"]
get_conf_dir_mock.return_value = '/etc/ambari-server/conf'
(tf1, fn1) = tempfile.mkstemp()
os.close(tf1)
(tf2, fn2) = tempfile.mkstemp()
os.close(tf2)
serverConfiguration.AMBARI_PROPERTIES_RPMSAVE_FILE = fn1
serverConfiguration.AMBARI_PROPERTIES_FILE = fn2
with open(serverConfiguration.AMBARI_PROPERTIES_RPMSAVE_FILE, 'w') as f:
for line in properties:
f.write(line)
#Call tested method
update_ambari_properties()
ambari_properties = Properties()
ambari_properties.load(open(fn2))
self.assertTrue(NR_USER_PROPERTY in ambari_properties.keys())
value = ambari_properties[NR_USER_PROPERTY]
self.assertEqual(value, "root")
self.assertTrue(OS_FAMILY_PROPERTY in ambari_properties.keys())
os.unlink(fn2)
pass
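# "ambari-server setup" with a remote database and no local client setup: when the user declines to continue with
# the remote connection properties, setup must abort with a "Remote database setup aborted." NonFatalException.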
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.firewall.run_os_command")
@patch("ambari_server.serverSetup.verify_setup_allowed")
@patch("sys.exit")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.dbConfiguration.get_validated_string_input")
@patch("ambari_server.dbConfiguration_linux.get_YN_input")
@patch("ambari_server.dbConfiguration_linux.get_validated_string_input")
@patch("ambari_server.dbConfiguration_linux.PGConfig._store_remote_properties")
@patch("ambari_server.dbConfiguration_linux.LinuxDBMSConfig.ensure_jdbc_driver_installed")
@patch("ambari_server.dbConfiguration_linux.read_password")
@patch("ambari_server.serverSetup.check_jdbc_drivers")
@patch("ambari_server.serverSetup.is_root")
@patch("ambari_server.serverSetup.check_ambari_user")
@patch("ambari_server.serverSetup.download_and_install_jdk")
@patch("ambari_server.serverSetup.configure_os_settings")
@patch('__builtin__.raw_input')
@patch("ambari_server.serverSetup.disable_security_enhancements")
@patch("ambari_server.serverSetup.expand_jce_zip_file")
def test_setup_remote_db_wo_client(self, expand_jce_zip_file_mock, check_selinux_mock, raw_input, configure_os_settings_mock,
download_jdk_mock, check_ambari_user_mock, is_root_mock, check_jdbc_drivers_mock,
read_password_mock, ensure_jdbc_driver_installed_mock, store_remote_properties_mock,
get_validated_string_input_0_mock, get_YN_input_0_mock,
get_validated_string_input_mock, get_YN_input,
exit_mock, verify_setup_allowed_method,
run_os_command_mock):
args = MagicMock()
args.jdbc_driver = None
args.jdbc_db = None
args.silent = False
del args.dbms
del args.database_index
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
raw_input.return_value = ""
is_root_mock.return_value = True
check_selinux_mock.return_value = (0, "")
run_os_command_mock.return_value = (3, "", "")
store_remote_properties_mock.return_value = 0
get_YN_input.return_value = True
get_validated_string_input_mock.side_effect = ["4"]
get_validated_string_input_0_mock.side_effect = ["localhost", "5432", "ambari", "ambari", "admin"]
get_YN_input_0_mock.return_value = False
read_password_mock.return_value = "encrypted_bigdata"
ensure_jdbc_driver_installed_mock.return_value = True
check_jdbc_drivers_mock.return_value = 0
check_ambari_user_mock.return_value = (0, False, 'user', None)
download_jdk_mock.return_value = 0
configure_os_settings_mock.return_value = 0
verify_setup_allowed_method.return_value = 0
expand_jce_zip_file_mock.return_value = 0
try:
setup(args)
self.fail("Should throw exception")
except NonFatalException as fe:
# Expected
self.assertTrue("Remote database setup aborted." in fe.reason)
pass
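# _store_remote_properties must write the remote JDBC properties and pick the right connection pool type:
# "internal" for Oracle and "c3p0" for MySQL.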
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.firewall.run_os_command")
@patch("sys.exit")
@patch("ambari_server.userInput.get_YN_input")
@patch("ambari_commons.os_utils.is_root")
@patch("ambari_server.dbConfiguration_linux.store_password_file")
@patch("__builtin__.raw_input")
def test_store_remote_properties(self, raw_input_mock, store_password_file_mock,
is_root_mock, get_YN_input, exit_mock,
run_os_command_mock
):
raw_input_mock.return_value = ""
is_root_mock.return_value = True
get_YN_input.return_value = False
run_os_command_mock.return_value = (3, "", "")
store_password_file_mock.return_value = "encrypted_bigdata"
import optparse
args = optparse.Values()
args.dbms = "oracle"
args.database_host = "localhost"
args.database_port = "1234"
args.database_name = "ambari"
args.postgres_schema = "ambari"
args.sid_or_sname = "foo"
args.database_username = "foo"
args.database_password = "foo"
properties0 = Properties()
properties = Properties()
factory = DBMSConfigFactory()
dbConfig = factory.create(args, properties0)
dbConfig._store_remote_properties(properties)
found = False
for n in properties.propertyNames():
if not found and n.startswith("server.jdbc.properties"):
found = True
self.assertTrue(found)
# verify that some properties exist
self.assertEquals("internal", properties.get_property(JDBC_CONNECTION_POOL_TYPE))
# now try with MySQL instead of Oracle to verify that the properties are different
args.dbms = "mysql"
args.database_index = 2
properties0 = Properties()
properties = Properties()
factory = DBMSConfigFactory()
dbConfig = factory.create(args, properties0)
dbConfig._store_remote_properties(properties)
# verify MySQL properties
self.assertEquals("c3p0", properties.get_property(JDBC_CONNECTION_POOL_TYPE))
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.find_properties_file")
def test_get_ambari_properties(self, find_properties_file_mock):
find_properties_file_mock.return_value = None
rcode = get_ambari_properties()
self.assertEqual(rcode, -1)
tf1 = tempfile.NamedTemporaryFile()
find_properties_file_mock.return_value = tf1.name
prop_name = 'name'
prop_value = 'val'
with open(tf1.name, 'w') as fout:
fout.write(prop_name + '=' + prop_value)
fout.close()
properties = get_ambari_properties()
self.assertEqual(properties[prop_name], prop_value)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.find_properties_file")
def test_get_ambari_properties(self, find_properties_file):
find_properties_file.return_value = None
rcode = get_ambari_properties()
self.assertEqual(rcode, -1)
tf1 = tempfile.NamedTemporaryFile(delete=False)
find_properties_file.return_value = tf1.name
tf1.close()
prop_name = 'name'
prop_value = 'val'
with open(tf1.name, 'w') as fout:
fout.write(prop_name + '=' + prop_value)
properties = get_ambari_properties()
self.assertEqual(properties[prop_name], prop_value)
self.assertEqual(properties.fileName, os.path.abspath(tf1.name))
sys.stdout = sys.__stdout__
pass
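# File utility tests: remove_file returns 1 when deletion raises and 0 on success; copy_file raises FatalException
# when the copy fails and must not raise when it succeeds.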
@patch("os.path.exists")
@patch("os.remove")
@patch("ambari_commons.os_utils.print_warning_msg")
def test_remove_file(self, printWarningMsgMock, removeMock, pathExistsMock):
def side_effect(*args, **kwargs):  # accept whatever args the mocked os.remove call passes through
raise Exception(-1, "Failed to delete!")
removeMock.side_effect = side_effect
pathExistsMock.return_value = 1
res = remove_file("/someNonExsistantDir/filename")
self.assertEquals(res, 1)
removeMock.side_effect = None
res = remove_file("/someExsistantDir/filename")
self.assertEquals(res, 0)
@patch("shutil.copyfile")
def test_copy_file(self, shutilCopyfileMock):
def side_effect(*args, **kwargs):  # accept whatever args the mocked shutil.copyfile call passes through
raise Exception(-1, "Failed to copy!")
shutilCopyfileMock.side_effect = side_effect
try:
copy_file("/tmp/psswd", "/someNonExsistantDir/filename")
self.fail("Exception on file not copied has not been thrown!")
except FatalException:
# Expected
pass
self.assertTrue(shutilCopyfileMock.called)
shutilCopyfileMock.side_effect = None
try:
copy_file("/tmp/psswd", "/root/psswd")
except FatalException:
self.fail("Exception on file copied should not be thrown!")
self.assertTrue(shutilCopyfileMock.called)
pass
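# ensure_jdbc_driver_installed (Oracle): a driver jar found on the host is copied into the resources dir and
# symlinked as oracle-jdbc-driver.jar; missing drivers fail in silent mode, may be supplied on a retry in
# interactive mode, and fail if still absent.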
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_linux.get_ambari_properties")
@patch("ambari_server.dbConfiguration_linux.print_error_msg")
@patch("ambari_server.dbConfiguration.print_error_msg")
@patch("ambari_server.dbConfiguration_linux.print_warning_msg")
@patch("__builtin__.raw_input")
@patch("glob.glob")
@patch("os.path.isdir")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
@patch("shutil.copy")
def test_ensure_jdbc_drivers_installed(self, shutil_copy_mock, os_symlink_mock, os_remove_mock, lexists_mock, isdir_mock, glob_mock,
raw_input_mock, print_warning_msg, print_error_msg_mock, print_error_msg_2_mock,
get_ambari_properties_mock):
out = StringIO.StringIO()
sys.stdout = out
def reset_mocks():
get_ambari_properties_mock.reset_mock()
shutil_copy_mock.reset_mock()
print_error_msg_mock.reset_mock()
print_warning_msg.reset_mock()
raw_input_mock.reset_mock()
args = MagicMock()
del args.database_index
del args.persistence_type
del args.silent
del args.sid_or_sname
del args.jdbc_url
args.dbms = "oracle"
return args
# Check positive scenario
drivers_list = [os.path.join(os.sep,'usr','share','java','ojdbc6.jar')]
resources_dir = os.sep + 'tmp'
props = Properties()
props.process_pair(RESOURCES_DIR_PROPERTY, resources_dir)
get_ambari_properties_mock.return_value = props
factory = DBMSConfigFactory()
args = reset_mocks()
glob_mock.return_value = drivers_list
isdir_mock.return_value = True
lexists_mock.return_value = True
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
self.assertEquals(os_symlink_mock.call_count, 1)
self.assertEquals(os_symlink_mock.call_args_list[0][0][0], os.path.join(os.sep,'tmp','ojdbc6.jar'))
self.assertEquals(os_symlink_mock.call_args_list[0][0][1], os.path.join(os.sep,'tmp','oracle-jdbc-driver.jar'))
self.assertTrue(rcode)
self.assertEquals(shutil_copy_mock.call_count, 1)
self.assertEquals(shutil_copy_mock.call_args_list[0][0][0], drivers_list[0])
self.assertEquals(shutil_copy_mock.call_args_list[0][0][1], resources_dir)
# Check negative scenarios
# Silent option, no drivers
set_silent(True)
args = reset_mocks()
glob_mock.return_value = []
failed = False
try:
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
except FatalException:
failed = True
self.assertTrue(print_error_msg_mock.called)
self.assertTrue(failed)
# Non-Silent option, no drivers
set_silent(False)
args = reset_mocks()
glob_mock.return_value = []
failed = False
try:
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
except FatalException:
failed = True
self.assertTrue(failed)
self.assertTrue(print_error_msg_mock.called)
# Non-Silent option, no drivers at first ask, drivers present after that
args = reset_mocks()
glob_mock.side_effect = [[], drivers_list]
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
self.assertTrue(rcode)
self.assertEquals(shutil_copy_mock.call_count, 1)
self.assertEquals(shutil_copy_mock.call_args_list[0][0][0], drivers_list[0])
self.assertEquals(shutil_copy_mock.call_args_list[0][0][1], resources_dir)
# Non-Silent option, no drivers at first ask, no drivers after that
args = reset_mocks()
glob_mock.side_effect = [[], []]
failed = False
try:
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
except FatalException:
failed = True
self.assertTrue(failed)
self.assertTrue(print_error_msg_mock.called)
# Failed to copy_files
args = reset_mocks()
glob_mock.side_effect = [drivers_list]
try:
dbms = factory.create(args, props)
rcode = dbms.ensure_jdbc_driver_installed(props)
except FatalException:
failed = True
self.assertTrue(failed)
sys.stdout = sys.__stdout__
pass
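# check_jdbc_drivers: a previously deployed Oracle driver found in the resources dir is symlinked as
# oracle-jdbc-driver.jar; when no driver is deployed, no symlink is created.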
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration.get_ambari_properties")
@patch("os.path.isdir")
@patch("os.path.isfile")
@patch("os.path.lexists")
@patch("os.remove")
@patch("os.symlink")
def test_check_jdbc_drivers(self, os_symlink_mock, os_remove_mock, lexists_mock, isfile_mock, isdir_mock,
get_ambari_properties_mock):
args = MagicMock()
# Check positive scenario
drivers_list = [os.path.join(os.sep,'usr','share','java','ojdbc6.jar')]
resources_dir = os.sep + 'tmp'
props = Properties()
props.process_pair(RESOURCES_DIR_PROPERTY, resources_dir)
get_ambari_properties_mock.return_value = props
isdir_mock.return_value = True
isfile_mock.side_effect = [True, False, False]
del args.database_index
del args.persistence_type
del args.silent
del args.sid_or_sname
del args.jdbc_url
lexists_mock.return_value = True
check_jdbc_drivers(args)
self.assertEquals(os_symlink_mock.call_count, 1)
self.assertEquals(os_symlink_mock.call_args_list[0][0][0], os.path.join(os.sep,'tmp','ojdbc6.jar'))
self.assertEquals(os_symlink_mock.call_args_list[0][0][1], os.path.join(os.sep,'tmp','oracle-jdbc-driver.jar'))
# Check negative scenarios
# No drivers deployed
get_ambari_properties_mock.reset_mock()
os_symlink_mock.reset_mock()
isfile_mock.side_effect = [False, False, False]
check_jdbc_drivers(args)
self.assertFalse(os_symlink_mock.called)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.find_properties_file")
def test_get_ambari_properties(self, find_properties_file_mock):
find_properties_file_mock.return_value = None
rcode = get_ambari_properties()
self.assertEqual(rcode, -1)
tf1 = tempfile.NamedTemporaryFile()
find_properties_file_mock.return_value = tf1.name
prop_name = 'name'
prop_value = 'val'
with open(tf1.name, 'w') as fout:
fout.write(prop_name + '=' + prop_value)
fout.close()
properties = get_ambari_properties()
self.assertEqual(properties[prop_name], prop_value)
self.assertEqual(properties.fileName, os.path.abspath(tf1.name))
sys.stdout = sys.__stdout__
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.find_properties_file")
def test_get_ambari_properties(self, find_properties_file_mock):
find_properties_file_mock.return_value = None
rcode = get_ambari_properties()
self.assertEqual(rcode, -1)
tf1 = tempfile.NamedTemporaryFile(delete=False)
find_properties_file_mock.return_value = tf1.name
prop_name = 'name'
prop_value = 'val'
tf1.close()
with open(tf1.name, 'w') as fout:
fout.write(prop_name + '=' + prop_value)
fout.close()
properties = get_ambari_properties()
self.assertEqual(properties[prop_name], prop_value)
self.assertEqual(properties.fileName, os.path.abspath(tf1.name))
sys.stdout = sys.__stdout__
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.check_database_name_property")
@patch("ambari_server.serverConfiguration.find_properties_file")
def test_parse_properties_file(self, find_properties_file_mock, check_database_name_property_mock):
check_database_name_property_mock.return_value = 1
tf1 = tempfile.NamedTemporaryFile(mode='r')
find_properties_file_mock.return_value = tf1.name
args = MagicMock()
parse_properties_file(args)
self.assertEquals(args.persistence_type, "local")
with open(tf1.name, 'w') as fout:
fout.write("\n")
fout.write(PERSISTENCE_TYPE_PROPERTY + "=remote")
args = MagicMock()
parse_properties_file(args)
self.assertEquals(args.persistence_type, "remote")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("os.path.isabs")
@patch("ambari_server.dbConfiguration.decrypt_password_for_alias")
@patch("ambari_server.dbConfiguration_linux.get_ambari_properties")
def test_configure_database_username_password_masterkey_persisted(self,
get_ambari_properties_method,
decrypt_password_for_alias_method,
path_isabs_method):
out = StringIO.StringIO()
sys.stdout = out
properties = Properties()
properties.process_pair(JDBC_USER_NAME_PROPERTY, "fakeuser")
properties.process_pair(JDBC_PASSWORD_PROPERTY, "${alias=somealias}")
properties.process_pair(JDBC_DATABASE_NAME_PROPERTY, "fakedbname")
properties.process_pair(SECURITY_KEY_IS_PERSISTED, "True")
get_ambari_properties_method.return_value = properties
decrypt_password_for_alias_method.return_value = "falepasswd"
args = MagicMock()
args.master_key = None
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.sid_or_sname
del args.jdbc_url
dbms = OracleConfig(args, properties, "local")
self.assertTrue(decrypt_password_for_alias_method.called)
self.assertFalse(path_isabs_method.called)
self.assertEquals("fakeuser", dbms.database_username)
self.assertEquals("falepasswd", dbms.database_password)
sys.stdout = sys.__stdout__
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration_linux.read_password")
def test_configure_database_password(self, read_password_method):
out = StringIO.StringIO()
sys.stdout = out
read_password_method.return_value = "fakepasswd"
result = LinuxDBMSConfig._configure_database_password(True)
self.assertTrue(read_password_method.called)
self.assertEquals("fakepasswd", result)
result = LinuxDBMSConfig._configure_database_password(True)
self.assertEquals("fakepasswd", result)
result = LinuxDBMSConfig._configure_database_password(True)
self.assertEquals("fakepasswd", result)
sys.stdout = sys.__stdout__
pass
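# Master key setup: with the key not persisted, setup_master_key must encrypt the stored JDBC, LDAP and truststore
# passwords behind aliases, mark encryption as enabled, and skip writing the master key to disk.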
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.remove_password_file")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.get_master_key_location")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_setup_master_key_not_persist(self, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method, save_master_key_method,
update_properties_method, get_master_key_location_method,
read_ambari_user_method, read_master_key_method,
save_passwd_for_alias_method, remove_password_file_method,
get_is_persisted_method, get_is_secure_method, exists_mock):
is_root_method.return_value = True
p = Properties()
FAKE_PWD_STRING = "fakepasswd"
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(LDAP_MGR_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
read_master_key_method.return_value = "aaa"
get_YN_input_method.return_value = False
read_ambari_user_method.return_value = None
save_passwd_for_alias_method.return_value = 0
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = False
exists_mock.return_value = False
setup_master_key()
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(read_ambari_user_method.called)
self.assertTrue(update_properties_method.called)
self.assertFalse(save_master_key_method.called)
self.assertTrue(save_passwd_for_alias_method.called)
self.assertEquals(3, save_passwd_for_alias_method.call_count)
self.assertTrue(remove_password_file_method.called)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
LDAP_MGR_PASSWORD_PROPERTY:
get_alias_string(LDAP_MGR_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.get_master_key_location")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_setup_master_key_persist(self, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method, save_master_key_method,
update_properties_method, get_master_key_location_method,
read_ambari_user_method, read_master_key_method,
get_is_persisted_method, get_is_secure_method, exists_mock,
save_passwd_for_alias_method):
is_root_method.return_value = True
p = Properties()
FAKE_PWD_STRING = "fakepasswd"
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
search_file_message.return_value = "propertiesfile"
read_master_key_method.return_value = "aaa"
get_YN_input_method.side_effect = [True, False]
read_ambari_user_method.return_value = None
get_is_persisted_method.return_value = (True, "filepath")
get_is_secure_method.return_value = False
exists_mock.return_value = False
save_passwd_for_alias_method.return_value = 0
setup_master_key()
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(read_ambari_user_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(save_master_key_method.called)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.read_master_key")
@patch("ambari_server.setupSecurity.remove_password_file")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.get_master_key_location")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_reset_master_key_persisted(self, is_root_method,
get_ambari_properties_method, search_file_message,
get_YN_input_method, get_validated_string_input_method,
save_master_key_method, update_properties_method,
read_passwd_for_alias_method, save_passwd_for_alias_method,
get_master_key_location_method,
read_ambari_user_method, exists_mock,
remove_password_file_method, read_master_key_method):
# Testing call under non-root
is_root_method.return_value = False
try:
setup_master_key()
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(LDAP_MGR_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
get_YN_input_method.side_effect = [True, True]
read_master_key_method.return_value = "aaa"
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
setup_master_key()
self.assertTrue(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(read_master_key_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(3, read_passwd_for_alias_method.call_count)
self.assertEquals(3, save_passwd_for_alias_method.call_count)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
LDAP_MGR_PASSWORD_PROPERTY:
get_alias_string(LDAP_MGR_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
@patch("ambari_server.setupSecurity.get_is_persisted")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.remove_password_file")
@patch("os.path.exists")
@patch("ambari_server.setupSecurity.read_ambari_user")
@patch("ambari_server.setupSecurity.get_master_key_location")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.read_passwd_for_alias")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.save_master_key")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_reset_master_key_not_persisted(self, is_root_method,
get_ambari_properties_method,
search_file_message, get_YN_input_method,
get_validated_string_input_method, save_master_key_method,
update_properties_method, read_passwd_for_alias_method,
save_passwd_for_alias_method,
get_master_key_location_method, read_ambari_user_method,
exists_mock, remove_password_file_method, get_is_secure_method,
get_is_persisted_method):
is_root_method.return_value = True
search_file_message.return_value = False
read_ambari_user_method.return_value = None
p = Properties()
FAKE_PWD_STRING = '${alias=fakealias}'
p.process_pair(JDBC_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(LDAP_MGR_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(SSL_TRUSTSTORE_PASSWORD_PROPERTY, FAKE_PWD_STRING)
p.process_pair(JDBC_RCA_PASSWORD_FILE_PROPERTY, FAKE_PWD_STRING)
get_ambari_properties_method.return_value = p
get_YN_input_method.side_effect = [True, False]
get_validated_string_input_method.return_value = "aaa"
read_passwd_for_alias_method.return_value = "fakepassword"
save_passwd_for_alias_method.return_value = 0
exists_mock.return_value = False
get_is_secure_method.return_value = True
get_is_persisted_method.return_value = (True, "filePath")
setup_master_key()
self.assertFalse(save_master_key_method.called)
self.assertTrue(get_YN_input_method.called)
self.assertTrue(get_validated_string_input_method.called)
self.assertTrue(update_properties_method.called)
self.assertTrue(read_passwd_for_alias_method.called)
self.assertEquals(3, read_passwd_for_alias_method.call_count)
self.assertEquals(3, save_passwd_for_alias_method.call_count)
self.assertFalse(save_master_key_method.called)
result_expected = {JDBC_PASSWORD_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
JDBC_RCA_PASSWORD_FILE_PROPERTY:
get_alias_string(JDBC_RCA_PASSWORD_ALIAS),
LDAP_MGR_PASSWORD_PROPERTY:
get_alias_string(LDAP_MGR_PASSWORD_ALIAS),
SSL_TRUSTSTORE_PASSWORD_PROPERTY:
get_alias_string(SSL_TRUSTSTORE_PASSWORD_ALIAS),
SECURITY_IS_ENCRYPTION_ENABLED: 'true'}
sorted_x = sorted(result_expected.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
pass
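# Per-OS helpers building the LDAP property maps expected by the setup_ldap tests below; the Windows flow prompts
# for fewer LDAP attributes than the default (Linux) flow.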
@staticmethod
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def _init_test_ldap_properties_map_invalid_input_1():
ldap_properties_map = \
{
LDAP_PRIMARY_URL_PROPERTY: "a:3",
"authentication.ldap.secondaryUrl": "b:2",
"authentication.ldap.useSSL": "false",
"authentication.ldap.usernameAttribute": "user",
"authentication.ldap.baseDn": "uid",
"authentication.ldap.bindAnonymously": "true",
"authentication.ldap.referral": "follow",
"client.security": "ldap",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
@staticmethod
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def _init_test_ldap_properties_map_invalid_input_1():
ldap_properties_map = \
{
LDAP_PRIMARY_URL_PROPERTY: "a:3",
"authentication.ldap.secondaryUrl": "b:2",
"authentication.ldap.useSSL": "false",
"authentication.ldap.userObjectClass": "user",
"authentication.ldap.usernameAttribute": "uid",
"authentication.ldap.groupObjectClass": "group",
"authentication.ldap.groupNamingAttr": "cn",
"authentication.ldap.groupMembershipAttr": "member",
"authentication.ldap.dnAttribute": "dn",
"authentication.ldap.baseDn": "base",
"authentication.ldap.referral": "follow",
"authentication.ldap.bindAnonymously": "true",
"client.security": "ldap",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
@staticmethod
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def _init_test_ldap_properties_map_invalid_input_2():
ldap_properties_map = \
{
LDAP_PRIMARY_URL_PROPERTY: "a:3",
"authentication.ldap.useSSL": "false",
"authentication.ldap.usernameAttribute": "user",
"authentication.ldap.baseDn": "uid",
"authentication.ldap.bindAnonymously": "true",
"authentication.ldap.referral": "follow",
"client.security": "ldap",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
@staticmethod
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def _init_test_ldap_properties_map_invalid_input_2():
ldap_properties_map = \
{
LDAP_PRIMARY_URL_PROPERTY: "a:3",
"authentication.ldap.useSSL": "false",
"authentication.ldap.userObjectClass": "user",
"authentication.ldap.usernameAttribute": "uid",
"authentication.ldap.groupObjectClass": "group",
"authentication.ldap.groupNamingAttr": "cn",
"authentication.ldap.groupMembershipAttr": "member",
"authentication.ldap.dnAttribute": "dn",
"authentication.ldap.baseDn": "base",
"authentication.ldap.referral": "follow",
"authentication.ldap.bindAnonymously": "true",
"client.security": "ldap",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
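# setup_ldap with invalid input: bad answers are re-prompted (14 raw_input calls on the first run) and an empty
# secondary URL is accepted and omitted from the stored properties (13 calls on the second run).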
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("__builtin__.raw_input")
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
def test_setup_ldap_invalid_input(self, is_root_method, get_ambari_properties_method,
search_file_message,
update_properties_method,
get_YN_input_method,
get_is_secure_method,
raw_input_mock):
out = StringIO.StringIO()
sys.stdout = out
is_root_method.return_value = True
search_file_message.return_value = "filepath"
configs = {SECURITY_MASTER_KEY_LOCATION: "filepath",
SECURITY_KEYS_DIR: tempfile.gettempdir(),
SECURITY_IS_ENCRYPTION_ENABLED: "true"
}
get_ambari_properties_method.return_value = configs
raw_input_mock.side_effect = ['a:3', 'b:b', 'hody', 'b:2', 'false', 'user', 'uid', 'group', 'cn', 'member', 'dn', 'base', 'follow', 'true']
set_silent(False)
get_YN_input_method.return_value = True
setup_ldap()
ldap_properties_map = TestAmbariServer._init_test_ldap_properties_map_invalid_input_1()
sorted_x = sorted(ldap_properties_map.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
self.assertTrue(get_YN_input_method.called)
self.assertEquals(14, raw_input_mock.call_count)
raw_input_mock.reset_mock()
raw_input_mock.side_effect = ['a:3', '', 'b:2', 'false', 'user', 'uid', 'group', 'cn', 'member', 'dn', 'base', 'follow', 'true']
setup_ldap()
ldap_properties_map = TestAmbariServer._init_test_ldap_properties_map_invalid_input_2()
sorted_x = sorted(ldap_properties_map.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
self.assertEquals(13, raw_input_mock.call_count)
sys.stdout = sys.__stdout__
pass
@staticmethod
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def _init_test_ldap_properties_map():
ldap_properties_map = \
{
"authentication.ldap.primaryUrl": "test",
"authentication.ldap.secondaryUrl": "test",
"authentication.ldap.useSSL": "false",
"authentication.ldap.usernameAttribute": "test",
"authentication.ldap.baseDn": "test",
"authentication.ldap.bindAnonymously": "false",
"authentication.ldap.managerDn": "test",
"authentication.ldap.referral": "test",
"client.security": "ldap",
LDAP_MGR_PASSWORD_PROPERTY: "ldap-password.dat",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
@staticmethod
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def _init_test_ldap_properties_map():
ldap_properties_map = \
{
"authentication.ldap.primaryUrl": "test",
"authentication.ldap.secondaryUrl": "test",
"authentication.ldap.useSSL": "false",
"authentication.ldap.userObjectClass": "test",
"authentication.ldap.usernameAttribute": "test",
"authentication.ldap.baseDn": "test",
"authentication.ldap.bindAnonymously": "false",
"authentication.ldap.managerDn": "test",
"authentication.ldap.groupObjectClass": "test",
"authentication.ldap.groupMembershipAttr": "test",
"authentication.ldap.groupNamingAttr": "test",
"authentication.ldap.dnAttribute": "test",
"authentication.ldap.referral": "test",
"client.security": "ldap",
LDAP_MGR_PASSWORD_PROPERTY: "ldap-password.dat",
"ambari.ldap.isConfigured": "true"
}
return ldap_properties_map
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.setupSecurity.get_is_secure")
@patch("ambari_server.setupSecurity.encrypt_password")
@patch("ambari_server.setupSecurity.save_passwd_for_alias")
@patch("ambari_server.setupSecurity.get_YN_input")
@patch("ambari_server.setupSecurity.update_properties_2")
@patch("ambari_server.setupSecurity.configure_ldap_password")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.read_password")
@patch("os.path.exists")
def test_setup_ldap(self, exists_method, read_password_method, is_root_method, get_ambari_properties_method,
search_file_message,
get_validated_string_input_method,
configure_ldap_password_method, update_properties_method,
get_YN_input_method, save_passwd_for_alias_method,
encrypt_password_method, get_is_secure_method):
out = StringIO.StringIO()
sys.stdout = out
# Testing call under non-root
is_root_method.return_value = False
try:
setup_ldap()
self.fail("Should throw exception")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
# Testing call under root
is_root_method.return_value = True
search_file_message.return_value = "filepath"
configs = {SECURITY_MASTER_KEY_LOCATION: "filepath",
SECURITY_KEYS_DIR: tempfile.gettempdir(),
SECURITY_IS_ENCRYPTION_ENABLED: "true"
}
get_ambari_properties_method.return_value = configs
configure_ldap_password_method.return_value = "password"
save_passwd_for_alias_method.return_value = 0
encrypt_password_method.return_value = get_alias_string(LDAP_MGR_PASSWORD_ALIAS)
def yn_input_side_effect(*args, **kwargs):
if 'TrustStore' in args[0]:
return False
else:
return True
#get_YN_input_method.side_effect = yn_input_side_effect()
get_YN_input_method.side_effect = [True, ]
def valid_input_side_effect(*args, **kwargs):
if 'Bind anonymously' in args[0]:
return 'false'
if args[1] == "true" or args[1] == "false":
return args[1]
else:
return "test"
get_validated_string_input_method.side_effect = valid_input_side_effect
setup_ldap()
ldap_properties_map = TestAmbariServer._init_test_ldap_properties_map()
sorted_x = sorted(ldap_properties_map.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
self.assertEquals(sorted_x, sorted_y)
self.assertTrue(update_properties_method.called)
self.assertTrue(configure_ldap_password_method.called)
self.assertTrue(get_validated_string_input_method.called)
self.assertTrue(get_YN_input_method.called)
# truststore not found case
def os_path_exists(*args, **kwargs):
if "bogus" in args[0]:
return False
else:
return True
pass
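# Prompt side effect for the SSL flow: answer the TrustStore path prompt with
# "bogus" twice (which os_path_exists above reports as missing) before
# returning "valid"; the retry count is kept as a function attribute.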
def input_enable_ssl(*args, **kwargs):
if 'Bind anonymously' in args[0]:
return 'false'
if "SSL" in args[0]:
return "true"
if "Path to TrustStore file" in args[0]:
if input_enable_ssl.path_counter < 2:
input_enable_ssl.path_counter += 1
return "bogus"
else:
return "valid"
if args[1] == "true" or args[1] == "false":
return args[1]
else:
return "test"
pass
input_enable_ssl.path_counter = 0
exists_method.side_effect = os_path_exists
get_validated_string_input_method.side_effect = input_enable_ssl
read_password_method.return_value = "password"
get_YN_input_method.reset_mock()
get_YN_input_method.side_effect = [True, True]
update_properties_method.reset_mock()
setup_ldap()
self.assertTrue(read_password_method.called)
ldap_properties_map = \
{
"authentication.ldap.primaryUrl": "test",
"authentication.ldap.secondaryUrl": "test",
"authentication.ldap.useSSL": "true",
"authentication.ldap.usernameAttribute": "test",
"authentication.ldap.baseDn": "test",
"authentication.ldap.dnAttribute": "test",
"authentication.ldap.bindAnonymously": "false",
"authentication.ldap.managerDn": "test",
"client.security": "ldap",
"ssl.trustStore.type": "test",
"ssl.trustStore.path": "valid",
"ssl.trustStore.password": "password",
LDAP_MGR_PASSWORD_PROPERTY: get_alias_string(LDAP_MGR_PASSWORD_ALIAS)
}
sorted_x = sorted(ldap_properties_map.iteritems(), key=operator.itemgetter(0))
sorted_y = sorted(update_properties_method.call_args[0][1].iteritems(),
key=operator.itemgetter(0))
sys.stdout = sys.__stdout__
pass
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_all(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock):
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
properties.process_pair(CLIENT_API_PORT_PROPERTY, '8080')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = True
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
sync_ldap(options)
url = '{0}://{1}:{2!s}{3}'.format('http', '127.0.0.1', '8080', '/api/v1/ldap_sync_events')
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(url, str(request.get_full_url()))
self.assertEquals('[{"Event": {"specs": [{"principal_type": "users", "sync_type": "all"}, {"principal_type": "groups", "sync_type": "all"}]}}]', request.data)
self.assertTrue(response.getcode.called)
self.assertTrue(response.read.called)
pass
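# For reference, the REST exchange exercised above (derived solely from the
# mocked URL, request body and polling responses in this test):
#   POST http://127.0.0.1:8080/api/v1/ldap_sync_events
#   body: [{"Event": {"specs": [{"principal_type": "users", "sync_type": "all"},
#                               {"principal_type": "groups", "sync_type": "all"}]}}]
# followed by repeated GETs on the returned event href until the reported
# status moves from RUNNING to COMPLETE (or ERROR).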
@patch("__builtin__.open")
@patch("os.path.exists")
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_users(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock, os_path_exists_mock, open_mock):
os_path_exists_mock.return_value = 1
f = MagicMock()
f.__enter__().read.return_value = "bob, tom"
open_mock.return_value = f
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = False
options.ldap_sync_existing = False
options.ldap_sync_users = 'users.txt'
options.ldap_sync_groups = None
sync_ldap(options)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals('[{"Event": {"specs": [{"principal_type": "users", "sync_type": "specific", "names": "bob, tom"}]}}]', request.data)
self.assertTrue(response.getcode.called)
self.assertTrue(response.read.called)
pass
@patch("__builtin__.open")
@patch("os.path.exists")
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_groups(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock, os_path_exists_mock, open_mock):
os_path_exists_mock.return_value = 1
f = MagicMock()
f.__enter__().read.return_value = "group1, group2"
open_mock.return_value = f
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = False
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = 'groups.txt'
sync_ldap(options)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals('[{"Event": {"specs": [{"principal_type": "groups", "sync_type": "specific", "names": "group1, group2"}]}}]', request.data)
self.assertTrue(response.getcode.called)
self.assertTrue(response.read.called)
pass
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_ssl(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock):
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
properties.process_pair(SSL_API, 'true')
properties.process_pair(SSL_API_PORT, '8443')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "https://c6401.ambari.apache.org:8443/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = True
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
sync_ldap(options)
url = '{0}://{1}:{2!s}{3}'.format('https', '127.0.0.1', '8443', '/api/v1/ldap_sync_events')
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(url, str(request.get_full_url()))
self.assertTrue(response.getcode.called)
self.assertTrue(response.read.called)
pass
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_existing(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock):
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = False
options.ldap_sync_existing = True
options.ldap_sync_users = None
options.ldap_sync_groups = None
sync_ldap(options)
self.assertTrue(response.getcode.called)
self.assertTrue(response.read.called)
pass
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_no_sync_mode(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock):
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "RUNNING","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}',
'{"Event":{"status" : "COMPLETE","summary" : {"groups" : {"created" : 1,"removed" : 0,"updated" : 0},"memberships" : {"created" : 5,"removed" : 0},"users" : {"created" : 5,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
del options.ldap_sync_all
del options.ldap_sync_existing
del options.ldap_sync_users
del options.ldap_sync_groups
try:
sync_ldap(options)
self.fail("Should fail with exception")
except FatalException as e:
pass
pass
@patch("urllib2.urlopen")
@patch("ambari_server.setupSecurity.get_validated_string_input")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.is_root")
def test_ldap_sync_error_status(self, is_root_method, is_server_runing_mock, get_ambari_properties_mock,
get_validated_string_input_mock, urlopen_mock):
is_root_method.return_value = True
is_server_runing_mock.return_value = (True, 0)
properties = Properties()
properties.process_pair(IS_LDAP_CONFIGURED, 'true')
get_ambari_properties_mock.return_value = properties
get_validated_string_input_mock.side_effect = ['admin', 'admin']
response = MagicMock()
response.getcode.side_effect = [201, 200]
response.read.side_effect = ['{"resources" : [{"href" : "http://c6401.ambari.apache.org:8080/api/v1/ldap_sync_events/16","Event" : {"id" : 16}}]}',
'{"Event":{"status" : "ERROR","status_detail" : "Error!!","summary" : {"groups" : {"created" : 0,"removed" : 0,"updated" : 0},"memberships" : {"created" : 0,"removed" : 0},"users" : {"created" : 0,"removed" : 0,"updated" : 0}}}}']
urlopen_mock.return_value = response
options = MagicMock()
options.ldap_sync_all = False
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
try:
sync_ldap(options)
self.fail("Should fail with exception")
except FatalException as e:
pass
pass
@patch("urllib2.urlopen")
@patch("urllib2.Request")
@patch("base64.encodestring")
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.get_ambari_properties")
@patch("ambari_server.setupSecurity.get_validated_string_input")
def test_sync_ldap_forbidden(self, get_validated_string_input_method, get_ambari_properties_method,
is_server_runing_method, is_root_method,
encodestring_method, request_constructor, urlopen_method):
options = MagicMock()
options.ldap_sync_all = True
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
is_root_method.return_value = False
try:
sync_ldap(options)
self.fail("Should throw exception if not root")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
is_root_method.return_value = True
is_server_runing_method.return_value = (None, None)
try:
sync_ldap(options)
self.fail("Should throw exception if ambari is stopped")
except FatalException as fe:
# Expected
self.assertTrue("not running" in fe.reason)
pass
is_server_runing_method.return_value = (True, None)
configs = MagicMock()
configs.get_property.return_value = None
get_ambari_properties_method.return_value = configs
try:
sync_ldap(options)
self.fail("Should throw exception if ldap is not configured")
except FatalException as fe:
# Expected
self.assertTrue("not configured" in fe.reason)
pass
configs.get_property.return_value = 'true'
get_validated_string_input_method.return_value = 'admin'
encodestring_method.return_value = 'qwe123'
requestMocks = [MagicMock()]
request_constructor.side_effect = requestMocks
response = MagicMock()
response.getcode.return_value = 403
urlopen_method.return_value = response
try:
sync_ldap(options)
self.fail("Should throw exception if return code != 200")
except FatalException as fe:
# Expected
self.assertTrue("status code" in fe.reason)
pass
pass
@patch("ambari_server.setupSecurity.is_root")
def test_sync_ldap_not_root(self, is_root_method):
is_root_method.return_value = False
options = MagicMock()
options.ldap_sync_all = True
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
try:
sync_ldap(options)
self.fail("Should throw exception if not root")
except FatalException as fe:
# Expected
self.assertTrue("root-level" in fe.reason)
pass
pass
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.is_server_runing")
def test_sync_ldap_ambari_stopped(self, is_server_runing_method, is_root_method):
is_root_method.return_value = True
is_server_runing_method.return_value = (None, None)
options = MagicMock()
options.ldap_sync_all = True
options.ldap_sync_existing = False
options.ldap_sync_users = None
options.ldap_sync_groups = None
try:
sync_ldap(options)
self.fail("Should throw exception if ambari is stopped")
except FatalException as fe:
# Expected
self.assertTrue("not running" in fe.reason)
pass
pass
@patch("ambari_server.setupSecurity.is_root")
@patch("ambari_server.setupSecurity.is_server_runing")
@patch("ambari_server.setupSecurity.get_ambari_properties")
def test_sync_ldap_not_configured(self, get_ambari_properties_method,
is_server_runing_method, is_root_method):
is_root_method.return_value = True
is_server_runing_method.return_value = (True, None)
configs = MagicMock()
configs.get_property.return_value = None
get_ambari_properties_method.return_value = configs
options = MagicMock()
options.ldap_sync_all = True
del options.ldap_sync_existing
del options.ldap_sync_users
del options.ldap_sync_groups
try:
sync_ldap(options)
self.fail("Should throw exception if ldap is not configured")
except FatalException as fe:
# Expected
self.assertTrue("not configured" in fe.reason)
pass
pass
@patch("__builtin__.open")
@patch("os.path.exists")
def test_get_ldap_event_spec_names(self, os_path_exists_mock, open_mock):
os_path_exists_mock.return_value = 1
f = MagicMock()
f.__enter__().read.return_value = "\n\n\t some group, \tanother group, \n\t\tgrp, \ngroup*\n\n\n\n"
open_mock.return_value = f
bodies = [{"Event":{"specs":[]}}]
body = bodies[0]
events = body['Event']
specs = events['specs']
new_specs = [{"principal_type":"groups","sync_type":"specific","names":""}]
get_ldap_event_spec_names("groups.txt", specs, new_specs)
self.assertEquals("[{'Event': {'specs': [{'principal_type': 'groups', 'sync_type': 'specific', 'names': ' some group, another group, grp, group*'}]}}]", str(bodies))
pass
@patch("ambari_server.setupSecurity.read_password")
def test_configure_ldap_password(self, read_password_method):
out = StringIO.StringIO()
sys.stdout = out
read_password_method.return_value = "blah"
configure_ldap_password()
self.assertTrue(read_password_method.called)
sys.stdout = sys.__stdout__
pass
@patch("ambari_server.userInput.get_validated_string_input")
def test_read_password(self, get_validated_string_input_method):
out = StringIO.StringIO()
sys.stdout = out
passwordDefault = ""
passwordPrompt = 'Enter Manager Password* : '
passwordPattern = ".*"
passwordDescr = "Invalid characters in password."
get_validated_string_input_method.side_effect = ['', 'aaa', 'aaa']
password = read_password(passwordDefault, passwordPattern,
passwordPrompt, passwordDescr)
self.assertEquals(3, get_validated_string_input_method.call_count)
self.assertEquals('aaa', password)
get_validated_string_input_method.reset_mock()
get_validated_string_input_method.side_effect = ['aaa', 'aaa']
password = read_password(passwordDefault, passwordPattern,
passwordPrompt, passwordDescr)
self.assertEquals(2, get_validated_string_input_method.call_count)
self.assertEquals('aaa', password)
get_validated_string_input_method.reset_mock()
get_validated_string_input_method.side_effect = ['aaa']
password = read_password('aaa', passwordPattern,
passwordPrompt, passwordDescr)
self.assertEquals(1, get_validated_string_input_method.call_count)
self.assertEquals('aaa', password)
sys.stdout = sys.__stdout__
pass
def test_generate_random_string(self):
random_str_len = 100
str1 = generate_random_string(random_str_len)
self.assertTrue(len(str1) == random_str_len)
str2 = generate_random_string(random_str_len)
self.assertTrue(str1 != str2)
pass
@patch("__builtin__.open")
@patch("ambari_server.serverConfiguration.search_file")
@patch("ambari_server.serverConfiguration.backup_file_in_temp")
def test_update_properties_2(self, backup_file_in_temp_mock, search_file_mock, open_mock):
conf_file = "ambari.properties"
propertyMap = {"1": "1", "2": "2"}
properties = MagicMock()
f = MagicMock(name="file")
# f.__enter__.return_value = f #mimic file behavior
search_file_mock.return_value = conf_file
open_mock.return_value = f
update_properties_2(properties, propertyMap)
properties.store_ordered.assert_called_with(f.__enter__.return_value)
backup_file_in_temp_mock.assert_called_with(conf_file)
self.assertEquals(2, properties.removeOldProp.call_count)
self.assertEquals(2, properties.process_pair.call_count)
properties = MagicMock()
backup_file_in_temp_mock.reset_mock()
open_mock.reset_mock()
update_properties_2(properties, None)
properties.store_ordered.assert_called_with(f.__enter__.return_value)
backup_file_in_temp_mock.assert_called_with(conf_file)
self.assertFalse(properties.removeOldProp.called)
self.assertFalse(properties.process_pair.called)
pass
def test_regexps(self):
res = re.search(REGEX_HOSTNAME_PORT, "")
self.assertTrue(res is None)
res = re.search(REGEX_HOSTNAME_PORT, "ddd")
self.assertTrue(res is None)
res = re.search(REGEX_HOSTNAME_PORT, "gg:ff")
self.assertTrue(res is None)
res = re.search(REGEX_HOSTNAME_PORT, "gg:55444325")
self.assertTrue(res is None)
res = re.search(REGEX_HOSTNAME_PORT, "gg:555")
self.assertTrue(res is not None)
res = re.search(REGEX_TRUE_FALSE, "")
self.assertTrue(res is not None)
res = re.search(REGEX_TRUE_FALSE, "t")
self.assertTrue(res is None)
res = re.search(REGEX_TRUE_FALSE, "trrrr")
self.assertTrue(res is None)
res = re.search(REGEX_TRUE_FALSE, "true|false")
self.assertTrue(res is None)
res = re.search(REGEX_TRUE_FALSE, "true")
self.assertTrue(res is not None)
res = re.search(REGEX_TRUE_FALSE, "false")
self.assertTrue(res is not None)
res = re.search(REGEX_ANYTHING, "")
self.assertTrue(res is not None)
res = re.search(REGEX_ANYTHING, "t")
self.assertTrue(res is not None)
res = re.search(REGEX_ANYTHING, "trrrr")
self.assertTrue(res is not None)
pass
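# Illustrative patterns only (assumptions, not the real ambari_server constants):
# regexes of the following shape would satisfy the assertions in test_regexps.
_EXAMPLE_REGEX_HOSTNAME_PORT = r"^[A-Za-z0-9.\-]+:[0-9]{1,5}$"  # host:port with a 1-5 digit port
_EXAMPLE_REGEX_TRUE_FALSE = r"^(true|false)?$"  # empty string, "true" or "false"
_EXAMPLE_REGEX_ANYTHING = r".*"  # matches any input, including the empty string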
def get_sample(self, sample):
"""
Returns sample file content as string with normalized line endings
"""
path = self.get_samples_dir(sample)
return self.get_file_string(path)
def get_file_string(self, file):
"""
Returns file content as string with normalized line endings
"""
string = open(file, 'r').read()
return self.normalize(string)
def normalize(self, string):
"""
Normalizes line endings in the string to the platform default (os.linesep)
"""
return string.replace("\n", os.linesep)
def get_samples_dir(self, sample):
"""
Returns full file path by sample name
"""
testdir = os.path.dirname(__file__)
return os.path.dirname(testdir) + os.sep + "resources" + os.sep \
+ 'TestAmbaryServer.samples/' + sample
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.dbConfiguration_linux.get_ambari_properties")
def test_is_jdbc_user_changed(self, get_ambari_properties_mock):
previous_user = "previous_user"
new_user = "new_user"
props = Properties()
props.process_pair(JDBC_USER_NAME_PROPERTY, previous_user)
get_ambari_properties_mock.return_value = props
#check if users are different
result = PGConfig._is_jdbc_user_changed(new_user)
self.assertTrue(result)
#check if users are equal
result = PGConfig._is_jdbc_user_changed(previous_user)
self.assertFalse(result)
#check if one of users is None
result = PGConfig._is_jdbc_user_changed(None)
self.assertEqual(None, result)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch("ambari_server.serverConfiguration.write_property")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverConfiguration.get_ambari_version")
def test_check_database_name_property(self, get_ambari_version_mock, get_ambari_properties_mock, write_property_mock):
parser = OptionParser()
parser.add_option('--database', default=None, help="Database to use embedded|oracle|mysql|mssql|postgres", dest="dbms")
args = parser.parse_args()
# negative case
get_ambari_properties_mock.return_value = {JDBC_DATABASE_NAME_PROPERTY: ""}
try:
result = check_database_name_property()
self.fail("Should fail with exception")
except FatalException as e:
self.assertTrue('DB Name property not set in config file.' in e.reason)
# positive case
dbname = "ambari"
get_ambari_properties_mock.reset_mock()
get_ambari_properties_mock.return_value = {JDBC_DATABASE_NAME_PROPERTY: dbname}
try:
result = check_database_name_property()
except FatalException:
self.fail("Setup should be successful")
# Check upgrade. In Ambari < 1.7.1 "database" property contained db name for local db
dbname = "ambari"
database = "ambari"
persistence = "local"
get_ambari_properties_mock.reset_mock()
get_ambari_properties_mock.return_value = {JDBC_DATABASE_NAME_PROPERTY: dbname,
JDBC_DATABASE_PROPERTY: database,
PERSISTENCE_TYPE_PROPERTY: persistence}
try:
result = check_database_name_property(upgrade=True)
except FatalException:
self.fail("Setup should be successful")
self.assertTrue(write_property_mock.called)
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_commons.firewall.run_os_command")
@patch("ambari_server.dbConfiguration_linux.PGConfig._is_jdbc_user_changed")
@patch("ambari_server.serverSetup.verify_setup_allowed")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.configure_os_settings")
@patch("ambari_server.serverSetup.download_and_install_jdk")
@patch.object(PGConfig, "_configure_postgres")
@patch.object(PGConfig, "_check_postgre_up")
@patch("ambari_server.serverSetup.check_ambari_user")
@patch("ambari_server.serverSetup.check_jdbc_drivers")
@patch("ambari_server.serverSetup.check_selinux")
@patch("ambari_server.serverSetup.is_root")
@patch.object(PGConfig, "_setup_db")
@patch("ambari_server.serverSetup.get_is_secure")
@patch("ambari_server.dbConfiguration_linux.store_password_file")
@patch("ambari_server.serverSetup.extract_views")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
@patch("sys.exit")
@patch("__builtin__.raw_input")
@patch("ambari_server.serverSetup.expand_jce_zip_file")
def test_ambariServerSetupWithCustomDbName(self, expand_jce_zip_file_mock, raw_input, exit_mock, adjust_dirs_mock,
extract_views_mock, store_password_file_mock,
get_is_secure_mock, setup_db_mock, is_root_mock, #is_local_database_mock,
check_selinux_mock, check_jdbc_drivers_mock, check_ambari_user_mock,
check_postgre_up_mock, configure_postgres_mock,
download_jdk_mock, configure_os_settings_mock, get_YN_input,
verify_setup_allowed_method, is_jdbc_user_changed_mock,
run_os_command_mock):
args = MagicMock()
raw_input.return_value = ""
get_YN_input.return_value = False
verify_setup_allowed_method.return_value = 0
is_root_mock.return_value = True
check_selinux_mock.return_value = 0
check_ambari_user_mock.return_value = (0, False, 'user', None)
check_jdbc_drivers_mock.return_value = 0
check_postgre_up_mock.return_value = "running", 0, "", ""
#is_local_database_mock.return_value = True
configure_postgres_mock.return_value = 0, "", ""
download_jdk_mock.return_value = 0
configure_os_settings_mock.return_value = 0
is_jdbc_user_changed_mock.return_value = False
setup_db_mock.return_value = (0, None, None)
get_is_secure_mock.return_value = False
store_password_file_mock.return_value = "password"
extract_views_mock.return_value = 0
run_os_command_mock.return_value = 3,"",""
new_db = "newDBName"
args.dbms = "postgres"
args.database_name = new_db
args.postgres_schema = new_db
args.database_username = "user"
args.database_password = "password"
args.jdbc_driver= None
args.jdbc_db = None
args.must_set_database_options = True
del args.database_index
del args.persistence_type
tempdir = tempfile.gettempdir()
prop_file = os.path.join(tempdir, "ambari.properties")
with open(prop_file, "w") as f:
f.write("server.jdbc.database_name=oldDBName")
f.close()
os.environ[AMBARI_CONF_VAR] = tempdir
try:
result = setup(args)
except FatalException as ex:
self.fail("Setup should be successful")
properties = get_ambari_properties()
self.assertTrue(JDBC_DATABASE_NAME_PROPERTY in properties.keys())
value = properties[JDBC_DATABASE_NAME_PROPERTY]
self.assertEqual(value, new_db)
del os.environ[AMBARI_CONF_VAR]
os.remove(prop_file)
pass
@only_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.serverSetup.service_setup")
@patch("ambari_server.dbConfiguration_windows.MSSQLConfig._execute_db_script")
@patch("ambari_server.dbConfiguration_windows.store_password_file")
@patch("ambari_server.dbConfiguration_windows.MSSQLConfig._is_jdbc_driver_installed")
@patch("ambari_server.serverSetup.verify_setup_allowed")
@patch("ambari_server.serverSetup.get_YN_input")
@patch("ambari_server.serverSetup.configure_os_settings")
@patch("ambari_server.serverSetup.download_and_install_jdk")
@patch("ambari_server.serverSetup.check_firewall")
@patch("ambari_server.serverSetup.check_ambari_user")
@patch("ambari_server.serverSetup.check_jdbc_drivers")
@patch("ambari_server.serverSetup.is_root")
@patch("ambari_server.serverSetup.extract_views")
@patch("ambari_server.serverSetup.adjust_directory_permissions")
def test_ambariServerSetupWithCustomDbName(self,
adjust_dirs_mock,
extract_views_mock,
is_root_mock,
check_jdbc_drivers_mock,
check_ambari_user_mock,
check_firewall_mock,
download_jdk_mock,
configure_os_settings_mock,
get_YN_input,
verify_setup_allowed_method,
is_jdbc_driver_installed_mock,
store_password_file_mock,
execute_db_script_mock,
service_setup_mock):
args = MagicMock()
get_YN_input.return_value = False
verify_setup_allowed_method.return_value = 0
is_root_mock.return_value = True
check_ambari_user_mock.return_value = (0, False, 'user', None)
check_jdbc_drivers_mock.return_value = 0
download_jdk_mock.return_value = 0
configure_os_settings_mock.return_value = 0
is_jdbc_driver_installed_mock.return_value = True
store_password_file_mock.return_value = "password.dat"
extract_views_mock.return_value = 0
new_db = "newDBName"
del args.dbms
del args.database_index
del args.database_host
del args.database_port
args.database_name = new_db
args.database_username = "user"
args.database_password = "password"
del args.database_windows_auth
args.jdbc_driver= None
args.jdbc_db = None
args.must_set_database_options = True
del args.default_database_host
del args.persistence_type
del args.init_db_script_file
del args.cleanup_db_script_file
tempdir = tempfile.gettempdir()
prop_file = os.path.join(tempdir, "ambari.properties")
with open(prop_file, "w") as f:
f.write("server.jdbc.database_name=oldDBName")
f.close()
os.environ[AMBARI_CONF_VAR] = tempdir
try:
result = setup(args)
except FatalException as ex:
self.fail("Setup should be successful")
properties = get_ambari_properties()
self.assertTrue(JDBC_DATABASE_NAME_PROPERTY in properties.keys())
value = properties[JDBC_DATABASE_NAME_PROPERTY]
self.assertEqual(value, new_db)
self.assertEqual(store_password_file_mock.call_count, 2)
self.assertEqual(execute_db_script_mock.call_count, 2)
del os.environ[AMBARI_CONF_VAR]
os.remove(prop_file)
pass
def test_is_valid_filepath(self):
temp_dir = tempfile.gettempdir()
temp_file = tempfile.NamedTemporaryFile(mode='r')
# Correct path to an existing file
self.assertTrue(is_valid_filepath(temp_file.name))
# Correct path to an existing directory
self.assertFalse(is_valid_filepath(temp_dir), \
'is_valid_filepath(path) should return False if path is a directory')
# Incorrect path
self.assertFalse(is_valid_filepath(''))
pass
@patch("os.listdir")
@patch("os.path.exists")
@patch("ambari_server.serverUpgrade.load_stack_values")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.run_metainfo_upgrade")
def test_upgrade_local_repo(self,
run_metainfo_upgrade_mock,
get_ambari_properties_mock,
load_stack_values_mock,
os_path_exists_mock,
os_listdir_mock):
from mock.mock import call
args = MagicMock()
args.persistence_type = "local"
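# Stub the stack value files: the plain repo file reports the old URL while the
# HDPLocal variant reports the new one, so the upgrade should emit 'http://newurl'.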
def load_values_side_effect(*args, **kwargs):
res = {}
res['a'] = 'http://oldurl'
if -1 != args[1].find("HDPLocal"):
res['a'] = 'http://newurl'
return res
load_stack_values_mock.side_effect = load_values_side_effect
properties = Properties()
get_ambari_properties_mock.return_value = properties
os_path_exists_mock.return_value = 1
os_listdir_mock.return_value = ['1.1']
upgrade_local_repo(args)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(load_stack_values_mock.called)
self.assertTrue(run_metainfo_upgrade_mock.called)
run_metainfo_upgrade_mock.assert_called_with({'a': 'http://newurl'})
pass
@patch("os.listdir")
@patch("os.path.exists")
@patch("ambari_server.serverUpgrade.load_stack_values")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.run_metainfo_upgrade")
def test_upgrade_local_repo_nochange(self,
run_metainfo_upgrade_mock,
get_ambari_properties_mock,
load_stack_values_mock,
os_path_exists_mock,
os_listdir_mock):
from mock.mock import call
args = MagicMock()
args.persistence_type = "local"
def load_values_side_effect(*args, **kwargs):
res = {}
res['a'] = 'http://oldurl'
return res
load_stack_values_mock.side_effect = load_values_side_effect
properties = Properties()
get_ambari_properties_mock.return_value = properties
os_path_exists_mock.return_value = 1
os_listdir_mock.return_value = ['1.1']
upgrade_local_repo(args)
self.assertTrue(get_ambari_properties_mock.called)
self.assertTrue(load_stack_values_mock.called)
self.assertTrue(run_metainfo_upgrade_mock.called)
run_metainfo_upgrade_mock.assert_called_with({})
pass
@patch("os.path.exists")
@patch.object(ResourceFilesKeeper, "perform_housekeeping")
def test_refresh_stack_hash(self,
perform_housekeeping_mock, path_exists_mock):
path_exists_mock.return_value = True
properties = Properties()
refresh_stack_hash(properties)
self.assertTrue(perform_housekeeping_mock.called)
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.dbConfiguration_linux.print_error_msg")
def test_change_objects_owner_both(self,
print_error_msg_mock,
run_os_command_mock):
args = MagicMock()
args.master_key = None
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
stdout = " stdout "
stderr = " stderr "
run_os_command_mock.return_value = 1, stdout, stderr
set_verbose(True)
self.assertRaises(FatalException, change_objects_owner, args)
print_error_msg_mock.assert_any_call("stderr:\nstderr")
print_error_msg_mock.assert_any_call("stdout:\nstdout")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.dbConfiguration_linux.print_error_msg")
def test_change_objects_owner_only_stdout(self,
print_error_msg_mock,
run_os_command_mock):
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
stdout = " stdout "
stderr = ""
run_os_command_mock.return_value = 1, stdout, stderr
set_verbose(True)
self.assertRaises(FatalException, change_objects_owner, args)
print_error_msg_mock.assert_called_once_with("stdout:\nstdout")
pass
@not_for_platform(PLATFORM_WINDOWS)
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
@patch("ambari_server.dbConfiguration_linux.run_os_command")
@patch("ambari_server.dbConfiguration_linux.print_error_msg")
def test_change_objects_owner_only_stderr(self,
print_error_msg_mock,
run_os_command_mock):
args = MagicMock()
del args.database_index
del args.dbms
del args.database_host
del args.database_port
del args.database_name
del args.database_username
del args.database_password
del args.persistence_type
stdout = ""
stderr = " stderr "
run_os_command_mock.return_value = 1, stdout, stderr
set_verbose(True)
self.assertRaises(FatalException, change_objects_owner, args)
print_error_msg_mock.assert_called_once_with("stderr:\nstderr")
pass
| 38.357632
| 256
| 0.716026
|
e711398ea4e871a318346faf850e72c2d78ad99d
| 632
|
py
|
Python
|
src/z3c/objpath/tests.py
|
zopefoundation/z3c.objpath
|
bc4947b105a6851d06d09a205f8fffcb8088507f
|
[
"ZPL-2.1"
] | 1
|
2021-03-05T17:27:29.000Z
|
2021-03-05T17:27:29.000Z
|
src/z3c/objpath/tests.py
|
zopefoundation/z3c.objpath
|
bc4947b105a6851d06d09a205f8fffcb8088507f
|
[
"ZPL-2.1"
] | 5
|
2018-03-12T17:28:42.000Z
|
2021-09-21T06:16:30.000Z
|
src/z3c/objpath/tests.py
|
zopefoundation/z3c.objpath
|
bc4947b105a6851d06d09a205f8fffcb8088507f
|
[
"ZPL-2.1"
] | null | null | null |
from zope.interface.verify import verifyObject
import doctest
import unittest
class ObjectPathTests(unittest.TestCase):
"""Testing .path.*"""
def test_module_provides_interface(self):
from .interfaces import IObjectPath
from . import _path
self.assertTrue(verifyObject(IObjectPath, _path))
def test_suite():
optionflags = (
doctest.ELLIPSIS
| doctest.REPORT_NDIFF
| doctest.NORMALIZE_WHITESPACE
)
return unittest.TestSuite([
unittest.makeSuite(ObjectPathTests),
doctest.DocFileSuite(
'README.rst', optionflags=optionflags)
])
| 23.407407
| 57
| 0.68038
|
c4f12b4bf90aa328a2e7eaf42e80c231ec54ab36
| 3,505
|
py
|
Python
|
python/tink/integration/gcpkms/_gcp_kms_aead_test.py
|
szaydel/tink
|
e231fe638eb1f1ed8f1d45cc665ee98736c781d6
|
[
"Apache-2.0"
] | null | null | null |
python/tink/integration/gcpkms/_gcp_kms_aead_test.py
|
szaydel/tink
|
e231fe638eb1f1ed8f1d45cc665ee98736c781d6
|
[
"Apache-2.0"
] | null | null | null |
python/tink/integration/gcpkms/_gcp_kms_aead_test.py
|
szaydel/tink
|
e231fe638eb1f1ed8f1d45cc665ee98736c781d6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.integration.gcp_kms_aead."""
import os
from absl.testing import absltest
from tink import core
from tink.integration import gcpkms
from tink.testing import helper
CREDENTIAL_PATH = os.path.join(helper.tink_root_path(),
'testdata/credential.json')
KEY_URI = 'gcp-kms://projects/tink-test-infrastructure/locations/global/keyRings/unit-and-integration-testing/cryptoKeys/aead-key'
LOCAL_KEY_URI = 'gcp-kms://projects/tink-test-infrastructure/locations/europe-west1/keyRings/unit-and-integration-test/cryptoKeys/aead-key'
BAD_KEY_URI = 'aws-kms://arn:aws:kms:us-east-2:235739564943:key/3ee50705-5a82-4f5b-9753-05c4f473922f'
if 'TEST_SRCDIR' in os.environ:
# Set root certificates for gRPC in Bazel tests; they are needed on macOS
os.environ['GRPC_DEFAULT_SSL_ROOTS_FILE_PATH'] = os.path.join(
os.environ['TEST_SRCDIR'], 'google_root_pem/file/downloaded')
class GcpKmsAeadTest(absltest.TestCase):
def test_encrypt_decrypt(self):
gcp_client = gcpkms.GcpKmsClient(KEY_URI, CREDENTIAL_PATH)
aead = gcp_client.get_aead(KEY_URI)
plaintext = b'helloworld'
ciphertext = aead.encrypt(plaintext, b'')
self.assertEqual(plaintext, aead.decrypt(ciphertext, b''))
plaintext = b'hello'
associated_data = b'world'
ciphertext = aead.encrypt(plaintext, associated_data)
self.assertEqual(plaintext, aead.decrypt(ciphertext, associated_data))
def test_encrypt_decrypt_localized_uri(self):
gcp_client = gcpkms.GcpKmsClient(LOCAL_KEY_URI, CREDENTIAL_PATH)
aead = gcp_client.get_aead(LOCAL_KEY_URI)
plaintext = b'helloworld'
ciphertext = aead.encrypt(plaintext, b'')
self.assertEqual(plaintext, aead.decrypt(ciphertext, b''))
plaintext = b'hello'
associated_data = b'world'
ciphertext = aead.encrypt(plaintext, associated_data)
self.assertEqual(plaintext, aead.decrypt(ciphertext, associated_data))
def test_encrypt_with_bad_uri(self):
with self.assertRaises(core.TinkError):
gcp_client = gcpkms.GcpKmsClient(KEY_URI, CREDENTIAL_PATH)
gcp_client.get_aead(BAD_KEY_URI)
def test_corrupted_ciphertext(self):
gcp_client = gcpkms.GcpKmsClient(KEY_URI, CREDENTIAL_PATH)
aead = gcp_client.get_aead(KEY_URI)
plaintext = b'helloworld'
ciphertext = aead.encrypt(plaintext, b'')
self.assertEqual(plaintext, aead.decrypt(ciphertext, b''))
# Corrupt each byte once and check that decryption fails
# NOTE: Only starting at 4th byte here, as the 3rd byte is malleable
# (see b/146633745).
for byte_idx in range(3, len(ciphertext)):
tmp_ciphertext = list(ciphertext)
tmp_ciphertext[byte_idx] ^= 1
corrupted_ciphertext = bytes(tmp_ciphertext)
with self.assertRaises(core.TinkError):
aead.decrypt(corrupted_ciphertext, b'')
if __name__ == '__main__':
# TODO(b/154273145): re-enable this.
pass
# absltest.main()
| 38.097826
| 139
| 0.74408
|
8fb74104745e4e194de6e52378c617c2b9ca62ea
| 5,050
|
py
|
Python
|
m2cgen/assemblers/svm.py
|
bcampbell-prosper/m2cgen
|
cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336
|
[
"MIT"
] | 3
|
2021-06-29T02:43:40.000Z
|
2022-03-28T07:41:59.000Z
|
m2cgen/assemblers/svm.py
|
bcampbell-prosper/m2cgen
|
cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336
|
[
"MIT"
] | null | null | null |
m2cgen/assemblers/svm.py
|
bcampbell-prosper/m2cgen
|
cc049fe5cd7060c2f0cd5a0331e3aa85fac2a336
|
[
"MIT"
] | 3
|
2021-08-06T07:51:37.000Z
|
2022-03-28T07:41:42.000Z
|
from m2cgen import ast
from m2cgen.assemblers import utils
from m2cgen.assemblers.base import ModelAssembler
class SVMModelAssembler(ModelAssembler):
def __init__(self, model):
super().__init__(model)
supported_kernels = {
"rbf": self._rbf_kernel,
"sigmoid": self._sigmoid_kernel,
"poly": self._poly_kernel,
"linear": self._linear_kernel
}
kernel_type = model.kernel
if kernel_type not in supported_kernels:
raise ValueError("Unsupported kernel type {}".format(kernel_type))
self._kernel_fun = supported_kernels[kernel_type]
n_features = len(model.support_vectors_[0])
gamma = model.gamma
if gamma == "auto" or gamma == "auto_deprecated":
gamma = 1.0 / n_features
self._gamma_expr = ast.NumVal(gamma)
self._neg_gamma_expr = utils.sub(ast.NumVal(0), ast.NumVal(gamma),
to_reuse=True)
self._output_size = 1
if type(model).__name__ in ("SVC", "NuSVC"):
n_classes = len(model.n_support_)
if n_classes > 2:
self._output_size = n_classes
def assemble(self):
if self._output_size > 1:
return self._assemble_multi_class_output()
else:
return self._assemble_single_output()
def _assemble_single_output(self):
support_vectors = self.model.support_vectors_
coef = self.model.dual_coef_[0]
intercept = self.model.intercept_[0]
kernel_exprs = self._apply_kernel(support_vectors)
kernel_weight_mul_ops = []
for index, value in enumerate(coef):
kernel_weight_mul_ops.append(
utils.mul(kernel_exprs[index], ast.NumVal(value)))
return utils.apply_op_to_expressions(
ast.BinNumOpType.ADD,
ast.NumVal(intercept),
*kernel_weight_mul_ops)
def _assemble_multi_class_output(self):
support_vectors = self.model.support_vectors_
coef = self.model.dual_coef_
intercept = self.model.intercept_
n_support = self.model.n_support_
n_support_len = len(n_support)
kernel_exprs = self._apply_kernel(support_vectors, to_reuse=True)
support_ranges = []
for i in range(n_support_len):
range_start = sum(n_support[:i])
range_end = range_start + n_support[i]
support_ranges.append((range_start, range_end))
# One-vs-one decisions.
decisions = []
for i in range(n_support_len):
for j in range(i + 1, n_support_len):
kernel_weight_mul_ops = [
utils.mul(kernel_exprs[k], ast.NumVal(coef[i][k]))
for k in range(*support_ranges[j])
]
kernel_weight_mul_ops.extend([
utils.mul(kernel_exprs[k], ast.NumVal(coef[j - 1][k]))
for k in range(*support_ranges[i])
])
decision = utils.apply_op_to_expressions(
ast.BinNumOpType.ADD,
ast.NumVal(intercept[len(decisions)]),
*kernel_weight_mul_ops
)
decisions.append(decision)
# TODO convert One-vs-one decisions to One-vs-rest
return ast.VectorVal(decisions)
def _apply_kernel(self, support_vectors, to_reuse=False):
kernel_exprs = []
for v in support_vectors:
kernel = self._kernel_fun(v)
kernel_exprs.append(ast.SubroutineExpr(kernel, to_reuse=to_reuse))
return kernel_exprs
def _rbf_kernel(self, support_vector):
elem_wise = [
ast.PowExpr(
utils.sub(ast.NumVal(support_element), ast.FeatureRef(i)),
ast.NumVal(2)
)
for i, support_element in enumerate(support_vector)
]
kernel = utils.apply_op_to_expressions(ast.BinNumOpType.ADD,
*elem_wise)
kernel = utils.mul(self._neg_gamma_expr, kernel)
return ast.ExpExpr(kernel)
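# The expression assembled above follows the usual RBF kernel form
#   K(sv, x) = exp(-gamma * sum_i (sv_i - x_i)^2)
# with the shared negative-gamma constant (self._neg_gamma_expr) reused across
# all support vectors.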
def _sigmoid_kernel(self, support_vector):
kernel = self._linear_kernel_with_gama_and_coef(support_vector)
return ast.TanhExpr(kernel)
def _poly_kernel(self, support_vector):
kernel = self._linear_kernel_with_gama_and_coef(support_vector)
return ast.PowExpr(kernel, ast.NumVal(self.model.degree))
def _linear_kernel(self, support_vector):
elem_wise = [
utils.mul(ast.NumVal(support_element), ast.FeatureRef(i))
for i, support_element in enumerate(support_vector)
]
return utils.apply_op_to_expressions(ast.BinNumOpType.ADD, *elem_wise)
def _linear_kernel_with_gama_and_coef(self, support_vector):
kernel = self._linear_kernel(support_vector)
kernel = utils.mul(self._gamma_expr, kernel)
return utils.add(kernel, ast.NumVal(self.model.coef0))
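# Hedged usage sketch (not part of this module; assumes scikit-learn is
# installed, and the dataset/variable names below are illustrative only):
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    X, y = load_iris(return_X_y=True)
    clf = SVC(kernel="rbf", gamma="auto").fit(X, y)

    # Assemble the m2cgen AST for the fitted model; for the 3-class iris
    # problem this yields an ast.VectorVal of one-vs-one decision expressions.
    expr = SVMModelAssembler(clf).assemble()
    print(type(expr).__name__)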
| 36.594203
| 78
| 0.611485
|
9c86779fcc28c2dee798286233823a667a8aa9fd
| 1,828
|
py
|
Python
|
Lib/site-packages/sphinx/environment/collectors/indexentries.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 3
|
2020-01-04T16:46:59.000Z
|
2020-10-09T03:04:31.000Z
|
Lib/site-packages/sphinx/environment/collectors/indexentries.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 10
|
2021-06-16T20:48:32.000Z
|
2021-10-04T18:22:02.000Z
|
Lib/site-packages/sphinx/environment/collectors/indexentries.py
|
nemarugommula/ecommerce
|
60185e79655fbaf0fcad9e877a886fe9eb3c4451
|
[
"bzip2-1.0.6"
] | 2
|
2019-11-02T08:03:09.000Z
|
2020-06-29T14:52:15.000Z
|
"""
sphinx.environment.collectors.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Index entries collector for sphinx.environment.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from typing import Any, Dict, Set
from docutils import nodes
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
from sphinx.environment.collectors import EnvironmentCollector
from sphinx.util import split_index_msg, logging
logger = logging.getLogger(__name__)
class IndexEntriesCollector(EnvironmentCollector):
name = 'indices'
def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
env.indexentries.pop(docname, None)
def merge_other(self, app: Sphinx, env: BuildEnvironment,
docnames: Set[str], other: BuildEnvironment) -> None:
for docname in docnames:
env.indexentries[docname] = other.indexentries[docname]
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
docname = app.env.docname
entries = app.env.indexentries[docname] = []
for node in doctree.traverse(addnodes.index):
try:
for entry in node['entries']:
split_index_msg(entry[0], entry[1])
except ValueError as exc:
logger.warning(str(exc), location=node)
node.parent.remove(node)
else:
for entry in node['entries']:
entries.append(entry)
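# Hedged illustration (entry layout assumed from typical Sphinx usage, not
# stated in this module): each element of node['entries'] is usually a 5-tuple
# such as ('single', 'Python; interpreter', 'index-0', '', None), i.e.
# (entry type, value, target id, main flag, sort key); split_index_msg() above
# validates the type/value pair before the entry is collected.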
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_env_collector(IndexEntriesCollector)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 31.517241
| 82
| 0.641685
|
ab705f28ba8114d712a9c43f6a0f9792ddd17d5e
| 5,779
|
py
|
Python
|
neural_net.py
|
gioypi/penthy
|
938566af92b519cb57a7f0f1f22885f4b6a92292
|
[
"MIT"
] | null | null | null |
neural_net.py
|
gioypi/penthy
|
938566af92b519cb57a7f0f1f22885f4b6a92292
|
[
"MIT"
] | null | null | null |
neural_net.py
|
gioypi/penthy
|
938566af92b519cb57a7f0f1f22885f4b6a92292
|
[
"MIT"
] | null | null | null |
"""Create a Convolutional Neural Network with Keras and train it to tell truly lossless and transcoded audio apart.
Build a Keras CNN to evaluate audio compression based on spectograms. Use spectogram images extracted with
the audio_manipulation module as a dataset. Save the trained Network, but not the dataset.
A network output of '1' corresponds to a spectogram derived from a truly lossless source.
An output of '0' corresponds to a spectogram derived from audio transcoded to mp3 and back to a lossless format.
Only possible mp3 transcoding is inspected. Even if a file is verified as truly lossless in terms of mp3 transcoding,
it could still be upsampled, transcoded from a different format or altered in other ways.
"""
import audio_manipulation as am
import time
import wakepy
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D, Dropout
def main():
# Number of samples from the input dataset used in each update within an epoch.
BATCH_SIZE = 16
# Number of epochs to train the network.
EPOCH_NUM = 50
# Proportion of the input dataset used for validation, instead of training.
# Float between 0 and 1.
PROP_VALID = 0.06
# Path to the file or directory where the model will be saved after training.
MODEL_PATH = "saved_models/trained_penthy"
# Check that tensorflow recognizes the GPU.
print("Tensorflow GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
wakepy.set_keepawake(keep_screen_awake=False) # Prevent OS from sleeping during long training sessions.
print("Extracting spectograms...")
start_time = time.monotonic()
spect_true = np.array(am.extract_spectogram_from_dir("dataset_files/true_flac_44-16"))
in_ds = tf.data.Dataset.from_tensor_slices((spect_true, np.array([1] * spect_true.shape[0])))
# [spect stands for spectogram, trans and transc stand for transcoded]
# For the transcoded samples, use various bitrates and compression qualities.
trans_list = am.extract_spectogram_from_dir("dataset_files/flac_44-16_transcoded_from_mp3_320")
trans_list.extend(am.extract_spectogram_from_dir("dataset_files/flac_44-16_transcoded_from_mp3_128"))
trans_list.extend(am.extract_spectogram_from_dir("dataset_files/flac_44-16_transc_from_mp3_320_prime"))
spect_trans = np.array(trans_list)
temp_ds = tf.data.Dataset.from_tensor_slices((spect_trans, np.array([0] * spect_trans.shape[0])))
in_ds = in_ds.concatenate(temp_ds)
end_time = time.monotonic()
# Real time passed, not time in CPU/GPU.
print("Spectograms ready. Creation duration: %.4f minutes" % ((end_time - start_time) / 60))
# print("Input dataset:", in_ds)
# print(list(in_ds.as_numpy_iterator()))
print("Preparing dataset...")
start_time = time.monotonic()
num_elements = spect_true.shape[0] + spect_trans.shape[0]
in_ds = in_ds.shuffle(num_elements, reshuffle_each_iteration=False)
num_valid = int(num_elements * PROP_VALID)
num_train = num_elements - num_valid
valid_ds = in_ds.take(num_valid)
train_ds = in_ds.skip(num_valid)
valid_ds = valid_ds.batch(BATCH_SIZE)
train_ds = train_ds.batch(BATCH_SIZE)
train_ds = train_ds.shuffle(num_train, reshuffle_each_iteration=True) # Reshuffle after each epoch.
end_time = time.monotonic()
print("Dataset ready. Preparation duration: %.4f minutes" % ((end_time - start_time) / 60))
print("Dataset size:", num_elements, "samples")
print("of which", num_train, "used for training and", num_valid, "used for validation.")
print("Creating neural network...")
net = Sequential()
net.add(Conv2D(10, (3, 3), padding="valid", data_format="channels_last", activation="relu", use_bias=True,
input_shape=(am.HEIGHT, am.WIDTH, 3), kernel_initializer="random_normal"))
net.add(Conv2D(10, (3, 3), padding="valid", data_format="channels_last", activation="relu", use_bias=True,
kernel_initializer="random_normal"))
net.add(MaxPooling2D((2, 2), padding="valid", data_format="channels_last"))
net.add(Conv2D(8, (3, 3), padding="valid", data_format="channels_last", activation="relu", use_bias=True,
kernel_initializer="random_normal"))
net.add(Conv2D(8, (3, 3), padding="valid", data_format="channels_last", activation="relu", use_bias=True,
kernel_initializer="random_normal"))
net.add(MaxPooling2D((2, 2), padding="valid", data_format="channels_last"))
net.add(Flatten(data_format="channels_last"))
net.add(Dropout(0.2))
net.add(Dense(32, activation="relu", use_bias=True, kernel_initializer="random_normal"))
net.add(Dropout(0.2))
net.add(Dense(16, activation="relu", use_bias=True, kernel_initializer="random_normal"))
net.add(Dense(1, activation="sigmoid", use_bias=True, kernel_initializer="random_normal"))
print("Neural network created.")
net.summary()
print("Training neural network...")
start_time = time.monotonic()
net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=0.005, rho=0.95),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), metrics=["accuracy"])
net.fit(train_ds, epochs=EPOCH_NUM, verbose=1, validation_data=valid_ds)
end_time = time.monotonic()
print("Training complete. Training duration: %.4f minutes" % ((end_time - start_time) / 60))
print("Saving neural network to file...")
    net.save(MODEL_PATH, save_format="tf")  # Pass the string "tf", not the tensorflow module.
print("Network saved.")
wakepy.unset_keepawake()
if __name__ == "__main__":
main()
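# --- Editor's illustration (not part of the original script) ---
# A minimal sketch of reusing the model saved above for inference. It assumes the
# same audio_manipulation helper used during training; the directory path and the
# name score_directory are placeholders chosen for this example only.
def score_directory(model_path="saved_models/trained_penthy", clip_dir="dataset_files/to_check"):
    """Load the trained classifier and return a per-clip probability of a genuine lossless origin."""
    import numpy as np
    import tensorflow as tf
    import audio_manipulation as am
    model = tf.keras.models.load_model(model_path)
    spects = np.array(am.extract_spectogram_from_dir(clip_dir))  # same extraction step as training
    return model.predict(spects).ravel()  # sigmoid outputs: ~1 means true FLAC, ~0 means transcode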
| 52.063063
| 118
| 0.71483
|
72c71119cd90849738f63158e442f5b421f4185f
| 3,601
|
py
|
Python
|
imitator/pose_imitation/data_process/convert_clip.py
|
Garfield-kh/PoseTriplet
|
eb93132f99161bd776dafbcb713e9fb43e501c36
|
[
"MIT"
] | 9
|
2022-03-30T04:40:11.000Z
|
2022-03-31T16:03:27.000Z
|
imitator/pose_imitation/data_process/convert_clip.py
|
Garfield-kh/PoseTriplet
|
eb93132f99161bd776dafbcb713e9fb43e501c36
|
[
"MIT"
] | null | null | null |
imitator/pose_imitation/data_process/convert_clip.py
|
Garfield-kh/PoseTriplet
|
eb93132f99161bd776dafbcb713e9fb43e501c36
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.getcwd())
from utils import *
from utils.transformation import quaternion_from_euler
from mujoco_py import load_model_from_path
from mocap.skeleton import Skeleton
from mocap.pose import load_bvh_file, interpolated_traj
import pickle
import glob
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument('--render', action='store_true', default=True)
parser.add_argument('--model-id', type=str, default='humanoid_1205_orig')
parser.add_argument('--mocap-id', type=str, default='0626') # default: 0213
parser.add_argument('--range', type=int, nargs=2, default=None)  # e.g. --range 5 20; used below as a [start, end] slice of bvh_files
parser.add_argument('--skt-id', type=str, default='take_000')
parser.add_argument('--mocap-fr', type=int, default=30) # default 120
parser.add_argument('--dt', type=float, default=1/30)
parser.add_argument('--offset-z', type=float, default=0.0)
args = parser.parse_args()
model_file = 'assets/mujoco_models/%s.xml' % args.model_id
model = load_model_from_path(model_file)
body_qposaddr = get_body_qposaddr(model)
skt_bvh = os.path.expanduser('datasets/traj_debug/%s_%s.bvh' % (args.mocap_id, args.skt_id))
exclude_bones = {'Thumb', 'Index', 'Middle', 'Ring', 'Pinky', 'End', 'Toe'}
spec_channels = {'LeftForeArm': ['Zrotation'], 'RightForeArm': ['Zrotation'],
'LeftLeg': ['Xrotation'], 'RightLeg': ['Xrotation']}
skeleton = Skeleton()
skeleton.load_from_bvh(skt_bvh, exclude_bones, spec_channels)
def get_qpos(pose, bone_addr):
"""
    :param pose: flattened BVH pose for one frame; bone_addr gives each bone's [start, end) slice into it
    :param bone_addr: mapping from bone name to its index range in the BVH pose vector
    :return: qpos vector of length model.nq, laid out according to body_qposaddr
"""
qpos = np.zeros(model.nq)
for bone_name, ind2 in body_qposaddr.items():
ind1 = bone_addr[bone_name]
if ind1[0] == 0:
trans = pose[ind1[0]:ind1[0] + 3].copy()
angles = pose[ind1[0] + 3:ind1[1]].copy()
quat = quaternion_from_euler(angles[0], angles[1], angles[2], 'rxyz')
qpos[ind2[0]:ind2[0] + 3] = trans
qpos[ind2[0] + 3:ind2[1]] = quat
else:
qpos[ind2[0]:ind2[1]] = pose[ind1[0]:ind1[1]]
return qpos
def get_poses(bvh_file):
    time0_get_poses = time.time()  # Start timing the BVH load and conversion.
poses, bone_addr = load_bvh_file(bvh_file, skeleton)
poses_samp = interpolated_traj(poses, args.dt, mocap_fr=args.mocap_fr)
qpos_traj = []
for i in range(poses_samp.shape[0]):
cur_pose = poses_samp[i, :]
cur_qpos = get_qpos(cur_pose, bone_addr)
qpos_traj.append(cur_qpos)
qpos_traj = np.vstack(qpos_traj)
qpos_traj[:, 2] += args.offset_z
    time_cost_get_poses = time.time() - time0_get_poses  # Elapsed wall-clock time.
print('-----> get_poses spends {:.2f}s on {} with {:0>6d} frames'.format(time_cost_get_poses,
bvh_file, poses.shape[0]))
return qpos_traj
bvh_files = glob.glob(os.path.expanduser('datasets/traj_debug/%s_*.bvh' % args.mocap_id))
bvh_files.sort()
if args.range is not None:
bvh_files = bvh_files[args.range[0]: args.range[1]]
print('bvh_files:', bvh_files)
tmp_dict = {}
for file in bvh_files:
print('extracting trajectory from %s' % file)
qpos_traj = get_poses(file)
name = os.path.splitext(os.path.basename(file))[0]
tmp_dict[name] = {}
tmp_dict[name]['predicted_3d_qpos'] = qpos_traj
# with open('./datasets/traj_new/traj_dict_ego.pkl', 'wb') as f:
# pickle.dump(tmp_dict, f, pickle.HIGHEST_PROTOCOL)
with open('./datasets/traj_debug/traj_dict_626.pkl', 'wb') as f:
pickle.dump(tmp_dict, f, pickle.HIGHEST_PROTOCOL)
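# --- Editor's illustration (not part of the original script) ---
# A short sketch for sanity-checking the pickle written above; the path matches the
# dump call and the dict layout ({name: {'predicted_3d_qpos': array}}) follows tmp_dict.
def inspect_converted_trajectories(pkl_path='./datasets/traj_debug/traj_dict_626.pkl'):
    """Reload the converted qpos trajectories and report their sizes."""
    import pickle
    with open(pkl_path, 'rb') as f:
        traj_dict = pickle.load(f)
    for name, entry in traj_dict.items():
        qpos = entry['predicted_3d_qpos']  # (num_frames, model.nq) array built by get_poses()
        print('%s: %d frames x %d dof' % (name, qpos.shape[0], qpos.shape[1]))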
| 36.01
| 103
| 0.666482
|
9c96cf9407e71d09abd07104cf5bd27e2599e27a
| 1,789
|
py
|
Python
|
src/PROBE/start_search.py
|
lelis-research/PyGames-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 1
|
2021-06-03T15:54:16.000Z
|
2021-06-03T15:54:16.000Z
|
src/PROBE/start_search.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | 3
|
2021-07-26T19:58:31.000Z
|
2021-07-27T17:35:51.000Z
|
src/PROBE/start_search.py
|
olivier-vadiaval/catcher-synthesis
|
0c597243c04ce511d8ae5cfd52b1c043267b4503
|
[
"MIT"
] | null | null | null |
"""
start_search.py
Author: Olivier Vadiavaloo
Description:
This module provides the implementation of the driver code for the probe
synthesizer. It creates the list of rules, the pcfg, the logger object
and the probe object.
"""
from math import log10, floor
from src.dsl import *
from src.PROBE.rule import *
from src.PROBE.probe import *
from src.Utils.logger import Logger
from src.Evaluation.evaluation import *
def start_probe(time_limit, log_file, is_parallel, game):
rules = [
const_rule,
var_arr_rule,
var_from_arr_rule,
var_scalar_rule,
non_player_pos_rule,
non_player_dir_rule,
player_pos_rule,
for_each_rule,
ite_rule,
it_rule,
strategy_rule,
ra_rule,
plus_rule,
minus_rule,
times_rule,
divide_rule,
gt_rule,
lt_rule,
eq_rule
]
uniform_prob = 1 / len(rules)
uniform_cost = floor(-1 * log10(uniform_prob))
pcfg = {}
for rule in rules:
pcfg[rule] = {probability_key: uniform_prob, cost_key: uniform_cost}
pcfg['dsfs'] = [NonPlayerObjectPosition, NonPlayerObjectApproaching, PlayerPosition]
pcfg['constants'] = [0.5, 2]
pcfg['scalars'] = [
VarArray.new('actions'),
VarScalar.new('paddle_width'),
VarFromArray.new('actions', 0),
VarFromArray.new('actions', 1),
VarFromArray.new('actions', 2)
]
eval_factory = EvaluationFactory(0, 10, False, 'NORMAL')
eval_funct = eval_factory.get_eval_fun(game)
logger = Logger(
log_file,
'PROBE (Guided Bottom-Up Search)',
{'time': time_limit}
)
synthesizer = Probe()
synthesizer.probe(pcfg, rules, eval_funct, time_limit, logger, is_parallel)
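# --- Editor's illustration (not part of the original module) ---
# A hedged example of invoking start_probe. Only the parameter order comes from the
# signature above; the time limit, log-file name and game string are assumptions.
if __name__ == '__main__':
    start_probe(
        time_limit=3600,           # seconds allotted to the bottom-up search (placeholder)
        log_file='probe_run.log',  # forwarded to the Logger created above
        is_parallel=False,         # single-process evaluation
        game='Catcher'             # game name passed to EvaluationFactory.get_eval_fun (assumed value)
    )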
| 25.557143
| 88
| 0.645053
|
accd1f7105b898defa161501056b7fe028407de4
| 71,262
|
py
|
Python
|
pygments/lexers/templates.py
|
zaibacu/pygments
|
8d110f8dc23f9154277625f3b3555f03ba68093d
|
[
"BSD-2-Clause"
] | 16
|
2020-09-20T22:32:54.000Z
|
2021-04-02T17:14:25.000Z
|
pygments/lexers/templates.py
|
zaibacu/pygments
|
8d110f8dc23f9154277625f3b3555f03ba68093d
|
[
"BSD-2-Clause"
] | 301
|
2020-10-03T10:46:31.000Z
|
2022-03-27T23:46:23.000Z
|
pygments/lexers/templates.py
|
zaibacu/pygments
|
8d110f8dc23f9154277625f3b3555f03ba68093d
|
[
"BSD-2-Clause"
] | 2
|
2020-09-17T08:27:12.000Z
|
2021-08-23T11:13:52.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.templates
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various template engines' markup.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer, LassoLexer
from pygments.lexers.css import CssLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.python import PythonLexer
from pygments.lexers.perl import PerlLexer
from pygments.lexers.jvm import JavaLexer, TeaLangLexer
from pygments.lexers.data import YamlLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this, default, combined
from pygments.token import Error, Punctuation, Whitespace, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer']
class ErbLexer(Lexer):
"""
Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
lexer.
Just highlights ruby code between the preprocessor directives, other data
is left untouched by the lexer.
All options are also forwarded to the `RubyLexer`.
"""
name = 'ERB'
aliases = ['erb']
mimetypes = ['application/x-ruby-templating']
_block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
def __init__(self, **options):
from pygments.lexers.ruby import RubyLexer
self.ruby_lexer = RubyLexer(**options)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
"""
        Since ERB doesn't allow "<%" and other tags inside of Ruby
        blocks, a simple split-based approach is used here; like ERB
        itself, it fails on such input.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
def analyse_text(text):
if '<%' in text and '%>' in text:
return 0.4
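# --- Editor's illustration (not part of Pygments) ---
# A minimal sketch of driving the split-based ErbLexer defined above. pygments.highlight
# and TerminalFormatter are standard Pygments APIs; the sample template string is made up.
def _erb_demo():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = '<p>Hello, <%= user.name %>!</p>\n<% if admin? %>admin<% end %>'
    # Non-Ruby text is emitted as Token.Other; everything between the tags is
    # delegated to RubyLexer, as implemented in get_tokens_unprocessed above.
    return highlight(sample, ErbLexer(), TerminalFormatter())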
class SmartyLexer(RegexLexer):
"""
Generic `Smarty <http://smarty.php.net/>`_ template lexer.
Just highlights smarty code between the preprocessor directives, other
data is left untouched by the lexer.
"""
name = 'Smarty'
aliases = ['smarty']
filenames = ['*.tpl']
mimetypes = ['application/x-smarty']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(r'[^{]+', Other),
(r'(\{)(\*.*?\*)(\})',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(\{php\})(.*?)(\{/php\})',
bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
Comment.Preproc)),
(r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
(r'\{', Comment.Preproc, 'smarty')
],
'smarty': [
(r'\s+', Text),
(r'\{', Comment.Preproc, '#push'),
(r'\}', Comment.Preproc, '#pop'),
(r'#[a-zA-Z_]\w*#', Name.Variable),
(r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
(r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
(r'(true|false|null)\b', Keyword.Constant),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name.Attribute)
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
rv += 0.15
if re.search(r'\{include\s+file=.*?\}', text):
rv += 0.15
if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
rv += 0.15
if re.search(r'\{\$.*?\}', text):
rv += 0.01
return rv
class VelocityLexer(RegexLexer):
"""
Generic `Velocity <http://velocity.apache.org/>`_ template lexer.
Just highlights velocity directives and variable references, other
data is left untouched by the lexer.
"""
name = 'Velocity'
aliases = ['velocity']
filenames = ['*.vm', '*.fhtml']
flags = re.MULTILINE | re.DOTALL
identifier = r'[a-zA-Z_]\w*'
tokens = {
'root': [
(r'[^{#$]+', Other),
(r'(#)(\*.*?\*)(#)',
bygroups(Comment.Preproc, Comment, Comment.Preproc)),
(r'(##)(.*?$)',
bygroups(Comment.Preproc, Comment)),
(r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
'directiveparams'),
(r'(#\{?)(' + identifier + r')(\}|\b)',
bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
(r'\$!?\{?', Punctuation, 'variable')
],
'variable': [
(identifier, Name.Variable),
(r'\(', Punctuation, 'funcparams'),
(r'(\.)(' + identifier + r')',
bygroups(Punctuation, Name.Variable), '#push'),
(r'\}', Punctuation, '#pop'),
default('#pop')
],
'directiveparams': [
(r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
Operator),
(r'\[', Operator, 'rangeoperator'),
(r'\b' + identifier + r'\b', Name.Function),
include('funcparams')
],
'rangeoperator': [
(r'\.\.', Operator),
include('funcparams'),
(r'\]', Operator, '#pop')
],
'funcparams': [
(r'\$!?\{?', Punctuation, 'variable'),
(r'\s+', Text),
(r'[,:]', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"\b[0-9]+\b", Number),
(r'(true|false|null)\b', Keyword.Constant),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
rv += 0.25
if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
rv += 0.15
if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
r'(\.\w+(\([^)]*\))?)*\}?', text):
rv += 0.01
return rv
class VelocityHtmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Velocity'
aliases = ['html+velocity']
alias_filenames = ['*.html', '*.fhtml']
mimetypes = ['text/html+velocity']
def __init__(self, **options):
super().__init__(HtmlLexer, VelocityLexer, **options)
class VelocityXmlLexer(DelegatingLexer):
"""
Subclass of the `VelocityLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Velocity'
aliases = ['xml+velocity']
alias_filenames = ['*.xml', '*.vm']
mimetypes = ['application/xml+velocity']
def __init__(self, **options):
super().__init__(XmlLexer, VelocityLexer, **options)
def analyse_text(text):
rv = VelocityLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
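# --- Editor's illustration (not part of Pygments) ---
# The HTML+Velocity / XML+Velocity classes above follow the standard DelegatingLexer
# pattern: pass the host-language lexer and the template lexer to super().__init__().
# A hedged sketch of the same composition for plain text; the "Text+Velocity"
# combination is hypothetical and only illustrates the pattern.
def _make_text_velocity_lexer(**options):
    from pygments.lexers.special import TextLexer
    class TextVelocityLexer(DelegatingLexer):
        name = 'Text+Velocity (illustrative)'
        aliases = []
        def __init__(self, **opts):
            super().__init__(TextLexer, VelocityLexer, **opts)
    return TextVelocityLexer(**options)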
class DjangoLexer(RegexLexer):
"""
Generic `django <http://www.djangoproject.com/documentation/templates/>`_
and `jinja <https://jinja.pocoo.org/jinja/>`_ template lexer.
It just highlights django/jinja code between the preprocessor directives,
other data is left untouched by the lexer.
"""
name = 'Django/Jinja'
aliases = ['django', 'jinja']
mimetypes = ['application/x-django-templating', 'application/x-jinja']
flags = re.M | re.S
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# jinja/django comments
(r'\{[*#].*?[*#]\}', Comment),
# django comments
(r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Comment, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# raw jinja blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Text, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'block'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'block'),
(r'\{', Other)
],
'varnames': [
(r'(\|)(\s*)([a-zA-Z_]\w*)',
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
(r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
Keyword),
(r'(loop|block|super|forloop)\b', Name.Builtin),
(r'[a-zA-Z_][\w-]*', Name.Variable),
(r'\.\w+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'block': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation)
]
}
def analyse_text(text):
rv = 0.0
if re.search(r'\{%\s*(block|extends)', text) is not None:
rv += 0.4
if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
rv += 0.1
if re.search(r'\{\{.*?\}\}', text) is not None:
rv += 0.1
return rv
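# --- Editor's illustration (not part of Pygments) ---
# Tokenizing a small Jinja-style snippet with the DjangoLexer defined above;
# get_tokens is the standard Lexer API and the template string is made up.
def _django_demo():
    lexer = DjangoLexer()
    sample = '{% for item in items %}{{ item.name|upper }}{% endfor %}'
    # Tags, keywords and filters come back as Comment.Preproc / Keyword / Name.Function
    # tokens; text outside the delimiters is yielded as Other.
    return list(lexer.get_tokens(sample))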
class MyghtyLexer(RegexLexer):
"""
Generic `myghty templates`_ lexer. Code that isn't Myghty
markup is yielded as `Token.Other`.
.. versionadded:: 0.6
.. _myghty templates: http://www.myghty.org/
"""
name = 'Myghty'
aliases = ['myghty']
filenames = ['*.myt', 'autodelegate']
mimetypes = ['application/x-myghty']
tokens = {
'root': [
(r'\s+', Text),
(r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PythonLexer), Name.Tag)),
(r'(<&[^|])(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(?s)(<%!?)(.*?)(%>)',
bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PythonLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(Other, Operator)),
]
}
class MyghtyHtmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.6
"""
name = 'HTML+Myghty'
aliases = ['html+myghty']
mimetypes = ['text/html+myghty']
def __init__(self, **options):
super().__init__(HtmlLexer, MyghtyLexer, **options)
class MyghtyXmlLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.6
"""
name = 'XML+Myghty'
aliases = ['xml+myghty']
mimetypes = ['application/xml+myghty']
def __init__(self, **options):
super().__init__(XmlLexer, MyghtyLexer, **options)
class MyghtyJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.6
"""
name = 'JavaScript+Myghty'
aliases = ['js+myghty', 'javascript+myghty']
mimetypes = ['application/x-javascript+myghty',
'text/x-javascript+myghty',
'text/javascript+mygthy']
def __init__(self, **options):
super().__init__(JavascriptLexer, MyghtyLexer, **options)
class MyghtyCssLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.6
"""
name = 'CSS+Myghty'
aliases = ['css+myghty']
mimetypes = ['text/css+myghty']
def __init__(self, **options):
super().__init__(CssLexer, MyghtyLexer, **options)
class MasonLexer(RegexLexer):
"""
Generic `mason templates`_ lexer. Stolen from Myghty lexer. Code that isn't
Mason markup is HTML.
.. _mason templates: http://www.masonhq.com/
.. versionadded:: 1.4
"""
name = 'Mason'
aliases = ['mason']
filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
mimetypes = ['application/x-mason']
tokens = {
'root': [
(r'\s+', Text),
(r'(?s)(<%doc>)(.*?)(</%doc>)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
bygroups(Name.Tag, Text, Name.Function, Name.Tag,
using(this), Name.Tag)),
(r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
bygroups(Name.Tag, Name.Function, Name.Tag,
using(PerlLexer), Name.Tag)),
(r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
(r'</&>', Name.Tag),
(r'(?s)(<%!?)(.*?)(%>)',
bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
(r'(?<=^)#[^\n]*(\n|\Z)', Comment),
(r'(?<=^)(%)([^\n]*)(\n|\Z)',
bygroups(Name.Tag, using(PerlLexer), Other)),
(r"""(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=[%#]) | # an eval or comment line
(?=</?[%&]) | # a substitution or block or
# call start or end
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)""", bygroups(using(HtmlLexer), Operator)),
]
}
def analyse_text(text):
result = 0.0
if re.search(r'</%(class|doc|init)%>', text) is not None:
result = 1.0
elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
result = 0.11
return result
class MakoLexer(RegexLexer):
"""
Generic `mako templates`_ lexer. Code that isn't Mako
markup is yielded as `Token.Other`.
.. versionadded:: 0.7
.. _mako templates: http://www.makotemplates.org/
"""
name = 'Mako'
aliases = ['mako']
filenames = ['*.mao']
mimetypes = ['application/x-mako']
tokens = {
'root': [
(r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
bygroups(Text, Comment.Preproc, Keyword, Other)),
(r'(\s*)(%)([^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
(r'(\s*)(##[^\n]*)(\n|\Z)',
bygroups(Text, Comment.Preproc, Other)),
(r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
(r'(<%)([\w.:]+)',
bygroups(Comment.Preproc, Name.Builtin), 'tag'),
(r'(</%)([\w.:]+)(>)',
bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
(r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
(r'(?s)(<%(?:!?))(.*?)(%>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(\$\{)(.*?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?<=\n)(?=%|\#\#) | # an eval or comment line
(?=\#\*) | # multiline comment
(?=</?%) | # a python block
# call start or end
(?=\$\{) | # a substitution
(?<=\n)(?=\s*%) |
# - don't consume
(\\\n) | # an escaped newline
\Z # end of string
)
''', bygroups(Other, Operator)),
(r'\s+', Text),
],
'ondeftags': [
(r'<%', Comment.Preproc),
(r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
include('tag'),
],
'tag': [
(r'((?:\w+)\s*=)(\s*)(".*?")',
bygroups(Name.Attribute, Text, String)),
(r'/?\s*>', Comment.Preproc, '#pop'),
(r'\s+', Text),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class MakoHtmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `HtmlLexer`.
.. versionadded:: 0.7
"""
name = 'HTML+Mako'
aliases = ['html+mako']
mimetypes = ['text/html+mako']
def __init__(self, **options):
super().__init__(HtmlLexer, MakoLexer, **options)
class MakoXmlLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `XmlLexer`.
.. versionadded:: 0.7
"""
name = 'XML+Mako'
aliases = ['xml+mako']
mimetypes = ['application/xml+mako']
def __init__(self, **options):
super().__init__(XmlLexer, MakoLexer, **options)
class MakoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `JavascriptLexer`.
.. versionadded:: 0.7
"""
name = 'JavaScript+Mako'
aliases = ['js+mako', 'javascript+mako']
mimetypes = ['application/x-javascript+mako',
'text/x-javascript+mako',
'text/javascript+mako']
def __init__(self, **options):
super().__init__(JavascriptLexer, MakoLexer, **options)
class MakoCssLexer(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.7
"""
name = 'CSS+Mako'
aliases = ['css+mako']
mimetypes = ['text/css+mako']
def __init__(self, **options):
super().__init__(CssLexer, MakoLexer, **options)
# Genshi and Cheetah lexers courtesy of Matt Good.
class CheetahPythonLexer(Lexer):
"""
Lexer for handling Cheetah's special $ tokens in Python syntax.
"""
def get_tokens_unprocessed(self, text):
pylexer = PythonLexer(**self.options)
for pos, type_, value in pylexer.get_tokens_unprocessed(text):
if type_ == Token.Error and value == '$':
type_ = Comment.Preproc
yield pos, type_, value
class CheetahLexer(RegexLexer):
"""
Generic `cheetah templates`_ lexer. Code that isn't Cheetah
markup is yielded as `Token.Other`. This also works for
`spitfire templates`_ which use the same syntax.
.. _cheetah templates: http://www.cheetahtemplate.org/
.. _spitfire templates: http://code.google.com/p/spitfire/
"""
name = 'Cheetah'
aliases = ['cheetah', 'spitfire']
filenames = ['*.tmpl', '*.spt']
mimetypes = ['application/x-cheetah', 'application/x-spitfire']
tokens = {
'root': [
(r'(##[^\n]*)$',
(bygroups(Comment))),
(r'#[*](.|\n)*?[*]#', Comment),
(r'#end[^#\n]*(?:#|$)', Comment.Preproc),
(r'#slurp$', Comment.Preproc),
(r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
(bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc))),
# TODO support other Python syntax like $foo['bar']
(r'(\$)([a-zA-Z_][\w.]*\w)',
bygroups(Comment.Preproc, using(CheetahPythonLexer))),
(r'(?s)(\$\{!?)(.*?)(\})',
bygroups(Comment.Preproc, using(CheetahPythonLexer),
Comment.Preproc)),
(r'''(?sx)
(.+?) # anything, followed by:
(?:
(?=\#[#a-zA-Z]*) | # an eval comment
(?=\$[a-zA-Z_{]) | # a substitution
\Z # end of string
)
''', Other),
(r'\s+', Text),
],
}
class CheetahHtmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `HtmlLexer`.
"""
name = 'HTML+Cheetah'
aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
mimetypes = ['text/html+cheetah', 'text/html+spitfire']
def __init__(self, **options):
super().__init__(HtmlLexer, CheetahLexer, **options)
class CheetahXmlLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `XmlLexer`.
"""
name = 'XML+Cheetah'
aliases = ['xml+cheetah', 'xml+spitfire']
mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
def __init__(self, **options):
super().__init__(XmlLexer, CheetahLexer, **options)
class CheetahJavascriptLexer(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `JavascriptLexer`.
"""
name = 'JavaScript+Cheetah'
aliases = ['js+cheetah', 'javascript+cheetah',
'js+spitfire', 'javascript+spitfire']
mimetypes = ['application/x-javascript+cheetah',
'text/x-javascript+cheetah',
'text/javascript+cheetah',
'application/x-javascript+spitfire',
'text/x-javascript+spitfire',
'text/javascript+spitfire']
def __init__(self, **options):
super().__init__(JavascriptLexer, CheetahLexer, **options)
class GenshiTextLexer(RegexLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
templates.
"""
name = 'Genshi Text'
aliases = ['genshitext']
mimetypes = ['application/x-genshi-text', 'text/x-genshi']
tokens = {
'root': [
(r'[^#$\s]+', Other),
(r'^(\s*)(##.*)$', bygroups(Text, Comment)),
(r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
include('variable'),
(r'[#$\s]', Other),
],
'directive': [
(r'\n', Text, '#pop'),
(r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
(r'(choose|when|with)([^\S\n]+)(.*)',
bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
(r'(choose|otherwise)\b', Keyword, '#pop'),
(r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
Name.Variable),
]
}
class GenshiMarkupLexer(RegexLexer):
"""
Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
`GenshiLexer`.
"""
flags = re.DOTALL
tokens = {
'root': [
(r'[^<$]+', Other),
(r'(<\?python)(.*?)(\?>)',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
# yield style and script blocks as Other
(r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
(r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
(r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
include('variable'),
(r'[<$]', Other),
],
'pytag': [
(r'\s+', Text),
(r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'pyattr': [
('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
(r'[^\s>]+', String, '#pop'),
],
'tag': [
(r'\s+', Text),
(r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
(r'[\w:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('"', String, 'attr-dstring'),
("'", String, 'attr-sstring'),
(r'[^\s>]*', String, '#pop')
],
'attr-dstring': [
('"', String, '#pop'),
include('strings'),
("'", String)
],
'attr-sstring': [
("'", String, '#pop'),
include('strings'),
("'", String)
],
'strings': [
('[^"\'$]+', String),
include('variable')
],
'variable': [
(r'(?<!\$)(\$\{)(.+?)(\})',
bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
(r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
Name.Variable),
]
}
class HtmlGenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
`kid <http://kid-templating.org/>`_ kid HTML templates.
"""
name = 'HTML+Genshi'
aliases = ['html+genshi', 'html+kid']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+genshi']
def __init__(self, **options):
super().__init__(HtmlLexer, GenshiMarkupLexer, **options)
def analyse_text(text):
rv = 0.0
if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search(r'py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + HtmlLexer.analyse_text(text) - 0.01
class GenshiLexer(DelegatingLexer):
"""
A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
`kid <http://kid-templating.org/>`_ kid XML templates.
"""
name = 'Genshi'
aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
filenames = ['*.kid']
alias_filenames = ['*.xml']
mimetypes = ['application/x-genshi', 'application/x-kid']
def __init__(self, **options):
super().__init__(XmlLexer, GenshiMarkupLexer, **options)
def analyse_text(text):
rv = 0.0
if re.search(r'\$\{.*?\}', text) is not None:
rv += 0.2
if re.search(r'py:(.*?)=["\']', text) is not None:
rv += 0.2
return rv + XmlLexer.analyse_text(text) - 0.01
class JavascriptGenshiLexer(DelegatingLexer):
"""
A lexer that highlights javascript code in genshi text templates.
"""
name = 'JavaScript+Genshi Text'
aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
'javascript+genshi']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+genshi',
'text/x-javascript+genshi',
'text/javascript+genshi']
def __init__(self, **options):
super().__init__(JavascriptLexer, GenshiTextLexer, **options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class CssGenshiLexer(DelegatingLexer):
"""
A lexer that highlights CSS definitions in genshi text templates.
"""
name = 'CSS+Genshi Text'
aliases = ['css+genshitext', 'css+genshi']
alias_filenames = ['*.css']
mimetypes = ['text/css+genshi']
def __init__(self, **options):
super().__init__(CssLexer, GenshiTextLexer, **options)
def analyse_text(text):
return GenshiLexer.analyse_text(text) - 0.05
class RhtmlLexer(DelegatingLexer):
"""
Subclass of the ERB lexer that highlights the unlexed data with the
html lexer.
Nested Javascript and CSS is highlighted too.
"""
name = 'RHTML'
aliases = ['rhtml', 'html+erb', 'html+ruby']
filenames = ['*.rhtml']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+ruby']
def __init__(self, **options):
super().__init__(HtmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
# one more than the XmlErbLexer returns
rv += 0.5
return rv
class XmlErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights data outside preprocessor
directives with the `XmlLexer`.
"""
name = 'XML+Ruby'
aliases = ['xml+erb', 'xml+ruby']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+ruby']
def __init__(self, **options):
super().__init__(XmlLexer, ErbLexer, **options)
def analyse_text(text):
rv = ErbLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
"""
name = 'CSS+Ruby'
aliases = ['css+erb', 'css+ruby']
alias_filenames = ['*.css']
mimetypes = ['text/css+ruby']
def __init__(self, **options):
super().__init__(CssLexer, ErbLexer, **options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class JavascriptErbLexer(DelegatingLexer):
"""
Subclass of `ErbLexer` which highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Ruby'
aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+ruby',
'text/x-javascript+ruby',
'text/javascript+ruby']
def __init__(self, **options):
super().__init__(JavascriptLexer, ErbLexer, **options)
def analyse_text(text):
return ErbLexer.analyse_text(text) - 0.05
class HtmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+PHP'
aliases = ['html+php']
filenames = ['*.phtml']
alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
'*.php[345]']
mimetypes = ['application/x-php',
'application/x-httpd-php', 'application/x-httpd-php3',
'application/x-httpd-php4', 'application/x-httpd-php5']
def __init__(self, **options):
super().__init__(HtmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
"""
name = 'XML+PHP'
aliases = ['xml+php']
alias_filenames = ['*.xml', '*.php', '*.php[345]']
mimetypes = ['application/xml+php']
def __init__(self, **options):
super().__init__(XmlLexer, PhpLexer, **options)
def analyse_text(text):
rv = PhpLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
"""
name = 'CSS+PHP'
aliases = ['css+php']
alias_filenames = ['*.css']
mimetypes = ['text/css+php']
def __init__(self, **options):
super().__init__(CssLexer, PhpLexer, **options)
def analyse_text(text):
return PhpLexer.analyse_text(text) - 0.05
class JavascriptPhpLexer(DelegatingLexer):
"""
Subclass of `PhpLexer` which highlights unmatched data with the
`JavascriptLexer`.
"""
name = 'JavaScript+PHP'
aliases = ['js+php', 'javascript+php']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+php',
'text/x-javascript+php',
'text/javascript+php']
def __init__(self, **options):
super().__init__(JavascriptLexer, PhpLexer, **options)
def analyse_text(text):
return PhpLexer.analyse_text(text)
class HtmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+Smarty'
aliases = ['html+smarty']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
mimetypes = ['text/html+smarty']
def __init__(self, **options):
super().__init__(HtmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Smarty'
aliases = ['xml+smarty']
alias_filenames = ['*.xml', '*.tpl']
mimetypes = ['application/xml+smarty']
def __init__(self, **options):
super().__init__(XmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Smarty'
aliases = ['css+smarty']
alias_filenames = ['*.css', '*.tpl']
mimetypes = ['text/css+smarty']
def __init__(self, **options):
super().__init__(CssLexer, SmartyLexer, **options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class JavascriptSmartyLexer(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Smarty'
aliases = ['js+smarty', 'javascript+smarty']
alias_filenames = ['*.js', '*.tpl']
mimetypes = ['application/x-javascript+smarty',
'text/x-javascript+smarty',
'text/javascript+smarty']
def __init__(self, **options):
super().__init__(JavascriptLexer, SmartyLexer, **options)
def analyse_text(text):
return SmartyLexer.analyse_text(text) - 0.05
class HtmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`HtmlLexer`.
Nested Javascript and CSS is highlighted too.
"""
name = 'HTML+Django/Jinja'
aliases = ['html+django', 'html+jinja', 'htmldjango']
alias_filenames = ['*.html', '*.htm', '*.xhtml']
mimetypes = ['text/html+django', 'text/html+jinja']
def __init__(self, **options):
super().__init__(HtmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text):
rv += 0.5
return rv
class XmlDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Django/Jinja'
aliases = ['xml+django', 'xml+jinja']
alias_filenames = ['*.xml']
mimetypes = ['application/xml+django', 'application/xml+jinja']
def __init__(self, **options):
super().__init__(XmlLexer, DjangoLexer, **options)
def analyse_text(text):
rv = DjangoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class CssDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`CssLexer`.
"""
name = 'CSS+Django/Jinja'
aliases = ['css+django', 'css+jinja']
alias_filenames = ['*.css']
mimetypes = ['text/css+django', 'text/css+jinja']
def __init__(self, **options):
super().__init__(CssLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JavascriptDjangoLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`JavascriptLexer`.
"""
name = 'JavaScript+Django/Jinja'
aliases = ['js+django', 'javascript+django',
'js+jinja', 'javascript+jinja']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+django',
'application/x-javascript+jinja',
'text/x-javascript+django',
'text/x-javascript+jinja',
'text/javascript+django',
'text/javascript+jinja']
def __init__(self, **options):
super().__init__(JavascriptLexer, DjangoLexer, **options)
def analyse_text(text):
return DjangoLexer.analyse_text(text) - 0.05
class JspRootLexer(RegexLexer):
"""
Base for the `JspLexer`. Yields `Token.Other` for area outside of
JSP tags.
.. versionadded:: 0.7
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
# FIXME: I want to make these keywords but still parse attributes.
(r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
Keyword),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
],
}
class JspLexer(DelegatingLexer):
"""
Lexer for Java Server Pages.
.. versionadded:: 0.7
"""
name = 'Java Server Page'
aliases = ['jsp']
filenames = ['*.jsp']
mimetypes = ['application/x-jsp']
def __init__(self, **options):
super().__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = JavaLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class EvoqueLexer(RegexLexer):
"""
For files using the Evoque templating system.
.. versionadded:: 1.1
"""
name = 'Evoque'
aliases = ['evoque']
filenames = ['*.evoque']
mimetypes = ['application/x-evoque']
flags = re.DOTALL
tokens = {
'root': [
(r'[^#$]+', Other),
(r'#\[', Comment.Multiline, 'comment'),
(r'\$\$', Other),
# svn keywords
(r'\$\w+:[^$\n]*\$', Comment.Multiline),
# directives: begin, end
(r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, Punctuation)),
# directives: evoque, overlay
# see doc for handling first name arg: /directives/evoque/
# + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
# should be using(PythonLexer), not passed out as String
(r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
r'(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
String, using(PythonLexer), Punctuation)),
# directives: if, for, prefer, test
(r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
bygroups(Punctuation, Name.Builtin, Punctuation, None,
using(PythonLexer), Punctuation)),
# directive clauses (no {} expression)
(r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
# expressions
(r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
bygroups(Punctuation, None, using(PythonLexer),
Name.Builtin, None, None, Punctuation)),
(r'#', Other),
],
'comment': [
(r'[^\]#]', Comment.Multiline),
(r'#\[', Comment.Multiline, '#push'),
(r'\]#', Comment.Multiline, '#pop'),
(r'[\]#]', Comment.Multiline)
],
}
class EvoqueHtmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 1.1
"""
name = 'HTML+Evoque'
aliases = ['html+evoque']
filenames = ['*.html']
mimetypes = ['text/html+evoque']
def __init__(self, **options):
super().__init__(HtmlLexer, EvoqueLexer, **options)
class EvoqueXmlLexer(DelegatingLexer):
"""
Subclass of the `EvoqueLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 1.1
"""
name = 'XML+Evoque'
aliases = ['xml+evoque']
filenames = ['*.xml']
mimetypes = ['application/xml+evoque']
def __init__(self, **options):
super().__init__(XmlLexer, EvoqueLexer, **options)
class ColdfusionLexer(RegexLexer):
"""
Coldfusion statements
"""
name = 'cfstatement'
aliases = ['cfs']
filenames = []
mimetypes = []
flags = re.IGNORECASE
tokens = {
'root': [
(r'//.*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'\+\+|--', Operator),
(r'[-+*/^&=!]', Operator),
(r'<=|>=|<|>|==', Operator),
(r'mod\b', Operator),
(r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
(r'\|\||&&', Operator),
(r'\?', Operator),
(r'"', String.Double, 'string'),
# There is a special rule for allowing html in single quoted
# strings, evidently.
(r"'.*?'", String.Single),
(r'\d+', Number),
(r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'(application|session|client|cookie|super|this|variables|arguments)\b',
Name.Constant),
(r'([a-z_$][\w.]*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-z_$][\w.]*', Name.Variable),
(r'[()\[\]{};:,.\\]', Punctuation),
(r'\s+', Text),
],
'string': [
(r'""', String.Double),
(r'#.+?#', String.Interp),
(r'[^"#]+', String.Double),
(r'#', String.Double),
(r'"', String.Double, '#pop'),
],
}
class ColdfusionMarkupLexer(RegexLexer):
"""
Coldfusion markup only
"""
name = 'Coldfusion'
aliases = ['cf']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'[^<]+', Other),
include('tags'),
(r'<[^<>]*', Other),
],
'tags': [
(r'<!---', Comment.Multiline, 'cfcomment'),
(r'(?s)<!--.*?-->', Comment),
(r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
(r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
# negative lookbehind is for strings with embedded >
(r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
r'mailpart|mail|header|content|zip|image|lock|argument|try|'
r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
],
'cfoutput': [
(r'[^#<]+', Other),
(r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
Punctuation)),
# (r'<cfoutput.*?>', Name.Builtin, '#push'),
(r'</cfoutput.*?>', Name.Builtin, '#pop'),
include('tags'),
(r'(?s)<[^<>]*', Other),
(r'#', Other),
],
'cfcomment': [
(r'<!---', Comment.Multiline, '#push'),
(r'--->', Comment.Multiline, '#pop'),
(r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
],
}
class ColdfusionHtmlLexer(DelegatingLexer):
"""
Coldfusion markup in html
"""
name = 'Coldfusion HTML'
aliases = ['cfm']
filenames = ['*.cfm', '*.cfml']
mimetypes = ['application/x-coldfusion']
def __init__(self, **options):
super().__init__(HtmlLexer, ColdfusionMarkupLexer, **options)
class ColdfusionCFCLexer(DelegatingLexer):
"""
Coldfusion markup/script components
.. versionadded:: 2.0
"""
name = 'Coldfusion CFC'
aliases = ['cfc']
filenames = ['*.cfc']
mimetypes = []
def __init__(self, **options):
super().__init__(ColdfusionHtmlLexer, ColdfusionLexer, **options)
class SspLexer(DelegatingLexer):
"""
Lexer for Scalate Server Pages.
.. versionadded:: 1.4
"""
name = 'Scalate Server Page'
aliases = ['ssp']
filenames = ['*.ssp']
mimetypes = ['application/x-ssp']
def __init__(self, **options):
super().__init__(XmlLexer, JspRootLexer, **options)
def analyse_text(text):
rv = 0.0
if re.search(r'val \w+\s*:', text):
rv += 0.6
if looks_like_xml(text):
rv += 0.2
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class TeaTemplateRootLexer(RegexLexer):
"""
Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
code blocks.
.. versionadded:: 1.5
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
],
}
class TeaTemplateLexer(DelegatingLexer):
"""
Lexer for `Tea Templates <http://teatrove.org/>`_.
.. versionadded:: 1.5
"""
name = 'Tea'
aliases = ['tea']
filenames = ['*.tea']
mimetypes = ['text/x-tea']
def __init__(self, **options):
super().__init__(XmlLexer, TeaTemplateRootLexer, **options)
def analyse_text(text):
rv = TeaLangLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
if '<%' in text and '%>' in text:
rv += 0.1
return rv
class LassoHtmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`HtmlLexer`.
Nested JavaScript and CSS is also highlighted.
.. versionadded:: 1.6
"""
name = 'HTML+Lasso'
aliases = ['html+lasso']
alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['text/html+lasso',
'application/x-httpd-lasso',
'application/x-httpd-lasso[89]']
def __init__(self, **options):
super().__init__(HtmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if html_doctype_matches(text): # same as HTML lexer
rv += 0.5
return rv
class LassoXmlLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
.. versionadded:: 1.6
"""
name = 'XML+Lasso'
aliases = ['xml+lasso']
alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['application/xml+lasso']
def __init__(self, **options):
super().__init__(XmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
class LassoCssLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`CssLexer`.
.. versionadded:: 1.6
"""
name = 'CSS+Lasso'
aliases = ['css+lasso']
alias_filenames = ['*.css']
mimetypes = ['text/css+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super().__init__(CssLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
if re.search(r'\w+:.+?;', text):
rv += 0.1
if 'padding:' in text:
rv += 0.1
return rv
class LassoJavascriptLexer(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`JavascriptLexer`.
.. versionadded:: 1.6
"""
name = 'JavaScript+Lasso'
aliases = ['js+lasso', 'javascript+lasso']
alias_filenames = ['*.js']
mimetypes = ['application/x-javascript+lasso',
'text/x-javascript+lasso',
'text/javascript+lasso']
def __init__(self, **options):
options['requiredelimiters'] = True
super().__init__(JavascriptLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.05
return rv
class HandlebarsLexer(RegexLexer):
"""
Generic `handlebars <http://handlebarsjs.com/>` template lexer.
Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
Everything else is left for a delegating lexer.
.. versionadded:: 2.0
"""
name = "Handlebars"
aliases = ['handlebars']
tokens = {
'root': [
(r'[^{]+', Other),
# Comment start {{! }} or {{!--
(r'\{\{!.*\}\}', Comment),
# HTML Escaping open {{{expression
(r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
# {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~
(r'(\{\{)([#~/]+)([^\s}]*)',
bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'),
(r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
],
'tag': [
(r'\s+', Text),
# HTML Escaping close }}}
(r'\}\}\}', Comment.Special, '#pop'),
# blockClose}}, includes optional tilde ~
(r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'),
# {{opt=something}}
(r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)),
# Partials {{> ...}}
(r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)),
(r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)),
(r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation),
'dynamic-partial'),
include('generic'),
],
'dynamic-partial': [
(r'\s+', Text),
(r'\)', Punctuation, '#pop'),
(r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text,
Name.Variable, Text)),
(r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text,
using(this, state='variable'))),
(r'[\w-]+', Name.Function),
include('generic'),
],
'variable': [
(r'[()/@a-zA-Z][\w-]*', Name.Variable),
(r'\.[\w-]+', Name.Variable),
(r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable),
],
'generic': [
include('variable'),
# borrowed from DjangoLexer
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
]
}
class HandlebarsHtmlLexer(DelegatingLexer):
"""
Subclass of the `HandlebarsLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Handlebars"
aliases = ["html+handlebars"]
filenames = ['*.handlebars', '*.hbs']
mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
def __init__(self, **options):
super().__init__(HtmlLexer, HandlebarsLexer, **options)
class YamlJinjaLexer(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`YamlLexer`.
Commonly used in Saltstack salt states.
.. versionadded:: 2.0
"""
name = 'YAML+Jinja'
aliases = ['yaml+jinja', 'salt', 'sls']
filenames = ['*.sls']
mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
def __init__(self, **options):
super().__init__(YamlLexer, DjangoLexer, **options)
class LiquidLexer(RegexLexer):
"""
Lexer for `Liquid templates
<http://www.rubydoc.info/github/Shopify/liquid>`_.
.. versionadded:: 2.0
"""
name = 'liquid'
aliases = ['liquid']
filenames = ['*.liquid']
tokens = {
'root': [
(r'[^{]+', Text),
# tags and block tags
(r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
# output tags
(r'(\{\{)(\s*)([^\s}]+)',
bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
'output'),
(r'\{', Text)
],
'tag-or-block': [
# builtin logic blocks
(r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
(r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
combined('end-of-block', 'whitespace', 'generic')),
(r'(else)(\s*)(%\})',
bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),
# other builtin blocks
(r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation), '#pop'),
(r'(comment)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
(r'(raw)(\s*)(%\})',
bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),
# end of block
(r'(end(case|unless|if))(\s*)(%\})',
bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
(r'(end([^\s%]+))(\s*)(%\})',
bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),
# builtin tags (assign and include are handled together with usual tags)
(r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
bygroups(Name.Tag, Whitespace,
using(this, state='generic'), Punctuation, Whitespace),
'variable-tag-markup'),
# other tags or blocks
(r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
],
'output': [
include('whitespace'),
(r'\}\}', Punctuation, '#pop'), # end of output
(r'\|', Punctuation, 'filters')
],
'filters': [
include('whitespace'),
(r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output
(r'([^\s|:]+)(:?)(\s*)',
bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
],
'filter-markup': [
(r'\|', Punctuation, '#pop'),
include('end-of-tag'),
include('default-param-markup')
],
'condition': [
include('end-of-block'),
include('whitespace'),
(r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
bygroups(using(this, state = 'generic'), Whitespace, Operator,
Whitespace, using(this, state = 'generic'), Whitespace,
Punctuation)),
(r'\b!', Operator),
(r'\bnot\b', Operator.Word),
(r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
bygroups(using(this, state = 'generic'), Whitespace, Operator.Word,
Whitespace, using(this, state = 'generic'))),
include('generic'),
include('whitespace')
],
'generic-value': [
include('generic'),
include('end-at-whitespace')
],
'operator': [
(r'(\s*)((=|!|>|<)=?)(\s*)',
bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
(r'(\s*)(\bcontains\b)(\s*)',
bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
],
'end-of-tag': [
(r'\}\}', Punctuation, '#pop')
],
'end-of-block': [
(r'%\}', Punctuation, ('#pop', '#pop'))
],
'end-at-whitespace': [
(r'\s+', Whitespace, '#pop')
],
# states for unknown markup
'param-markup': [
include('whitespace'),
# params with colons or equals
(r'([^\s=:]+)(\s*)(=|:)',
bygroups(Name.Attribute, Whitespace, Operator)),
# explicit variables
(r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
bygroups(Punctuation, Whitespace, using(this, state = 'variable'),
Whitespace, Punctuation)),
include('string'),
include('number'),
include('keyword'),
(r',', Punctuation)
],
'default-param-markup': [
include('param-markup'),
(r'.', Text) # fallback for switches / variables / un-quoted strings / ...
],
'variable-param-markup': [
include('param-markup'),
include('variable'),
(r'.', Text) # fallback
],
'tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('default-param-markup')
],
'variable-tag-markup': [
(r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
include('variable-param-markup')
],
# states for different values types
'keyword': [
(r'\b(false|true)\b', Keyword.Constant)
],
'variable': [
(r'[a-zA-Z_]\w*', Name.Variable),
(r'(?<=\w)\.(?=\w)', Punctuation)
],
'string': [
(r"'[^']*'", String.Single),
(r'"[^"]*"', String.Double)
],
'number': [
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer)
],
'generic': [ # decides for variable, string, keyword or number
include('keyword'),
include('string'),
include('number'),
include('variable')
],
'whitespace': [
(r'[ \t]+', Whitespace)
],
# states for builtin blocks
'comment': [
(r'(\{%)(\s*)(endcomment)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), ('#pop', '#pop')),
(r'.', Comment)
],
'raw': [
(r'[^{]+', Text),
(r'(\{%)(\s*)(endraw)(\s*)(%\})',
bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
Punctuation), '#pop'),
(r'\{', Text)
],
}
class TwigLexer(RegexLexer):
"""
`Twig <http://twig.sensiolabs.org/>`_ template lexer.
It just highlights Twig code between the preprocessor directives,
other data is left untouched by the lexer.
.. versionadded:: 2.0
"""
name = 'Twig'
aliases = ['twig']
mimetypes = ['application/x-twig']
flags = re.M | re.S
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w-]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
tokens = {
'root': [
(r'[^{]+', Other),
(r'\{\{', Comment.Preproc, 'var'),
# twig comments
(r'\{\#.*?\#\}', Comment),
# raw twig blocks
(r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
(r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
Other, Comment.Preproc, Text, Keyword, Text,
Comment.Preproc)),
# filter blocks
(r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
'tag'),
(r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
bygroups(Comment.Preproc, Text, Keyword), 'tag'),
(r'\{', Other),
],
'varnames': [
(r'(\|)(\s*)(%s)' % _ident_inner,
bygroups(Operator, Text, Name.Function)),
(r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
(r'(in|not|and|b-and|or|b-or|b-xor|is'
             r'|if|elseif|else|import'
             r'|constant|defined|divisibleby|empty|even|iterable|odd|sameas'
             r'|matches|starts\s+with|ends\s+with)\b',
Keyword),
(r'(loop|block|parent)\b', Name.Builtin),
(_ident_inner, Name.Variable),
(r'\.' + _ident_inner, Name.Variable),
(r'\.[0-9]+', Number),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
'var': [
(r'\s+', Text),
(r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames')
],
'tag': [
(r'\s+', Text),
(r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
include('varnames'),
(r'.', Punctuation),
],
}
class TwigHtmlLexer(DelegatingLexer):
"""
Subclass of the `TwigLexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML+Twig"
aliases = ["html+twig"]
filenames = ['*.twig']
mimetypes = ['text/html+twig']
def __init__(self, **options):
super().__init__(HtmlLexer, TwigLexer, **options)
class Angular2Lexer(RegexLexer):
"""
Generic
`angular2 <http://victorsavkin.com/post/119943127151/angular-2-template-syntax>`_
template lexer.
Highlights only the Angular template tags (stuff between `{{` and `}}` and
special attributes: '(event)=', '[property]=', '[(twoWayBinding)]=').
Everything else is left for a delegating lexer.
.. versionadded:: 2.1
"""
name = "Angular2"
aliases = ['ng2']
tokens = {
'root': [
(r'[^{([*#]+', Other),
# {{meal.name}}
(r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),
# (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
(r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
'attr'),
(r'([([]+)([\w:.-]+)([\])]+)(\s*)',
bygroups(Punctuation, Name.Attribute, Punctuation, Text)),
# *ngIf="..."; #f="ngForm"
(r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'),
(r'([*#])([\w:.-]+)(\s*)',
bygroups(Punctuation, Name.Attribute, Text)),
],
'ngExpression': [
(r'\s+(\|\s+)?', Text),
(r'\}\}', Comment.Preproc, '#pop'),
# Literals
(r':?(true|false)', String.Boolean),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
# Variabletext
(r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
(r'\.[\w-]+(\(.*\))?', Name.Variable),
# inline If
(r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class Angular2HtmlLexer(DelegatingLexer):
"""
Subclass of the `Angular2Lexer` that highlights unlexed data with the
`HtmlLexer`.
.. versionadded:: 2.0
"""
name = "HTML + Angular2"
aliases = ["html+ng2"]
filenames = ['*.ng2']
def __init__(self, **options):
super().__init__(HtmlLexer, Angular2Lexer, **options)
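# A minimal smoke-test sketch for the lexers defined above, kept as comments so
# the module's import-time behaviour is unchanged. It assumes the usual Pygments
# entry points (`highlight`, `TerminalFormatter`) are available alongside the
# Angular2HtmlLexer class; the template string itself is illustrative.
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   code = '<span [innerText]="{{ meal.name }}"></span>'
#   print(highlight(code, Angular2HtmlLexer(), TerminalFormatter()))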
| 31.587766
| 89
| 0.495931
|
c10810c72bd415507fd08f829d642393752240a3
| 5,633
|
py
|
Python
|
depccg/semantics/ccg2lambda/parse.py
|
erezagami/depccg
|
15347f93e4c6c923c343e53bcb873e0f2f954446
|
[
"MIT"
] | null | null | null |
depccg/semantics/ccg2lambda/parse.py
|
erezagami/depccg
|
15347f93e4c6c923c343e53bcb873e0f2f954446
|
[
"MIT"
] | null | null | null |
depccg/semantics/ccg2lambda/parse.py
|
erezagami/depccg
|
15347f93e4c6c923c343e53bcb873e0f2f954446
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from lxml import etree
from multiprocessing import Pool
from multiprocessing import Lock
import os
import sys
from nltk.sem.logic import LogicalExpressionException
from .ccg2lambda_tools import assign_semantics_to_ccg
from .semantic_index import SemanticIndex
from .logic_parser import lexpr
from .nltk2normal import remove_true
logger = logging.getLogger(__name__)
SEMANTIC_INDEX=None
GOLD_TREES=True
NBEST=0
SENTENCES=None
kMaxTasksPerChild=None
lock = Lock()
def parse(ccg, templates, nbest=0, ncores=3):
global SEMANTIC_INDEX
global SENTENCES
global NBEST
NBEST = nbest
if not os.path.exists(templates):
print('File does not exist: {0}'.format(templates))
sys.exit(1)
logger.info(templates)
SEMANTIC_INDEX = SemanticIndex(templates)
SENTENCES = ccg.findall('.//sentence')
sentence_inds = range(len(SENTENCES))
sem_nodes_lists = semantic_parse_sentences(sentence_inds, ncores)
assert len(sem_nodes_lists) == len(SENTENCES), \
'Element mismatch: {0} vs {1}'.format(len(sem_nodes_lists), len(SENTENCES))
logging.info('Adding XML semantic nodes to sentences...')
formulas_list = []
for sentence, (sem_nodes, formulas) in zip(SENTENCES, sem_nodes_lists):
formulas = [str(remove_true(lexpr(formula))) for formula in formulas]
formulas_list.append(formulas)
sentence.extend(sem_nodes)
logging.info('Finished adding XML semantic nodes to sentences.')
root_xml_str = serialize_tree(ccg)
return root_xml_str, formulas_list
def semantic_parse_sentences(sentence_inds, ncores=1):
if ncores <= 1:
sem_nodes_lists = semantic_parse_sentences_seq(sentence_inds)
else:
sem_nodes_lists = semantic_parse_sentences_par(sentence_inds, ncores)
results = [([etree.fromstring(s) for s in sem_nodes], formulas)
for sem_nodes, formulas in sem_nodes_lists]
return results
def semantic_parse_sentences_par(sentence_inds, ncores=3):
pool = Pool(processes=ncores, maxtasksperchild=kMaxTasksPerChild)
results = pool.map(semantic_parse_sentence, sentence_inds)
pool.close()
pool.join()
return results
def semantic_parse_sentences_seq(sentence_inds):
results = []
for sentence_ind in sentence_inds:
result = semantic_parse_sentence(sentence_ind)
results.append(result)
return results
def semantic_parse_sentence(sentence_ind):
"""
`sentence` is an lxml tree with tokens and ccg nodes.
It returns an lxml semantics node.
"""
global lock
sentence = SENTENCES[sentence_ind]
sem_nodes = []
formulas = []
tree_indices = [int(sentence.get('gold_tree', '0')) + 1]
if NBEST != 1:
tree_indices = get_tree_indices(sentence, NBEST)
for tree_index in tree_indices:
sem_node = etree.Element('semantics')
try:
sem_tree = assign_semantics_to_ccg(
sentence, SEMANTIC_INDEX, tree_index)
filter_attributes(sem_tree)
sem_node.extend(sem_tree.xpath('.//descendant-or-self::span'))
sem_node.set('status', 'success')
sem_node.set('ccg_id',
sentence.xpath('./ccg[{0}]/@id'.format(tree_index))[0])
sem_node.set('root',
sentence.xpath('./ccg[{0}]/@root'.format(tree_index))[0])
formulas.append(sem_tree.attrib['sem'])
except Exception as e:
sem_node.set('status', 'failed')
# from pudb import set_trace; set_trace()
sentence_surf = ' '.join(sentence.xpath('tokens/token/@surf'))
lock.acquire()
logging.error('An error occurred: {0}\nSentence: {1}\nTree XML:\n{2}'.format(
e, sentence_surf,
etree.tostring(sentence, encoding='utf-8', pretty_print=True).decode('utf-8')))
lock.release()
# print('x', end='', file=sys.stdout)
formulas.append('FAILED!')
sem_nodes.append(sem_node)
sem_nodes = [etree.tostring(sem_node) for sem_node in sem_nodes]
return sem_nodes, formulas
def get_tree_indices(sentence, nbest):
num_ccg_trees = int(sentence.xpath('count(./ccg)'))
if nbest < 1:
nbest = num_ccg_trees
return list(range(1, min(nbest, num_ccg_trees) + 1))
keep_attributes = set(['id', 'child', 'sem', 'type'])
def filter_attributes(tree):
if 'coq_type' in tree.attrib and 'child' not in tree.attrib:
sem_type = \
tree.attrib['coq_type'].lstrip('["Parameter ').rstrip('."]')
if sem_type:
tree.attrib['type'] = sem_type
attrib_to_delete = [a for a in tree.attrib.keys() if a not in keep_attributes]
for a in attrib_to_delete:
del tree.attrib[a]
for child in tree:
filter_attributes(child)
return
def serialize_tree(tree):
tree_str = etree.tostring(
tree, xml_declaration=True, encoding='utf-8', pretty_print=True)
return tree_str
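# Hypothetical usage sketch, kept as comments; the input file name and the
# semantic template path are illustrative, not part of this module:
#
#   from lxml import etree
#   ccg = etree.parse('sentences.ccg.xml').getroot()
#   xml_str, formulas = parse(ccg, 'semantic_templates.yaml', nbest=1, ncores=1)
#   # `xml_str` is the serialized XML with <semantics> nodes attached;
#   # `formulas` is a list of formula lists, one per sentence.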
| 36.108974
| 95
| 0.677614
|
996f58cbc1164e772d7255dfc0e1f0c8068bf623
| 19,516
|
py
|
Python
|
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
andmikey/kedro
|
9b4e4135720609d44ffdf5248246fe805f0b5469
|
[
"Apache-2.0"
] | null | null | null |
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
andmikey/kedro
|
9b4e4135720609d44ffdf5248246fe805f0b5469
|
[
"Apache-2.0"
] | null | null | null |
kedro/template/{{ cookiecutter.repo_name }}/kedro_cli.py
|
andmikey/kedro
|
9b4e4135720609d44ffdf5248246fe805f0b5469
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line tools for manipulating a Kedro project.
Intended to be invoked via `kedro`."""
import os
import re
import shutil
import subprocess
import sys
import webbrowser
from collections import Counter
from glob import iglob
from pathlib import Path
from typing import Any, Dict, Iterable, List
import anyconfig
import click
from click import secho, style
from kedro.cli import main as kedro_main
from kedro.cli.utils import (
KedroCliError,
call,
export_nodes,
forward_command,
python_call,
)
from kedro.context import KEDRO_ENV_VAR, load_context
from kedro.runner import SequentialRunner
from kedro.utils import load_obj
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# get our package onto the python path
PROJ_PATH = Path(__file__).resolve().parent
os.environ["IPYTHONDIR"] = str(PROJ_PATH / ".ipython")
NO_DEPENDENCY_MESSAGE = """{0} is not installed. Please make sure {0} is in
src/requirements.txt and run `kedro install`."""
TAG_ARG_HELP = """Construct the pipeline using only nodes which have this tag
attached. The option can be used multiple times, which results in a
pipeline constructed from nodes having any of those tags."""
PIPELINE_ARG_HELP = """Name of the modular pipeline to run.
If not set, the project pipeline is run by default."""
ENV_ARG_HELP = """Run the pipeline in a configured environment. If not specified,
pipeline will run using environment `local`."""
NODE_ARG_HELP = """Run only nodes with specified names."""
FROM_NODES_HELP = """A list of node names which should be used as a starting point."""
TO_NODES_HELP = """A list of node names which should be used as an end point."""
FROM_INPUTS_HELP = (
"""A list of dataset names which should be used as a starting point."""
)
PARALLEL_ARG_HELP = """Run the pipeline using the `ParallelRunner`.
If not specified, use the `SequentialRunner`. This flag cannot be used together
with --runner."""
OPEN_ARG_HELP = """Open the documentation in your default browser after building."""
RUNNER_ARG_HELP = """Specify a runner that you want to run the pipeline with.
This option cannot be used together with --parallel."""
CONVERT_ALL_HELP = """Extract the nodes from all notebooks in the Kedro project directory,
including sub-folders."""
OVERWRITE_HELP = """If Python file already exists for the equivalent notebook,
overwrite its contents."""
LOAD_VERSION_HELP = """Specify a particular dataset version (timestamp) for loading."""
CONFIG_FILE_HELP = """Specify a YAML configuration file to load the run
command arguments from. If command line arguments are provided, they will
override the loaded ones."""
PARAMS_ARG_HELP = """Specify extra parameters that you want to pass
to the context initializer. Items must be separated by comma, keys - by colon,
example: param1:value1,param2:value2. Each item is split at its first colon,
so parameter values are allowed to contain colons, parameter keys are not."""
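# Illustrative invocation built from the flag help above; the pipeline name and
# the parameter values are hypothetical:
#
#   kedro run --pipeline my_pipeline --env local --parallel \
#       --params alpha:0.1,ratio:0.25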
def _split_string(ctx, param, value):
return [item for item in value.split(",") if item]
def _split_params(ctx, param, value):
result = {}
for item in _split_string(ctx, param, value):
item = item.split(":", 1)
if len(item) != 2:
ctx.fail(
"Invalid format of `{}` option: Item `{}` must contain a key and "
"a value separated by `:`.".format(param.name, item[0])
)
key = item[0].strip()
if not key:
ctx.fail(
"Invalid format of `{}` option: Parameter key cannot be "
"an empty string.".format(param.name)
)
value = item[1].strip()
try:
value = float(value)
except ValueError:
pass
else:
value = int(value) if value.is_integer() else value
result[key] = value
return result
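# For example (hypothetical direct call; in real use click supplies ctx/param,
# which are only consulted when an item is malformed):
#   _split_params(None, None, "alpha:0.1,grid:a:b") -> {"alpha": 0.1, "grid": "a:b"}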
def _reformat_load_versions(ctx, param, value) -> Dict[str, str]:
"""Reformat data structure from tuple to dictionary for `load-version`.
E.g ('dataset1:time1', 'dataset2:time2') -> {"dataset1": "time1", "dataset2": "time2"}.
"""
load_version_separator = ":"
load_versions_dict = {}
for load_version in value:
load_version_list = load_version.split(load_version_separator, 1)
if len(load_version_list) != 2:
raise ValueError(
"Expected the form of `load_version` to be "
"`dataset_name:YYYY-MM-DDThh.mm.ss.sssZ`,"
"found {} instead".format(load_version)
)
load_versions_dict[load_version_list[0]] = load_version_list[1]
return load_versions_dict
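# For example (hypothetical direct call):
#   _reformat_load_versions(None, None, ("cars:2019-02-13T14.35.12.223Z",))
#   -> {"cars": "2019-02-13T14.35.12.223Z"}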
def _config_file_callback(ctx, param, value):
"""Config file callback, that replaces command line options with config file
values. If command line options are passed, they override config file values.
"""
ctx.default_map = ctx.default_map or {}
section = ctx.info_name
if value:
config = anyconfig.load(value)[section]
ctx.default_map.update(config)
return value
@click.group(context_settings=CONTEXT_SETTINGS, name=__file__)
def cli():
"""Command line tools for manipulating a Kedro project."""
@cli.command()
@click.option(
"--from-inputs", type=str, default="", help=FROM_INPUTS_HELP, callback=_split_string
)
@click.option(
"--from-nodes", type=str, default="", help=FROM_NODES_HELP, callback=_split_string
)
@click.option(
"--to-nodes", type=str, default="", help=TO_NODES_HELP, callback=_split_string
)
@click.option("--node", "-n", "node_names", type=str, multiple=True, help=NODE_ARG_HELP)
@click.option(
"--runner", "-r", type=str, default=None, multiple=False, help=RUNNER_ARG_HELP
)
@click.option("--parallel", "-p", is_flag=True, multiple=False, help=PARALLEL_ARG_HELP)
@click.option(
"--env",
"-e",
type=str,
default=None,
multiple=False,
envvar=KEDRO_ENV_VAR,
help=ENV_ARG_HELP,
)
@click.option("--tag", "-t", type=str, multiple=True, help=TAG_ARG_HELP)
@click.option(
"--load-version",
"-lv",
type=str,
multiple=True,
help=LOAD_VERSION_HELP,
callback=_reformat_load_versions,
)
@click.option("--pipeline", type=str, default=None, help=PIPELINE_ARG_HELP)
@click.option(
"--config",
"-c",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
help=CONFIG_FILE_HELP,
callback=_config_file_callback,
)
@click.option(
"--params", type=str, default="", help=PARAMS_ARG_HELP, callback=_split_params
)
def run(
tag,
env,
parallel,
runner,
node_names,
to_nodes,
from_nodes,
from_inputs,
load_version,
pipeline,
config,
params,
):
"""Run the pipeline."""
if parallel and runner:
raise KedroCliError(
"Both --parallel and --runner options cannot be used together. "
"Please use either --parallel or --runner."
)
if parallel:
runner = "ParallelRunner"
runner_class = load_obj(runner, "kedro.runner") if runner else SequentialRunner
context = load_context(Path.cwd(), env=env, extra_params=params)
context.run(
tags=tag,
runner=runner_class(),
node_names=node_names,
from_nodes=from_nodes,
to_nodes=to_nodes,
from_inputs=from_inputs,
load_versions=load_version,
pipeline_name=pipeline,
)
@forward_command(cli, forward_help=True)
def test(args):
"""Run the test suite."""
try:
import pytest # pylint: disable=unused-import
except ImportError:
raise KedroCliError(NO_DEPENDENCY_MESSAGE.format("pytest"))
else:
python_call("pytest", args)
@cli.command()
@click.argument("files", type=click.Path(exists=True), nargs=-1)
def lint(files):
"""Run flake8, isort and (on Python >=3.6) black."""
# pylint: disable=unused-import
if not files:
files = ("src/tests", "src/{{ cookiecutter.python_package }}")
try:
import flake8
import isort
except ImportError as exc:
raise KedroCliError(NO_DEPENDENCY_MESSAGE.format(exc.name))
python_call("flake8", ("--max-line-length=88",) + files)
python_call("isort", ("-rc", "-tc", "-up", "-fgw=0", "-m=3", "-w=88") + files)
if sys.version_info[:2] >= (3, 6):
try:
import black
except ImportError:
raise KedroCliError(NO_DEPENDENCY_MESSAGE.format("black"))
python_call("black", files)
@cli.command()
def install():
"""Install project dependencies from both requirements.txt
and environment.yml (optional)."""
if (Path.cwd() / "src" / "environment.yml").is_file():
call(["conda", "install", "--file", "src/environment.yml", "--yes"])
pip_command = ["install", "-U", "-r", "src/requirements.txt"]
if os.name == "posix":
python_call("pip", pip_command)
else:
command = [sys.executable, "-m", "pip"] + pip_command
subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)
@forward_command(cli, forward_help=True)
def ipython(args):
"""Open IPython with project specific variables loaded."""
if "-h" not in args and "--help" not in args:
ipython_message()
call(["ipython"] + list(args))
@cli.command()
def package():
"""Package the project as a Python egg and wheel."""
call([sys.executable, "setup.py", "clean", "--all", "bdist_egg"], cwd="src")
call([sys.executable, "setup.py", "clean", "--all", "bdist_wheel"], cwd="src")
@cli.command("build-docs")
@click.option(
"--open",
"-o",
"open_docs",
is_flag=True,
multiple=False,
default=False,
help=OPEN_ARG_HELP,
)
def build_docs(open_docs):
"""Build the project documentation."""
python_call("pip", ["install", "src/[docs]"])
python_call("pip", ["install", "-r", "src/requirements.txt"])
python_call(
"ipykernel", ["install", "--user", "--name={{ cookiecutter.python_package }}"]
)
shutil.rmtree("docs/build", ignore_errors=True)
call(
[
"sphinx-apidoc",
"--module-first",
"-o",
"docs/source",
"src/{{ cookiecutter.python_package }}",
]
)
call(["sphinx-build", "-M", "html", "docs/source", "docs/build", "-a"])
if open_docs:
docs_page = (Path.cwd() / "docs" / "build" / "html" / "index.html").as_uri()
secho("Opening {}".format(docs_page))
webbrowser.open(docs_page)
@cli.command("build-reqs")
def build_reqs():
"""Build the project dependency requirements."""
requirements_path = Path.cwd() / "src" / "requirements.in"
if not requirements_path.is_file():
secho("No requirements.in found. Copying contents from requirements.txt...")
contents = (Path.cwd() / "src" / "requirements.txt").read_text()
requirements_path.write_text(contents)
python_call("piptools", ["compile", str(requirements_path)])
secho(
(
"Requirements built! Please update requirements.in "
"if you'd like to make a change in your project's dependencies, "
"and re-run build-reqs to generate the new requirements.txt."
)
)
@cli.command("activate-nbstripout")
def activate_nbstripout():
"""Install the nbstripout git hook to automatically clean notebooks."""
secho(
(
"Notebook output cells will be automatically cleared before committing"
" to git."
),
fg="yellow",
)
try:
import nbstripout # pylint: disable=unused-import
except ImportError:
raise KedroCliError(NO_DEPENDENCY_MESSAGE.format("nbstripout"))
try:
res = subprocess.run(
["git", "rev-parse", "--git-dir"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if res.returncode:
raise KedroCliError("Not a git repository. Run `git init` first.")
except FileNotFoundError:
raise KedroCliError("Git executable not found. Install Git first.")
call(["nbstripout", "--install"])
def _build_jupyter_command(
base: str, ip: str, all_kernels: bool, args: Iterable[str]
) -> List[str]:
cmd = [base, "--ip", ip]
if not all_kernels:
project_name = "{{ cookiecutter.project_name }}"
kernel_name = re.sub(r"[^\w]+", "", project_name).strip() or "Kedro"
cmd += [
"--NotebookApp.kernel_spec_manager_class=kedro.cli.jupyter.SingleKernelSpecManager",
"--KernelSpecManager.default_kernel_name='{}'".format(kernel_name),
]
return cmd + list(args)
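# For example (with --all-kernels the kernel-spec options are omitted):
#   _build_jupyter_command("notebook", ip="127.0.0.1", all_kernels=True,
#                          args=["--no-browser"])
#   -> ["notebook", "--ip", "127.0.0.1", "--no-browser"]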
def _build_jupyter_env(kedro_env: str) -> Dict[str, Any]:
"""Build the environment dictionary that gets injected into the subprocess running
Jupyter. Since the subprocess has access only to the environment variables passed
in, we need to copy the current environment and add ``KEDRO_ENV_VAR``.
"""
if not kedro_env:
return {}
jupyter_env = os.environ.copy()
jupyter_env[KEDRO_ENV_VAR] = kedro_env
return {"env": jupyter_env}
@cli.group()
def jupyter():
"""Open Jupyter Notebook / Lab with project specific variables loaded, or
convert notebooks into Kedro code.
"""
@forward_command(jupyter, "notebook", forward_help=True)
@click.option("--ip", type=str, default="127.0.0.1")
@click.option("--all-kernels", is_flag=True, default=False)
@click.option(
"--env",
"-e",
type=str,
default=None,
multiple=False,
envvar=KEDRO_ENV_VAR,
help=ENV_ARG_HELP,
)
def jupyter_notebook(ip, all_kernels, env, args):
"""Open Jupyter Notebook with project specific variables loaded."""
if "-h" not in args and "--help" not in args:
ipython_message(all_kernels)
arguments = _build_jupyter_command(
"notebook", ip=ip, all_kernels=all_kernels, args=args
)
python_call_kwargs = _build_jupyter_env(env)
python_call("jupyter", arguments, **python_call_kwargs)
@forward_command(jupyter, "lab", forward_help=True)
@click.option("--ip", type=str, default="127.0.0.1")
@click.option("--all-kernels", is_flag=True, default=False)
@click.option(
"--env",
"-e",
type=str,
default=None,
multiple=False,
envvar=KEDRO_ENV_VAR,
help=ENV_ARG_HELP,
)
def jupyter_lab(ip, all_kernels, env, args):
"""Open Jupyter Lab with project specific variables loaded."""
if "-h" not in args and "--help" not in args:
ipython_message(all_kernels)
arguments = _build_jupyter_command("lab", ip=ip, all_kernels=all_kernels, args=args)
python_call_kwargs = _build_jupyter_env(env)
python_call("jupyter", arguments, **python_call_kwargs)
@jupyter.command("convert")
@click.option("--all", "all_flag", is_flag=True, help=CONVERT_ALL_HELP)
@click.option("-y", "overwrite_flag", is_flag=True, help=OVERWRITE_HELP)
@click.argument(
"filepath",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
required=False,
nargs=-1,
)
def convert_notebook(all_flag, overwrite_flag, filepath):
"""Convert selected or all notebooks found in a Kedro project
to Kedro code, by exporting code from the appropriately-tagged cells:
Cells tagged as `node` will be copied over to a Python file matching
the name of the notebook, under `src/<package_name>/nodes`.
*Note*: Make sure your notebooks have unique names!
FILEPATH: Path(s) to exact notebook file(s) to be converted. Both
relative and absolute paths are accepted.
Should not be provided if --all flag is already present.
"""
context = load_context(Path.cwd())
if not filepath and not all_flag:
secho(
"Please specify a notebook filepath "
"or add '--all' to convert all notebooks."
)
sys.exit(1)
kedro_project_path = context.project_path
kedro_package_name = "{{cookiecutter.python_package}}"
if all_flag:
# pathlib glob does not ignore hidden directories,
# whereas Python glob does, which is more useful in
# ensuring checkpoints will not be included
pattern = kedro_project_path / "**" / "*.ipynb"
notebooks = sorted(Path(p) for p in iglob(str(pattern), recursive=True))
else:
notebooks = [Path(f) for f in filepath]
counter = Counter(n.stem for n in notebooks)
non_unique_names = [name for name, counts in counter.items() if counts > 1]
if non_unique_names:
raise KedroCliError(
"Found non-unique notebook names! "
"Please rename the following: {}".format(", ".join(non_unique_names))
)
for notebook in notebooks:
secho("Converting notebook '{}'...".format(str(notebook)))
output_path = (
kedro_project_path
/ "src"
/ kedro_package_name
/ "nodes"
/ "{}.py".format(notebook.stem)
)
if output_path.is_file():
overwrite = overwrite_flag or click.confirm(
"Output file {} already exists. Overwrite?".format(str(output_path)),
default=False,
)
if overwrite:
export_nodes(notebook, output_path)
else:
export_nodes(notebook, output_path)
secho("Done!")
def ipython_message(all_kernels=True):
"""Show a message saying how we have configured the IPython env."""
ipy_vars = ["startup_error", "context"]
secho("-" * 79, fg="cyan")
secho("Starting a Kedro session with the following variables in scope")
secho(", ".join(ipy_vars), fg="green")
secho(
"Use the line magic {} to refresh them".format(
style("%reload_kedro", fg="green")
)
)
secho("or to see the error message if they are undefined")
if not all_kernels:
secho("The choice of kernels is limited to the default one.", fg="yellow")
secho("(restart with --all-kernels to get access to others)", fg="yellow")
secho("-" * 79, fg="cyan")
if __name__ == "__main__":
os.chdir(str(PROJ_PATH))
kedro_main()
| 32.966216
| 96
| 0.660228
|
122d5cb2917c380c4e070985bb2c90648ddff57a
| 414
|
py
|
Python
|
hangie.py
|
Srinivassan-Ramamurthy/python_programs
|
53b390669c7e88532c67d80b758a9199d6fde8cf
|
[
"bzip2-1.0.6"
] | null | null | null |
hangie.py
|
Srinivassan-Ramamurthy/python_programs
|
53b390669c7e88532c67d80b758a9199d6fde8cf
|
[
"bzip2-1.0.6"
] | null | null | null |
hangie.py
|
Srinivassan-Ramamurthy/python_programs
|
53b390669c7e88532c67d80b758a9199d6fde8cf
|
[
"bzip2-1.0.6"
] | null | null | null |
hangman=[' _______',' |/ |',' | (_)',' | \|/',' | |',' | / \\',' |',' _|___']
a='aeroplane'
c=[]
for i in a:
c.append('_ ')
print(*c)
n=len(a)
def inp():
    user = input("enter a letter: ")  # the player guesses one letter at a time
for i in range(len(a)):
if(a[i] == user):
c[i] = user
print(*c)
for i in range(8):
inp()
| 20.7
| 135
| 0.321256
|
29ec37c71cdee3430a2cc2065ec481d0b1ed6bf4
| 455
|
py
|
Python
|
tests/small-build/test_api.py
|
NaxoAI/conceptnet5
|
2ec9fbff06d63fc303f8f2b1b62fd1ceed8e4b74
|
[
"Apache-2.0"
] | null | null | null |
tests/small-build/test_api.py
|
NaxoAI/conceptnet5
|
2ec9fbff06d63fc303f8f2b1b62fd1ceed8e4b74
|
[
"Apache-2.0"
] | null | null | null |
tests/small-build/test_api.py
|
NaxoAI/conceptnet5
|
2ec9fbff06d63fc303f8f2b1b62fd1ceed8e4b74
|
[
"Apache-2.0"
] | 1
|
2021-08-05T13:34:32.000Z
|
2021-08-05T13:34:32.000Z
|
from conceptnet5 import api
from nose.tools import eq_
def test_related_query():
# Test that we can look up related terms
result = api.query_related('/c/en/test', limit=3)
eq_(len(result['related']), 3)
def test_related_query_malformed():
# Test that we fulfill a query for related terms to a nonsense URI, and
# there are simply no results
result = api.query_related('/c/en,test', limit=3)
eq_(len(result['related']), 0)
| 26.764706
| 75
| 0.696703
|
3ea08a45f63f2f005ce24582aa32c12b73c4a15e
| 115
|
py
|
Python
|
aws_config/configure/config_utils.py
|
rwalk/straw
|
1940d521538635d5eab394d0ed4c87caf366b0c7
|
[
"MIT"
] | 58
|
2016-06-03T18:16:32.000Z
|
2021-08-11T19:01:03.000Z
|
aws_config/configure/config_utils.py
|
rwalk333/straw
|
1940d521538635d5eab394d0ed4c87caf366b0c7
|
[
"MIT"
] | 1
|
2019-08-22T14:22:14.000Z
|
2019-08-22T14:22:14.000Z
|
aws_config/configure/config_utils.py
|
rwalk/straw
|
1940d521538635d5eab394d0ed4c87caf366b0c7
|
[
"MIT"
] | 18
|
2016-06-23T02:24:51.000Z
|
2022-01-20T11:17:53.000Z
|
#!/usr/bin/python3
def quiet_wrap(cmd):
return(" ".join(["nohup",cmd, "< /dev/null > std.out 2> std.err &"]))
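# For example (the wrapped command is illustrative):
#   quiet_wrap("python worker.py")
#   -> 'nohup python worker.py < /dev/null > std.out 2> std.err &'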
| 23
| 73
| 0.582609
|
b22d31bee0f54f3f318466edea3b4df9e587a99b
| 299
|
py
|
Python
|
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_1_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_1_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/unsigned_byte/schema_instance/nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_1_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_1_xsd.nistschema_sv_iv_atomic_unsigned_byte_min_inclusive_1 import NistschemaSvIvAtomicUnsignedByteMinInclusive1
__all__ = [
"NistschemaSvIvAtomicUnsignedByteMinInclusive1",
]
| 49.833333
| 230
| 0.909699
|
89a352d5cf8a637b912708e03cd14b07232e67d5
| 3,306
|
py
|
Python
|
mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
yypurpose/mmdetection
|
ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
yypurpose/mmdetection
|
ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
|
yypurpose/mmdetection
|
ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c
|
[
"Apache-2.0"
] | null | null | null |
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# some times rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
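# A minimal config sketch showing how this extractor might be selected in an
# mmdetection-style model config. The aggregation/pre_cfg/post_cfg keys follow
# the class docstring; the roi_layer, out_channels and featmap_strides values
# come from BaseRoIExtractor and are illustrative assumptions:
#
#   bbox_roi_extractor = dict(
#       type='GenericRoIExtractor',
#       aggregation='sum',
#       roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
#       out_channels=256,
#       featmap_strides=[4, 8, 16, 32])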
| 39.357143
| 80
| 0.613128
|
45372e214f923de5d79ced5fa1ccf9adf7237631
| 4,187
|
py
|
Python
|
iblrig/fake_task_settings.py
|
int-brain-lab/iblr
|
18569278fc2d8cd3266adb2a5f660a43f8f2582e
|
[
"MIT"
] | 13
|
2018-08-07T21:56:08.000Z
|
2021-12-06T17:53:37.000Z
|
iblrig/fake_task_settings.py
|
int-brain-lab/iblr
|
18569278fc2d8cd3266adb2a5f660a43f8f2582e
|
[
"MIT"
] | 360
|
2018-07-24T16:35:26.000Z
|
2022-03-23T15:28:56.000Z
|
iblrig/fake_task_settings.py
|
int-brain-lab/iblr
|
18569278fc2d8cd3266adb2a5f660a43f8f2582e
|
[
"MIT"
] | 15
|
2019-03-12T16:25:05.000Z
|
2021-09-06T10:30:24.000Z
|
# =============================================================================
# TASK PARAMETER DEFINITION (should appear on GUI) init trial objects values
# =============================================================================
# IBL rig root folder
IBLRIG_FOLDER = "C:\\iblrig"
IBLRIG_DATA_FOLDER = None # If None data folder will be ..\\iblrig_data from IBLRIG_FOLDER # noqa
# SOUND, AMBIENT SENSOR, AND VIDEO RECORDINGS
RECORD_SOUND = True
RECORD_AMBIENT_SENSOR_DATA = True
# REWARDS
AUTOMATIC_CALIBRATION = True # Whether to look for a calibration session and a function to define the valve opening time # noqa
CALIBRATION_VALUE = (
0.067 # calibration value for 3ul of target reward amount (ignored if automatic ON) # noqa
)
REWARD_AMOUNT = 3.0 # (µl) Amount of reward to be delivered upon correct choice each trial (overwritten if adaptive ON) # noqa
REWARD_TYPE = "Water 10% Sucrose" # Water, Water 10% Sucrose, Water 15% Sucrose, Water 2% Citric Acid (Guo et al.. PLoS One 2014) # noqa
# TASK
NTRIALS = 2000 # Number of trials for the current session
USE_AUTOMATIC_STOPPING_CRITERIONS = (
    True # Whether to check for the automatic stopping criteria or not # noqa
)
USE_VISUAL_STIMULUS = True # Run the visual stim in bonsai
BONSAI_EDITOR = False # Whether to open the visual stim Bonsai editor or not
REPEAT_ON_ERROR = True
# STATE TIMERS
QUIESCENCE_THRESHOLDS = [-2, 2] # degree
QUIESCENT_PERIOD = 0.2 # + x, where x~exp(0.35), t ∈ 0.2 <= R <= 0.5
INTERACTIVE_DELAY = 0.0 # (s) how long after stim onset the CL starts
RESPONSE_WINDOW = 60 # Time to move the wheel after go tone (seconds)
ITI_CORRECT = 1 # how long the stim should stay visible after CORRECT choice
ITI_ERROR = 2 # how long the stim should stay visible after ERROR choice
# VISUAL STIM
STIM_POSITIONS = [-35, 35] # All possible positions for this session (deg)
STIM_PROBABILITY_LEFT = 0.5
STIM_FREQ = 0.10 # Probably constant - NOT IN USE
STIM_ANGLE = 0.0 # Vertical orientation of Gabor patch - NOT IN USE
STIM_SIGMA = 7.0 # (azimuth_degree) Size of Gabor patch
STIM_GAIN = 8.0 # (azimuth_degree/mm) Gain of the RE to stimulus movement (used if ADAPTIVE_GAIN = FALSE) # noqa
SYNC_SQUARE_X = 1.33
SYNC_SQUARE_Y = -1.03
# SOUNDS
SOFT_SOUND = "xonar" # Use software sound 'xonar', 'sysdefault' or False for BpodSoundCard # noqa
SOUND_BOARD_BPOD_PORT = "Serial3" # (on Bpod) - Ignored if using SOFT_SOUND
WHITE_NOISE_DURATION = 0.5 # Length of noise burst
WHITE_NOISE_AMPLITUDE = 0.05
GO_TONE_DURATION = 0.1 # Length of tone
GO_TONE_FREQUENCY = 5000 # 5KHz
GO_TONE_AMPLITUDE = 0.0272 # [0->1] 0.0272 for 70dB SPL Xonar
# ADAPTIVE PARAMETERS
# ADAPTIVE REWARD PARAMETERS (IGNORED IF ADAPTIVE_REWARD = False)
ADAPTIVE_REWARD = True # whether to increase reward at session start using AR_* criteria # noqa
AR_INIT_VALUE = 3 # µl
AR_MAX_VALUE = 3
AR_MIN_VALUE = 1.5 # (µl) 1.5 µl for sugar water, 2 µl for normal water
AR_STEP = 0.1 # µl
AR_CRIT = 200 # number of trials performed
# CONTRASTS
CONTRAST_SET = [
1.0,
0.5,
0.25,
0.125,
0.0625,
0.0,
] # Full contrast set, used if adaptive contrast = False # noqa
REPEAT_CONTRASTS = [1.0, 0.5] # Contrasts to be repeated if error
# ADAPTIVE_CONTRAST PARAMETERS (IGNORED IF ADAPTIVE_CONTRAST = False)
ADAPTIVE_CONTRAST = True # Whether to use the adaptive contrast rule or not
AC_INIT_CONTRASTS = [1.0, 0.5]
AC_BUFFER_SIZE = 50 # Buffer size to compute performance for each contrast
AC_PERF_CRIT = 0.7 # Criterion for adding next contrast L AND R have to pass
AC_NTRIALS_TO_SIX = 200 # Number of trials after 0.125 to introduce the 6% contrast # noqa
AC_NTRIALS_TO_ZERO = 400 # Number of trials after 0.125 to introduce the 0% contrast # noqa
AC_NTRIALS_TO_REMOVE_50 = 600 # Number of trials after 0.125 to remove the 50% contrast # noqa
# ADAPTIVE_GAIN PARAMETERS (IGNORED IF ADAPTIVE_GAIN = False)
ADAPTIVE_GAIN = True
AG_INIT_VALUE = 8.0 # Adaptive Gain init value (azimuth_degree/mm)
AG_MIN_VALUE = 4.0 # (azimuth_degree/mm)
# POSITION BIAS FOR REPEATED TRIALS
RESPONSE_BUFFER_LENGTH = 10
# POOP COUNT LOGGING
POOP_COUNT = True # Whether to ask for a poop count at the end of the session
| 49.845238
| 138
| 0.713637
|
170433965815e07d5153c51b0f81b61abb305be9
| 258
|
py
|
Python
|
sphinxcontrib/test_reports/directives/__init__.py
|
lassebn/sphinx-test-reports
|
1655c81a3b9e79cd46244810cd7f3ef8fb84c1aa
|
[
"MIT"
] | null | null | null |
sphinxcontrib/test_reports/directives/__init__.py
|
lassebn/sphinx-test-reports
|
1655c81a3b9e79cd46244810cd7f3ef8fb84c1aa
|
[
"MIT"
] | null | null | null |
sphinxcontrib/test_reports/directives/__init__.py
|
lassebn/sphinx-test-reports
|
1655c81a3b9e79cd46244810cd7f3ef8fb84c1aa
|
[
"MIT"
] | 1
|
2021-08-18T10:15:17.000Z
|
2021-08-18T10:15:17.000Z
|
from .test_common import TestCommonDirective
from .test_file import TestFileDirective, TestFile
from .test_suite import TestSuiteDirective, TestSuite
from .test_case import TestCaseDirective, TestCase
from .test_report import TestReportDirective, TestReport
| 43
| 56
| 0.872093
|
1979af5fb3706350953c3cf7542c026719d5d908
| 18,865
|
py
|
Python
|
official/nlp/bert/run_classifier.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 82,518
|
2016-02-05T12:07:23.000Z
|
2022-03-31T23:09:47.000Z
|
official/nlp/bert/run_classifier.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 9,021
|
2016-03-08T01:02:05.000Z
|
2022-03-31T08:06:35.000Z
|
official/nlp/bert/run_classifier.py
|
akshit-protonn/models
|
38c8c6fe4144c93d6aadd19981c2b90570c29eba
|
[
"Apache-2.0"
] | 54,341
|
2016-02-06T17:19:55.000Z
|
2022-03-31T10:27:44.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT classification or regression finetuning runner in TF 2.x."""
import functools
import json
import math
import os
# Import libraries
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf
from official.common import distribute_utils
from official.modeling import performance
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.utils.misc import keras_utils
flags.DEFINE_enum(
'mode', 'train_and_eval', ['train_and_eval', 'export_only', 'predict'],
'One of {"train_and_eval", "export_only", "predict"}. `train_and_eval`: '
'trains the model and evaluates in the meantime. '
'`export_only`: will take the latest checkpoint inside '
'model_dir and export a `SavedModel`. `predict`: takes a checkpoint and '
'restores the model to output predictions on the test set.')
flags.DEFINE_string('train_data_path', None,
'Path to training data for BERT classifier.')
flags.DEFINE_string('eval_data_path', None,
'Path to evaluation data for BERT classifier.')
flags.DEFINE_string(
'input_meta_data_path', None,
'Path to file that contains meta data about input '
'to be used for training and evaluation.')
flags.DEFINE_integer('train_data_size', None, 'Number of training samples '
'to use. If None, uses the full train data. '
'(default: None).')
flags.DEFINE_string('predict_checkpoint_path', None,
'Path to the checkpoint for predictions.')
flags.DEFINE_integer(
'num_eval_per_epoch', 1,
'Number of evaluations per epoch. The purpose of this flag is to provide '
'more granular evaluation scores and checkpoints. For example, if original '
'data has N samples and num_eval_per_epoch is n, then each epoch will be '
'evaluated every N/n samples.')
flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32}
def get_loss_fn(num_classes):
"""Gets the classification loss function."""
def classification_loss_fn(labels, logits):
"""Classification loss."""
labels = tf.squeeze(labels)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(
tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)
return tf.reduce_mean(per_example_loss)
return classification_loss_fn
def get_dataset_fn(input_file_pattern,
max_seq_length,
global_batch_size,
is_training,
label_type=tf.int64,
include_sample_weights=False,
num_samples=None):
"""Gets a closure to create a dataset."""
def _dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = input_pipeline.create_classifier_dataset(
tf.io.gfile.glob(input_file_pattern),
max_seq_length,
batch_size,
is_training=is_training,
input_pipeline_context=ctx,
label_type=label_type,
include_sample_weights=include_sample_weights,
num_samples=num_samples)
return dataset
return _dataset_fn
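# Hypothetical usage sketch (the file pattern and sizes are illustrative):
#   train_input_fn = get_dataset_fn('gs://my-bucket/train.tf_record',
#                                   max_seq_length=128,
#                                   global_batch_size=32,
#                                   is_training=True)
#   dataset = train_input_fn()  # or distribute via strategy.distribute_datasets_from_function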
def run_bert_classifier(strategy,
bert_config,
input_meta_data,
model_dir,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
warmup_steps,
initial_lr,
init_checkpoint,
train_input_fn,
eval_input_fn,
training_callbacks=True,
custom_callbacks=None,
custom_metrics=None):
"""Run BERT classifier training using low-level API."""
max_seq_length = input_meta_data['max_seq_length']
num_classes = input_meta_data.get('num_labels', 1)
is_regression = num_classes == 1
def _get_classifier_model():
"""Gets a classifier model."""
classifier_model, core_model = (
bert_models.classifier_model(
bert_config,
num_classes,
max_seq_length,
hub_module_url=FLAGS.hub_module_url,
hub_module_trainable=FLAGS.hub_module_trainable))
optimizer = optimization.create_optimizer(initial_lr,
steps_per_epoch * epochs,
warmup_steps, FLAGS.end_lr,
FLAGS.optimizer_type)
classifier_model.optimizer = performance.configure_optimizer(
optimizer,
use_float16=common_flags.use_float16(),
use_graph_rewrite=common_flags.use_graph_rewrite())
return classifier_model, core_model
# tf.keras.losses objects accept optional sample_weight arguments (eg. coming
# from the dataset) to compute weighted loss, as used for the regression
# tasks. The classification tasks, using the custom get_loss_fn don't accept
# sample weights though.
loss_fn = (tf.keras.losses.MeanSquaredError() if is_regression
else get_loss_fn(num_classes))
# Defines evaluation metrics function, which will create metrics in the
# correct device and strategy scope.
if custom_metrics:
metric_fn = custom_metrics
elif is_regression:
metric_fn = functools.partial(
tf.keras.metrics.MeanSquaredError,
'mean_squared_error',
dtype=tf.float32)
else:
metric_fn = functools.partial(
tf.keras.metrics.SparseCategoricalAccuracy,
'accuracy',
dtype=tf.float32)
# Start training using Keras compile/fit API.
logging.info('Training using TF 2.x Keras compile/fit API with '
'distribution strategy.')
return run_keras_compile_fit(
model_dir,
strategy,
_get_classifier_model,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
training_callbacks=training_callbacks,
custom_callbacks=custom_callbacks)
def run_keras_compile_fit(model_dir,
strategy,
model_fn,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
training_callbacks=True,
custom_callbacks=None):
"""Runs BERT classifier model using Keras compile/fit API."""
with strategy.scope():
training_dataset = train_input_fn()
evaluation_dataset = eval_input_fn() if eval_input_fn else None
bert_model, sub_model = model_fn()
optimizer = bert_model.optimizer
if init_checkpoint:
checkpoint = tf.train.Checkpoint(model=sub_model, encoder=sub_model)
checkpoint.read(init_checkpoint).assert_existing_objects_matched()
if not isinstance(metric_fn, (list, tuple)):
metric_fn = [metric_fn]
bert_model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=[fn() for fn in metric_fn],
steps_per_execution=steps_per_loop)
summary_dir = os.path.join(model_dir, 'summaries')
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint = tf.train.Checkpoint(model=bert_model, optimizer=optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=model_dir,
max_to_keep=None,
step_counter=optimizer.iterations,
checkpoint_interval=0)
checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)
if training_callbacks:
if custom_callbacks is not None:
custom_callbacks += [summary_callback, checkpoint_callback]
else:
custom_callbacks = [summary_callback, checkpoint_callback]
history = bert_model.fit(
x=training_dataset,
validation_data=evaluation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=eval_steps,
callbacks=custom_callbacks)
stats = {'total_training_steps': steps_per_epoch * epochs}
if 'loss' in history.history:
stats['train_loss'] = history.history['loss'][-1]
if 'val_accuracy' in history.history:
stats['eval_metrics'] = history.history['val_accuracy'][-1]
return bert_model, stats
def get_predictions_and_labels(strategy,
trained_model,
eval_input_fn,
is_regression=False,
return_probs=False):
"""Obtains predictions of trained model on evaluation data.
  Note that the list of labels is returned along with the predictions because
  the order changes when the dataset is distributed over TPU pods.
Args:
strategy: Distribution strategy.
trained_model: Trained model with preloaded weights.
eval_input_fn: Input function for evaluation data.
is_regression: Whether it is a regression task.
return_probs: Whether to return probabilities of classes.
Returns:
predictions: List of predictions.
labels: List of gold labels corresponding to predictions.
"""
@tf.function
def test_step(iterator):
"""Computes predictions on distributed devices."""
def _test_step_fn(inputs):
"""Replicated predictions."""
inputs, labels = inputs
logits = trained_model(inputs, training=False)
if not is_regression:
probabilities = tf.nn.softmax(logits)
return probabilities, labels
else:
return logits, labels
outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),))
# outputs: current batch logits as a tuple of shard logits
outputs = tf.nest.map_structure(strategy.experimental_local_results,
outputs)
labels = tf.nest.map_structure(strategy.experimental_local_results, labels)
return outputs, labels
def _run_evaluation(test_iterator):
"""Runs evaluation steps."""
preds, golds = list(), list()
try:
with tf.experimental.async_scope():
while True:
probabilities, labels = test_step(test_iterator)
for cur_probs, cur_labels in zip(probabilities, labels):
if return_probs:
preds.extend(cur_probs.numpy().tolist())
else:
preds.extend(tf.math.argmax(cur_probs, axis=1).numpy())
golds.extend(cur_labels.numpy().tolist())
except (StopIteration, tf.errors.OutOfRangeError):
tf.experimental.async_clear_error()
return preds, golds
test_iter = iter(strategy.distribute_datasets_from_function(eval_input_fn))
predictions, labels = _run_evaluation(test_iter)
return predictions, labels
def export_classifier(model_export_path, input_meta_data, bert_config,
model_dir):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
bert_config: Bert configuration file to define core bert layers.
model_dir: The directory where the model weights and training/evaluation
summaries are stored.
Raises:
Export path is not specified, got an empty string or None.
"""
if not model_export_path:
raise ValueError('Export path is not specified: %s' % model_export_path)
if not model_dir:
raise ValueError('Export path is not specified: %s' % model_dir)
# Export uses float32 for now, even if training uses mixed precision.
tf.keras.mixed_precision.set_global_policy('float32')
classifier_model = bert_models.classifier_model(
bert_config,
input_meta_data.get('num_labels', 1),
hub_module_url=FLAGS.hub_module_url,
hub_module_trainable=False)[0]
model_saving_utils.export_bert_model(
model_export_path, model=classifier_model, checkpoint_dir=model_dir)
def run_bert(strategy,
input_meta_data,
model_config,
train_input_fn=None,
eval_input_fn=None,
init_checkpoint=None,
custom_callbacks=None,
custom_metrics=None):
"""Run BERT training."""
# Enables XLA in Session Config. Should not be set for TPU.
keras_utils.set_session_config(FLAGS.enable_xla)
performance.set_mixed_precision_policy(common_flags.dtype())
epochs = FLAGS.num_train_epochs * FLAGS.num_eval_per_epoch
train_data_size = (
input_meta_data['train_data_size'] // FLAGS.num_eval_per_epoch)
if FLAGS.train_data_size:
train_data_size = min(train_data_size, FLAGS.train_data_size)
logging.info('Updated train_data_size: %s', train_data_size)
steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size)
eval_steps = int(
math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
if not strategy:
raise ValueError('Distribution strategy has not been specified.')
if not custom_callbacks:
custom_callbacks = []
if FLAGS.log_steps:
custom_callbacks.append(
keras_utils.TimeHistory(
batch_size=FLAGS.train_batch_size,
log_steps=FLAGS.log_steps,
logdir=FLAGS.model_dir))
trained_model, _ = run_bert_classifier(
strategy,
model_config,
input_meta_data,
FLAGS.model_dir,
epochs,
steps_per_epoch,
FLAGS.steps_per_loop,
eval_steps,
warmup_steps,
FLAGS.learning_rate,
init_checkpoint or FLAGS.init_checkpoint,
train_input_fn,
eval_input_fn,
custom_callbacks=custom_callbacks,
custom_metrics=custom_metrics)
if FLAGS.model_export_path:
model_saving_utils.export_bert_model(
FLAGS.model_export_path, model=trained_model)
return trained_model
def custom_main(custom_callbacks=None, custom_metrics=None):
"""Run classification or regression.
Args:
custom_callbacks: list of tf.keras.Callbacks passed to training loop.
custom_metrics: list of metrics passed to the training loop.
"""
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
label_type = LABEL_TYPES_MAP[input_meta_data.get('label_type', 'int')]
include_sample_weights = input_meta_data.get('has_sample_weights', False)
if not FLAGS.model_dir:
FLAGS.model_dir = '/tmp/bert20/'
bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.mode == 'export_only':
export_classifier(FLAGS.model_export_path, input_meta_data, bert_config,
FLAGS.model_dir)
return
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
eval_input_fn = get_dataset_fn(
FLAGS.eval_data_path,
input_meta_data['max_seq_length'],
FLAGS.eval_batch_size,
is_training=False,
label_type=label_type,
include_sample_weights=include_sample_weights)
if FLAGS.mode == 'predict':
num_labels = input_meta_data.get('num_labels', 1)
with strategy.scope():
classifier_model = bert_models.classifier_model(
bert_config, num_labels)[0]
checkpoint = tf.train.Checkpoint(model=classifier_model)
latest_checkpoint_file = (
FLAGS.predict_checkpoint_path or
tf.train.latest_checkpoint(FLAGS.model_dir))
assert latest_checkpoint_file
logging.info('Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(
latest_checkpoint_file).assert_existing_objects_matched()
preds, _ = get_predictions_and_labels(
strategy,
classifier_model,
eval_input_fn,
is_regression=(num_labels == 1),
return_probs=True)
output_predict_file = os.path.join(FLAGS.model_dir, 'test_results.tsv')
with tf.io.gfile.GFile(output_predict_file, 'w') as writer:
logging.info('***** Predict results *****')
for probabilities in preds:
output_line = '\t'.join(
str(class_probability)
for class_probability in probabilities) + '\n'
writer.write(output_line)
return
if FLAGS.mode != 'train_and_eval':
raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode)
train_input_fn = get_dataset_fn(
FLAGS.train_data_path,
input_meta_data['max_seq_length'],
FLAGS.train_batch_size,
is_training=True,
label_type=label_type,
include_sample_weights=include_sample_weights,
num_samples=FLAGS.train_data_size)
run_bert(
strategy,
input_meta_data,
bert_config,
train_input_fn,
eval_input_fn,
custom_callbacks=custom_callbacks,
custom_metrics=custom_metrics)
def main(_):
custom_main(custom_callbacks=None, custom_metrics=None)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('input_meta_data_path')
flags.mark_flag_as_required('model_dir')
app.run(main)
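# --- Hedged usage sketch (not part of the original script) ---
# custom_main() is the entry point for callers that want to inject their own
# Keras callbacks or metrics instead of using main().  The callback and metric
# chosen below are illustrative assumptions, not part of this module.
#
# def my_main(_):
#   custom_main(
#       custom_callbacks=[tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)],
#       custom_metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')])
#
# if __name__ == '__main__':
#   flags.mark_flag_as_required('bert_config_file')
#   flags.mark_flag_as_required('input_meta_data_path')
#   flags.mark_flag_as_required('model_dir')
#   app.run(my_main)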
| 36.489362
| 80
| 0.680732
|
53be355cbf73c27ff19e6c062e1158bac91f1576
| 1,657
|
py
|
Python
|
src/plume/utils/extended_path.py
|
malarinv/plume-asr
|
79aa5e85788070aad688d41fbbf9f8b1f8aa8fb5
|
[
"MIT"
] | null | null | null |
src/plume/utils/extended_path.py
|
malarinv/plume-asr
|
79aa5e85788070aad688d41fbbf9f8b1f8aa8fb5
|
[
"MIT"
] | null | null | null |
src/plume/utils/extended_path.py
|
malarinv/plume-asr
|
79aa5e85788070aad688d41fbbf9f8b1f8aa8fb5
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import json
from .lazy_import import lazy_module
yaml = lazy_module("ruamel.yaml")
pydub = lazy_module("pydub")
class ExtendedPath(type(Path())):
"""docstring for ExtendedPath."""
def read_json(self, verbose=False):
if verbose:
print(f"reading json from {self}")
with self.open("r") as jf:
return json.load(jf)
def read_yaml(self, verbose=False):
yaml_o = yaml.YAML(typ="safe", pure=True)
if verbose:
print(f"reading yaml from {self}")
with self.open("r") as yf:
return yaml_o.load(yf)
def read_jsonl(self, verbose=False):
if verbose:
print(f"reading jsonl from {self}")
with self.open("r") as jf:
for ln in jf.readlines():
yield json.loads(ln)
def read_audio_segment(self):
return pydub.AudioSegment.from_file(self)
def write_json(self, data, verbose=False):
if verbose:
print(f"writing json to {self}")
self.parent.mkdir(parents=True, exist_ok=True)
with self.open("w") as jf:
json.dump(data, jf, indent=2)
def write_yaml(self, data, verbose=False):
yaml_o = yaml.YAML()
if verbose:
print(f"writing yaml to {self}")
with self.open("w") as yf:
yaml_o.dump(data, yf)
def write_jsonl(self, data, verbose=False):
if verbose:
print(f"writing jsonl to {self}")
self.parent.mkdir(parents=True, exist_ok=True)
with self.open("w") as jf:
for d in data:
jf.write(json.dumps(d) + "\n")
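# --- Hedged usage sketch (not part of the original module) ---
# ExtendedPath behaves like pathlib.Path but adds the read_*/write_* helpers
# defined above.  The file names used here are illustrative assumptions.
#
# cfg = ExtendedPath("config/model.yaml").read_yaml(verbose=True)
# ExtendedPath("out/records.jsonl").write_jsonl([{"id": 1}, {"id": 2}])
# for rec in ExtendedPath("out/records.jsonl").read_jsonl():
#     print(rec)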
| 29.070175
| 54
| 0.578153
|
bcfaf0265ce5962a6aa97718ae60a18dfc49299c
| 2,408
|
py
|
Python
|
controllers/stats.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 205
|
2015-01-20T08:26:09.000Z
|
2022-03-27T19:59:33.000Z
|
controllers/stats.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 249
|
2015-02-10T09:56:35.000Z
|
2022-03-23T19:54:36.000Z
|
controllers/stats.py
|
himansu1997/eden
|
1e2cf2b00f55da46b1ce3e6b7ad44b5345d7a1dc
|
[
"MIT"
] | 231
|
2015-02-10T09:33:17.000Z
|
2022-02-18T19:56:05.000Z
|
# -*- coding: utf-8 -*-
"""
Sahana Eden Stats Controller
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(c, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# Just redirect to the Demographic Data
s3_redirect_default(URL(f = "demographic_data",
args = "summary",
))
# -----------------------------------------------------------------------------
def parameter():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def data():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def source():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def demographic():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def demographic_data():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def demographic_aggregate():
""" REST Controller """
def clear_aggregates(r, **attr):
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
s3db.stats_demographic_rebuild_all_aggregates()
redirect(URL(c = "stats",
f = "demographic_aggregate",
args = "",
))
s3db.set_method("stats", "demographic_aggregate",
method = "clear",
action = clear_aggregates,
)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def impact_type():
""" REST Controller for impact types """
return s3_rest_controller()
# END =========================================================================
| 28.329412
| 79
| 0.382475
|
d539dcc03b28d213ba1dfd6b4732fe577d9e0dfb
| 69,375
|
py
|
Python
|
tensorlayerx/backend/ops/paddle_nn.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | 34
|
2021-12-03T08:19:23.000Z
|
2022-03-13T08:34:34.000Z
|
tensorlayerx/backend/ops/paddle_nn.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | null | null | null |
tensorlayerx/backend/ops/paddle_nn.py
|
tensorlayer/TensorLayerX
|
4e3e6f13687309dda7787f0b86e35a62bb3adbad
|
[
"Apache-2.0"
] | 3
|
2021-12-28T16:57:20.000Z
|
2022-03-18T02:23:14.000Z
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import paddle as pd
from paddle import framework
import paddle.nn.functional as F
import numpy as np
import paddle.fluid as fluid
from paddle.nn import initializer as I
from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
from paddle.fluid.data_feeder import convert_dtype
from paddle.fluid.dygraph import Layer, LayerList
from paddle.nn.layer.rnn import RNNCellBase
import warnings
import math
def padding_format(padding):
"""
    Checks that the padding specifier is valid and normalizes it.
Parameters
----------
padding : str
Must be one of the following:"same", "SAME", "VALID", "valid"
Returns
-------
str "SAME" or "VALID"
"""
if padding in ["SAME", "same"]:
padding = "SAME"
elif padding in ["VALID", "valid"]:
padding = "VALID"
    elif padding is None:
padding = None
elif isinstance(padding, tuple) or isinstance(padding, int):
return padding
else:
raise Exception("Unsupported padding: " + str(padding))
return padding
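# Hedged example (not in the original file): padding_format() only normalizes
# the spelling of the padding argument and passes ints/tuples through unchanged.
#
# padding_format("same")   -> "SAME"
# padding_format("valid")  -> "VALID"
# padding_format((1, 2))   -> (1, 2)
# padding_format("foo")    -> raises Exception("Unsupported padding: foo")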
def preprocess_1d_format(data_format, padding):
"""
    Checks that the 1-D data format and padding specifiers are valid and normalizes them.
Parameters
----------
data_format : str
Must be one of the following:"channels_last","NWC","NCW","channels_first"
padding : str
Must be one of the following:"same","valid","SAME","VALID"
Returns
-------
str "NWC" or "NCW" and "SAME" or "VALID"
"""
if data_format in ["channels_last", "NWC", "NLC"]:
data_format = "NLC"
elif data_format in ["channels_first", "NCW", "NCL"]:
data_format = "NCL"
    elif data_format is None:
data_format = None
else:
raise Exception("Unsupported data format: " + str(data_format))
padding = padding_format(padding)
return data_format, padding
def preprocess_2d_format(data_format, padding):
"""
    Checks that the 2-D data format and padding specifiers are valid and normalizes them.
Parameters
----------
data_format : str
Must be one of the following:"channels_last","NHWC","NCHW","channels_first"
padding : str
Must be one of the following:"same","valid","SAME","VALID"
Returns
-------
str "NHWC" or "NCHW" and "SAME" or "VALID"
"""
if data_format in ["channels_last", "NHWC", "nhwc"]:
data_format = "NHWC"
elif data_format in ["channels_first", "NCHW", "nchw"]:
data_format = "NCHW"
    elif data_format is None:
data_format = None
else:
raise Exception("Unsupported data format: " + str(data_format))
padding = padding_format(padding)
return data_format, padding
def preprocess_3d_format(data_format, padding):
"""
    Checks that the 3-D data format and padding specifiers are valid and normalizes them.
Parameters
----------
data_format : str
Must be one of the following:"channels_last","NDHWC","NCDHW","channels_first"
padding : str
Must be one of the following:"same","valid","SAME","VALID"
Returns
-------
str "NDHWC" or "NCDHW" and "SAME" or "VALID"
"""
if data_format in ['channels_last', 'NDHWC']:
data_format = 'NDHWC'
elif data_format in ['channels_first', 'NCDHW']:
data_format = 'NCDHW'
    elif data_format is None:
data_format = None
else:
raise Exception("Unsupported data format: " + str(data_format))
padding = padding_format(padding)
return data_format, padding
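# Hedged example (not in the original file): the preprocess_*_format helpers
# map user-facing data_format/padding strings onto the names paddle expects.
#
# preprocess_1d_format("channels_last", "same")    -> ("NLC", "SAME")
# preprocess_2d_format("channels_first", "valid")  -> ("NCHW", "VALID")
# preprocess_3d_format("NDHWC", "SAME")            -> ("NDHWC", "SAME")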
def nchw_to_nhwc(x):
"""
Channels first to channels last
Parameters
----------
x : tensor
channels first tensor data
Returns
-------
channels last tensor data
"""
if len(x.shape) == 3:
x = pd.transpose(x, (0, 2, 1))
elif len(x.shape) == 4:
x = pd.transpose(x, (0, 2, 3, 1))
elif len(x.shape) == 5:
x = pd.transpose(x, (0, 2, 3, 4, 1))
else:
raise Exception("Unsupported dimensions")
return x
def nhwc_to_nchw(x):
"""
    Channels last to channels first
Parameters
----------
x : tensor
channels last tensor data
Returns
-------
channels first tensor data
"""
if len(x.shape) == 3:
x = pd.transpose(x, (0, 2, 1))
elif len(x.shape) == 4:
x = pd.transpose(x, (0, 3, 1, 2))
elif len(x.shape) == 5:
x = pd.transpose(x, (0, 4, 1, 2, 3))
else:
raise Exception("Unsupported dimensions")
return x
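# Hedged example (not in the original file): the two helpers are inverses and
# only permute axes; shown here for a 4-D image batch with made-up sizes.
#
# x = pd.ones([2, 3, 8, 8])    # NCHW
# y = nchw_to_nhwc(x)          # shape [2, 8, 8, 3]
# z = nhwc_to_nchw(y)          # back to [2, 3, 8, 8]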
class ReLU(object):
def __init__(self):
pass
def __call__(self, x):
return F.relu(x)
def relu(x):
"""
Computes rectified linear: max(features, 0).
Parameters
----------
x : tensor
Must be one of the following types: float32, float64, int32, uint8, int16,
int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
Returns
-------
A Tensor. Has the same type as features.
"""
return F.relu(x)
class ELU(object):
def __init__(self, alpha=1.0):
self.alpha = alpha
def __call__(self, x):
return F.elu(x, alpha=self.alpha)
def elu(x, alpha=1.0):
"""
Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
](http://arxiv.org/abs/1511.07289)
Parameters
----------
x : tensor
Must be one of the following types: half, bfloat16, float32, float64.
Returns
-------
A Tensor with the same type as features.
"""
return F.elu(x, alpha=alpha)
class ReLU6(object):
def __init__(self):
pass
def __call__(self, x):
return F.relu6(x)
def relu6(x):
"""
Computes Rectified Linear 6: min(max(features, 0), 6).
Parameters
----------
x : tensor
Must be one of the following types: float32, float64, int32, uint8, int16,
int8, int64, bfloat16, uint16, half, uint32, uint64, qint8.
Returns
-------
A Tensor with the same type as features.
"""
return F.relu6(x)
class LeakyReLU(object):
def __init__(self, negative_slope=0.2):
self.negative_slope = negative_slope
def __call__(self, x):
return F.leaky_relu(x, negative_slope=self.negative_slope)
def leaky_relu(x, negative_slope=0.01):
"""
Compute the Leaky ReLU activation function.
Parameters
----------
x : tensor
representing preactivation values. Must be one of the following types:
float16, float32, float64, int32, int64.
Returns
-------
The activation value.
"""
return F.leaky_relu(x, negative_slope)
class Softplus(object):
def __init__(self):
pass
def __call__(self, x):
return F.softplus(x)
class Tanh(object):
def __init__(self):
pass
def __call__(self, x):
return F.tanh(x)
class Sigmoid(object):
def __init__(self):
pass
def __call__(self, x):
return F.sigmoid(x)
def sigmoid(x):
"""
Computes sigmoid of x element-wise.
Parameters
----------
x : tensor
A Tensor with type float16, float32, float64, complex64, or complex128.
Returns
-------
A Tensor with the same type as x.
"""
return F.sigmoid(x)
class Softmax(object):
def __init__(self, axis = -1):
self.axis = axis
def __call__(self, x):
return F.softmax(x, axis=self.axis)
def softmax(logits, axis=-1):
"""
Computes softmax activations.
Parameters
----------
logits : tensor
Must be one of the following types: half, float32, float64.
axis : int
The dimension softmax would be performed on. The default is -1 which indicates the last dimension.
Returns
-------
A Tensor. Has the same type and shape as logits.
"""
return F.softmax(logits, axis=axis)
class GeLU(object):
def __init__(self, approximate=False):
self.approximate = approximate
def __call__(self, x):
return F.gelu(x, approximate=self.approximate)
def gelu(x, approximate=False):
return F.gelu(x, approximate=approximate)
class Dropout(object):
def __init__(self, p, seed=1):
self.p = p
self.seed = seed
def __call__(self, inputs):
output = F.dropout(inputs, p=self.p, mode='upscale_in_train')
return output
class BiasAdd(object):
"""
Adds bias to value.
Parameters
----------
x : tensor
A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
bias : tensor
Must be the same type as value unless value is a quantized type,
in which case a different quantized type may be used.
Returns
-------
A Tensor with the same type as value.
"""
def __init__(self, data_format='channels_last'):
super(BiasAdd, self).__init__()
if data_format in ['channels_first', 'NCL', 'NCW', 'NCHW', 'NCDHW']:
self.data_format = 'channels_first'
elif data_format in ['channels_last', 'NLC', 'NWC', 'NHWC', 'NDHWC']:
self.data_format = 'channels_last'
else:
raise ("Unsupported data format: " + str(data_format))
def __call__(self, x, bias):
if len(x.shape) > 2 and self.data_format == 'channels_first':
x = nchw_to_nhwc(x)
outputs = pd.add(x, bias)
if len(x.shape) > 2 and self.data_format == 'channels_first':
outputs = nhwc_to_nchw(outputs)
return outputs
def bias_add(x, bias):
"""
Adds bias to value.
Parameters
----------
x : tensor
A Tensor with type float, double, int64, int32, uint8, int16, int8, complex64, or complex128.
bias : tensor
Must be the same type as value unless value is a quantized type,
in which case a different quantized type may be used.
data_format : A string.
'N...C' and 'NC...' are supported.
name : str
A name for the operation (optional).
Returns
-------
A Tensor with the same type as value.
"""
#TODO the bias_add only supports channels_last
outputs = pd.add(x, bias)
return outputs
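# Hedged example (not in the original file): bias_add broadcasts one bias value
# per channel over a channels_last tensor; shapes below are illustrative.
#
# x = pd.ones([4, 8, 8, 16])   # NHWC feature map
# b = pd.zeros([16])           # one bias per channel
# y = bias_add(x, b)           # same shape as x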
class Conv1D(object):
def __init__(self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None):
super(Conv1D, self).__init__()
self.data_format, self.padding = preprocess_1d_format(padding=padding, data_format=data_format)
self.stride = stride
self.dilations = dilations
def __call__(self, input, filters):
output = F.conv1d(
x=input, weight=filters, stride=self.stride, dilation=self.dilations, data_format=self.data_format,
padding=self.padding
)
return output
def conv1d(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):
"""
Computes a 1-D convolution given 3-D input and filter tensors.
Parameters
----------
input : tensor
A 3D Tensor. Must be of type float16, float32, or float64
filters : tensor
A 3D Tensor. Must have the same type as input.
stride : int of list
An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
padding : string
'SAME' or 'VALID'
data_format : string
An optional string from "NWC", "NCW". Defaults to "NWC", the data is stored in the order of
[batch, in_width, in_channels]. The "NCW" format stores data as [batch, in_channels, in_width].
dilations : int or list
An int or list of ints that has length 1 or 3 which defaults to 1.
The dilation factor for each dimension of input. If set to k > 1,
there will be k-1 skipped cells between each filter element on that dimension.
Dilations in the batch and depth dimensions must be 1.
name : string
A name for the operation (optional).
Returns
-------
A Tensor. Has the same type as input.
"""
    data_format, padding = preprocess_1d_format(data_format, padding)
    outputs = F.conv1d(
x=input, weight=filters, stride=stride, padding=padding, data_format=data_format, dilation=dilations, name=name
)
return outputs
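# Hedged example (not in the original file): paddle stores the conv1d weight as
# [out_channels, in_channels, kernel_size]; all shapes below are illustrative.
#
# x = pd.ones([8, 100, 16])    # NWC: batch, width, channels
# w = pd.ones([32, 16, 3])     # 32 filters of width 3
# y = conv1d(x, w, stride=1, padding='SAME', data_format='NWC', dilations=1)
# print(y.shape)               # [8, 100, 32] with 'SAME' padding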
class Conv2D(object):
def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
        if self.data_format == 'NHWC':
self._stride = (strides[1], strides[2])
self._dilation = (dilations[1], dilations[2])
        elif self.data_format == 'NCHW':
self._stride = (strides[2], strides[3])
self._dilation = (dilations[2], dilations[3])
def __call__(self, inputs, filters):
outputs = F.conv2d(
x=inputs, weight=filters, stride=self._stride, dilation=self._dilation, padding=self.padding,
data_format=self.data_format
)
return outputs
def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None):
"""
Computes a 2-D convolution given 4-D input and filters tensors.
Parameters
----------
input : tensor
Must be one of the following types: half, bfloat16, float32, float64. A 4-D tensor.
The dimension order is interpreted according to the value of data_format, see below for details.
filters : tensor
Must have the same type as input. A 4-D tensor of shape [filter_height, filter_width, in_channels, out_channels]
strides : int of list
The stride of the sliding window for each dimension of input. If a single value is given it is replicated in the H and W dimension.
By default the N and C dimensions are set to 1. The dimension order is determined by the value of data_format, see below for details.
padding : string
"SAME" or "VALID"
data_format : string
"NHWC", "NCHW". Defaults to "NCHW".
    dilations : list of ints
        list of ints that has length 1, 2 or 4, defaults to 1. The dilation factor for each dimension of input.
Returns
-------
A Tensor. Has the same type as input.
"""
data_format, padding = preprocess_2d_format(data_format, padding)
    if data_format == 'NHWC':
_stride = (strides[1], strides[2])
_dilation = (dilations[1], dilations[2])
    elif data_format == 'NCHW':
_stride = (strides[2], strides[3])
_dilation = (dilations[2], dilations[3])
outputs = F.conv2d(
x=input, weight=filters, stride=_stride, dilation=_dilation, padding=padding, data_format=data_format
)
return outputs
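# Hedged example (not in the original file): strides/dilations use the
# 4-element layout assumed throughout this module and the weight uses paddle's
# [out_channels, in_channels, kH, kW] layout; all shapes are illustrative.
#
# x = pd.ones([2, 32, 32, 3])   # NHWC input
# w = pd.ones([8, 3, 3, 3])     # 8 filters of size 3x3
# y = conv2d(x, w, strides=(1, 1, 1, 1), padding='SAME',
#            data_format='NHWC', dilations=(1, 1, 1, 1))
# print(y.shape)                # [2, 32, 32, 8]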
class Conv3D(object):
def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None):
self.data_format, self.padding = preprocess_3d_format(data_format, padding)
        if self.data_format == 'NDHWC':
self._strides = (strides[1], strides[2], strides[3])
self._dilations = (dilations[1], dilations[2], dilations[3])
        elif self.data_format == 'NCDHW':
self._strides = (strides[2], strides[3], strides[4])
self._dilations = (dilations[2], dilations[3], dilations[4])
def __call__(self, input, filters):
outputs = F.conv3d(
x=input, weight=filters, stride=self._strides, dilation=self._dilations, data_format=self.data_format,
padding=self.padding
)
return outputs
def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None, name=None):
"""
Computes a 3-D convolution given 5-D input and filters tensors.
Parameters
----------
input : tensor
Must be one of the following types: half, bfloat16, float32, float64.
Shape [batch, in_depth, in_height, in_width, in_channels].
filters : tensor
Must have the same type as input. Shape [filter_depth, filter_height, filter_width, in_channels, out_channels].
in_channels must match between input and filters.
strides : tuple of ints
A list of ints that has length >= 5. 1-D tensor of length 5.
The stride of the sliding window for each dimension of input.
Must have strides[0] = strides[4] = 1.
padding : string
A string from: "SAME", "VALID". The type of padding algorithm to use.
data_format : string
An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data.
With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
    dilations : tuple of ints
Defaults to [1, 1, 1, 1, 1]. 1-D tensor of length 5. The dilation factor for each dimension of input.
If set to k > 1, there will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of data_format, see above for details.
Dilations in the batch and depth dimensions must be 1.
name : string
A name for the operation (optional).
Returns
-------
A Tensor. Has the same type as input.
"""
data_format, padding = preprocess_3d_format(data_format, padding)
    if data_format == 'NDHWC':
_strides = (strides[1], strides[2], strides[3])
_dilations = (dilations[1], dilations[2], dilations[3])
    elif data_format == 'NCDHW':
_strides = (strides[2], strides[3], strides[4])
_dilations = (dilations[2], dilations[3], dilations[4])
outputs = F.conv3d(
x=input, weight=filters, stride=_strides, dilation=_dilations, data_format=data_format, padding=padding,
name=name
)
return outputs
def lrn(inputs, depth_radius, bias, alpha, beta):
"""
Local Response Normalization.
Parameters
----------
inputs : tensor
Must be one of the following types: half, bfloat16, float32. 4-D.
depth_radius : int
Defaults to 5. 0-D. Half-width of the 1-D normalization window.
bias : float
Defaults to 1. An offset (usually positive to avoid dividing by 0).
alpha : float
Defaults to 1. A scale factor, usually positive.
beta : float
Defaults to 0.5. An exponent.
Returns
-------
A Tensor. Has the same type as input.
"""
pass
def moments(x, axes, shift=None, keepdims=False):
"""
Calculates the mean and variance of x.
Parameters
----------
x : tensor
A Tensor
axes : ints
Axes along which to compute mean and variance.
shift : int
Not used in the current implementation.
keepdims : bool
produce moments with the same dimensionality as the input.
Returns
-------
Two Tensor objects: mean and variance.
"""
pass
class MaxPool1d(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding)
self.ksize = ksize
self.strides = strides
def __call__(self, inputs):
if self.data_format == 'NLC':
inputs = nhwc_to_nchw(inputs)
outputs = F.max_pool1d(inputs, self.ksize, self.strides, self.padding)
if self.data_format == 'NLC':
outputs = nchw_to_nhwc(outputs)
return outputs
class MaxPool(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
self.ksize = ksize
        if self.data_format == 'NHWC':
self._stride = (strides[1], strides[2])
        elif self.data_format == 'NCHW':
self._stride = (strides[2], strides[3])
def __call__(self, inputs):
outputs = F.max_pool2d(
x=inputs, kernel_size=self.ksize, stride=self._stride, padding=self.padding, data_format=self.data_format
)
return outputs
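# Hedged example (not in the original file): MaxPool wraps F.max_pool2d using
# the module's 4-element stride convention; shapes below are illustrative.
#
# pool = MaxPool(ksize=2, strides=(1, 2, 2, 1), padding='VALID', data_format='NHWC')
# x = pd.ones([2, 32, 32, 3])
# y = pool(x)                   # [2, 16, 16, 3]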
def max_pool(input, ksize, strides, padding, data_format=None):
"""
Performs the max pooling on the input.
Parameters
----------
input : tensor
Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels] if data_format does not start
with "NC" (default), or [batch_size, num_channels] + input_spatial_shape if data_format starts with "NC".
Pooling happens over the spatial dimensions only.
ksize : int or list of ints
An int or list of ints that has length 1, N or N+2.
The size of the window for each dimension of the input tensor.
strides : int or list of ints
An int or list of ints that has length 1, N or N+2.
The stride of the sliding window for each dimension of the input tensor.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
Returns
-------
A Tensor of format specified by data_format. The max pooled output tensor.
"""
pass
class AvgPool1d(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_1d_format(data_format=data_format, padding=padding)
self.ksize = ksize
self.strides = strides
def __call__(self, inputs):
if self.data_format == 'NLC':
inputs = nhwc_to_nchw(inputs)
outputs = F.avg_pool1d(inputs, self.ksize, self.strides, self.padding)
if self.data_format == 'NLC':
outputs = nchw_to_nhwc(outputs)
return outputs
class AvgPool(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
self.filter_size = ksize
        if self.data_format == 'NHWC':
self._stride = (strides[1], strides[2])
        elif self.data_format == 'NCHW':
self._stride = (strides[2], strides[3])
def __call__(self, inputs):
outputs = F.avg_pool2d(
inputs, kernel_size=self.filter_size, stride=self._stride, padding=self.padding,
data_format=self.data_format
)
return outputs
def avg_pool(input, ksize, strides, padding):
"""
Performs the avg pooling on the input.
Parameters
----------
input : tensor
Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels]
if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape
if data_format starts with "NC". Pooling happens over the spatial dimensions only.
ksize : int or list of ints
An int or list of ints that has length 1, N or N+2.
The size of the window for each dimension of the input tensor.
strides : int or list of ints
An int or list of ints that has length 1, N or N+2.
The stride of the sliding window for each dimension of the input tensor.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
Returns
-------
A Tensor of format specified by data_format. The average pooled output tensor.
"""
pass
class MaxPool3d(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_3d_format(data_format, padding)
self.ksize = ksize
if self.data_format == 'NCDHW':
self.strides = (strides[2], strides[3], strides[4])
if self.data_format == 'NDHWC':
self.strides = (strides[1], strides[2], strides[3])
def __call__(self, inputs):
outputs = F.max_pool3d(
inputs, kernel_size=self.ksize, stride=self.strides, padding=self.padding, data_format=self.data_format
)
return outputs
def max_pool3d(input, ksize, strides, padding, data_format=None, name=None):
"""
Performs the max pooling on the input.
Parameters
----------
input : tensor
A 5-D Tensor of the format specified by data_format.
ksize : int or list of ints
An int or list of ints that has length 1, 3 or 5.
The size of the window for each dimension of the input tensor.
strides : int or list of ints
An int or list of ints that has length 1, 3 or 5.
The stride of the sliding window for each dimension of the input tensor.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
"NDHWC", "NCDHW". Defaults to "NDHWC". The data format of the input and output data.
With the default format "NDHWC", the data is stored in the order of: [batch, in_depth, in_height, in_width, in_channels].
Alternatively, the format could be "NCDHW", the data storage order is: [batch, in_channels, in_depth, in_height, in_width].
name : string
A name for the operation (optional).
Returns
-------
A Tensor of format specified by data_format. The max pooled output tensor.
"""
pass
class AvgPool3d(object):
def __init__(self, ksize, strides, padding, data_format=None):
self.data_format, self.padding = preprocess_3d_format(data_format, padding)
self.ksize = ksize
if self.data_format == 'NCDHW':
self.strides = (strides[2], strides[3], strides[4])
if self.data_format == 'NDHWC':
self.strides = (strides[1], strides[2], strides[3])
def __call__(self, inputs):
outputs = F.avg_pool3d(
inputs, kernel_size=self.ksize, stride=self.strides, padding=self.padding, data_format=self.data_format
)
return outputs
def avg_pool3d(input, ksize, strides, padding, data_format=None, name=None):
"""
Performs the average pooling on the input.
Parameters
----------
input : tensor
A 5-D Tensor of shape [batch, height, width, channels] and type float32, float64, qint8, quint8, or qint32.
ksize : int or list of ints
An int or list of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor.
strides : int or list of ints
An int or list of ints that has length 1, 3 or 5.
The stride of the sliding window for each dimension of the input tensor.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
'NDHWC' and 'NCDHW' are supported.
name : string
Optional name for the operation.
Returns
-------
A Tensor with the same type as value. The average pooled output tensor.
"""
pass
def pool(input, window_shape, pooling_type, strides=None, padding='VALID', data_format=None, dilations=None, name=None):
"""
Performs an N-D pooling operation.
Parameters
----------
input : tensor
Tensor of rank N+2, of shape [batch_size] + input_spatial_shape + [num_channels]
if data_format does not start with "NC" (default), or [batch_size, num_channels] + input_spatial_shape
if data_format starts with "NC". Pooling happens over the spatial dimensions only.
window_shape : int
Sequence of N ints >= 1.
pooling_type : string
Specifies pooling operation, must be "AVG" or "MAX".
strides : ints
Sequence of N ints >= 1. Defaults to [1]*N. If any value of strides is > 1, then all values of dilation_rate must be 1.
padding : string
The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
See the "returns" section of tf.ops.convolution for details.
data_format : string
Specifies whether the channel dimension of the input and output is the last dimension (default, or if data_format does not start with "NC"),
or the second dimension (if data_format starts with "NC").
For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations : list of ints
Dilation rate. List of N ints >= 1. Defaults to [1]*N. If any value of dilation_rate is > 1, then all values of strides must be 1.
name : string
Optional. Name of the op.
Returns
-------
Tensor of rank N+2, of shape [batch_size] + output_spatial_shape + [num_channels]
"""
pass
class DepthwiseConv2d(object):
def __init__(self, strides, padding, data_format=None, dilations=None, ksize=None, channel_multiplier=1):
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
if self.data_format == 'NHWC':
self._stride = (strides[1], strides[2])
if self.data_format == 'NCHW':
self._stride = (strides[2], strides[3])
self.dilations = dilations
self.channel_multiplier = channel_multiplier
def __call__(self, input, filter, point_filter=None):
if self.data_format == 'NHWC':
channel = input.shape[-1]
elif self.data_format == 'NCHW':
channel = input.shape[1]
depthwise_conv = F.conv2d(
input, filter, data_format=self.data_format, groups=channel, dilation=self.dilations, stride=self._stride,
padding=self.padding
)
pointwise_conv = F.conv2d(depthwise_conv, point_filter, data_format=self.data_format, padding=self.padding)
return pointwise_conv
def depthwise_conv2d(input, filter, strides, padding, data_format=None, dilations=None, name=None):
"""
Depthwise 2-D convolution.
Parameters
----------
input : tensor
4-D with shape according to data_format.
filter : tensor
4-D with shape [filter_height, filter_width, in_channels, channel_multiplier].
strides : list
1-D of size 4. The stride of the sliding window for each dimension of input.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
The data format for input. Either "NHWC" (default) or "NCHW".
dilations : list
1-D of size 2. The dilation rate in which we sample input values across the height and width dimensions in atrous convolution.
If it is greater than 1, then all values of strides must be 1.
name : string
A name for this operation (optional).
Returns
-------
A 4-D Tensor with shape according to data_format.
E.g., for "NHWC" format, shape is [batch, out_height, out_width, in_channels * channel_multiplier].
"""
pass
class Conv1d_transpose(object):
def __init__(
self, stride, padding, data_format='NWC', dilations=None, out_channel=None, k_size=None, in_channels=None
):
self.stride = stride
self.dilations = dilations
self.data_format, self.padding = preprocess_1d_format(data_format, padding)
def __call__(self, input, filters):
out = F.conv1d_transpose(
x=input,
weight=filters,
padding=self.padding,
stride=self.stride,
dilation=self.dilations,
data_format=self.data_format,
)
return out
def conv1d_transpose(
input, filters, output_shape, stride, padding='SAME', data_format='NWC', dilations=None, name=None
):
"""
The transpose of conv1d.
Parameters
----------
input : tensor
A 3-D Tensor of type float and shape [batch, in_width, in_channels]
for NWC data format or [batch, in_channels, in_width] for NCW data format.
filters : tensor
A 3-D Tensor with the same type as value and shape [filter_width, output_channels, in_channels].
filter's in_channels dimension must match that of value.
output_shape : tensor
A 1-D Tensor, containing three elements, representing the output shape of the deconvolution op.
strides : list
An int or list of ints that has length 1 or 3. The number of entries by which the filter is moved right at each step.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
'NWC' and 'NCW' are supported.
dilations : list
An int or list of ints that has length 1 or 3 which defaults to 1.
The dilation factor for each dimension of input. If set to k > 1,
there will be k-1 skipped cells between each filter element on that dimension.
Dilations in the batch and depth dimensions must be 1.
name : string
Optional name for the returned tensor.
Returns
-------
A Tensor with the same type as value.
"""
data_format, padding = preprocess_1d_format(data_format, padding)
output = F.conv1d_transpose(
x=input,
weight=filters,
stride=stride,
padding=padding,
dilation=dilations,
data_format=data_format,
output_size=output_shape,
)
return output
class Conv2d_transpose(object):
def __init__(
self, strides, padding, data_format='NHWC', dilations=None, name=None, out_channel=None, k_size=None,
in_channels=None
):
self.strides = strides
self.dilations = dilations
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
def __call__(self, input, filters):
output = F.conv2d_transpose(
x=input, weight=filters, stride=self.strides, padding=self.padding, dilation=self.dilations,
data_format=self.data_format
)
return output
def conv2d_transpose(
input, filters, output_shape, strides, padding='SAME', data_format='NHWC', dilations=None, name=None
):
"""
The transpose of conv2d.
Parameters
----------
input : tensor
A 4-D Tensor of type float and shape [batch, height, width, in_channels]
for NHWC data format or [batch, in_channels, height, width] for NCHW data format.
filters : tensor
A 4-D Tensor with the same type as input and shape [height, width,
output_channels, in_channels]. filter's in_channels dimension must match that of input.
output_shape : tensor
A 1-D Tensor representing the output shape of the deconvolution op.
strides : list
An int or list of ints that has length 1, 2 or 4. The stride of the sliding window for each dimension of input.
If a single value is given it is replicated in the H and W dimension.
By default the N and C dimensions are set to 0.
The dimension order is determined by the value of data_format, see below for details.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
'NHWC' and 'NCHW' are supported.
dilations : list
An int or list of ints that has length 1, 2 or 4, defaults to 1.
name : string
Optional name for the returned tensor.
Returns
-------
A Tensor with the same type as input.
"""
data_format, padding = preprocess_2d_format(data_format, padding)
output = F.conv2d_transpose(
x=input,
weight=filters,
output_size=output_shape,
stride=strides,
padding=padding,
dilation=dilations,
data_format=data_format,
)
return output
class Conv3d_transpose(object):
def __init__(
self, strides, padding, data_format='NDHWC', dilations=None, name=None, out_channel=None, k_size=None,
in_channels=None
):
self.strides = strides
self.dilations = dilations
self.data_format, self.padding = preprocess_3d_format(data_format, padding)
def __call__(self, input, filters):
output = F.conv3d_transpose(
x=input, weight=filters, stride=self.strides, padding=self.padding, dilation=self.dilations,
data_format=self.data_format
)
return output
def conv3d_transpose(
input, filters, output_shape, strides, padding='SAME', data_format='NDHWC', dilations=None, name=None
):
"""
The transpose of conv3d.
Parameters
----------
input : tensor
        A 5-D Tensor of type float and shape [batch, depth, height, width, in_channels] for
        NDHWC data format or [batch, in_channels, depth, height, width] for NCDHW data format.
filters : tensor
        A 5-D Tensor with the same type as value and shape [depth, height, width, output_channels, in_channels].
filter's in_channels dimension must match that of value.
output_shape : tensor
A 1-D Tensor representing the output shape of the deconvolution op.
strides : list
An int or list of ints that has length 1, 3 or 5.
padding : string
'VALID' or 'SAME'. The padding algorithm. See the "returns" section of tf.ops.convolution for details.
data_format : string
'NDHWC' and 'NCDHW' are supported.
dilations : list of ints
An int or list of ints that has length 1, 3 or 5, defaults to 1.
name : string
Optional name for the returned tensor.
Returns
-------
A Tensor with the same type as value.
"""
data_format, padding = preprocess_3d_format(data_format, padding)
output = F.conv3d_transpose(
x=input,
weight=filters,
output_size=output_shape,
stride=strides,
padding=padding,
dilation=dilations,
data_format=data_format,
)
return output
class BatchNorm(object):
def __init__(
self, decay=0.9, epsilon=0.00001, beta=None, gamma=None, moving_mean=None, moving_var=None, num_features=None,
data_format='channels_last', is_train=False
):
self.decay = decay
self.epsilon = epsilon
self.data_format = data_format
self.beta = beta
self.gamma = gamma
self.moving_mean = moving_mean
self.moving_var = moving_var
self.num_features = num_features
self.is_train = is_train
self.axes = None
def __call__(self, inputs):
data_format = self.channel_format(inputs)
outputs = pd.nn.functional.batch_norm(
inputs, self.moving_mean, self.moving_var, weight=self.gamma, bias=self.beta, training=self.is_train,
momentum=self.decay, epsilon=self.epsilon, data_format=data_format
)
return outputs
def channel_format(self, inputs):
""" return "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". """
len_in_shape = len(inputs.shape)
if len_in_shape == 2:
return 'NC'
if self.data_format == 'channels_last':
if len_in_shape == 3:
return 'NLC'
if len_in_shape == 4:
return 'NHWC'
if len_in_shape == 5:
return 'NDHWC'
if self.data_format == 'channels_first':
if len_in_shape == 3:
return 'NCL'
if len_in_shape == 4:
return 'NCHW'
if len_in_shape == 5:
return 'NCDHW'
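# Hedged example (not in the original file): in inference mode BatchNorm simply
# normalizes with the supplied moving statistics; all tensors are illustrative.
#
# C = 8
# bn = BatchNorm(beta=pd.zeros([C]), gamma=pd.ones([C]),
#                moving_mean=pd.zeros([C]), moving_var=pd.ones([C]),
#                num_features=C, data_format='channels_last', is_train=False)
# x = pd.ones([4, 16, 16, C])   # NHWC input
# y = bn(x)                     # same shape as x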
class GroupConv2D(object):
def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, groups):
self.out_channel = out_channel
self.k_size = k_size
self.groups = groups
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
        if self.data_format == 'NHWC':
self.strides = (strides[1], strides[2])
self.dilations = (dilations[1], dilations[2])
        elif self.data_format == 'NCHW':
self.strides = (strides[2], strides[3])
self.dilations = (dilations[2], dilations[3])
def __call__(self, inputs, filters):
outputs = F.conv2d(
inputs, weight=filters, stride=self.strides, padding=self.padding, dilation=self.dilations,
groups=self.groups, data_format=self.data_format
)
return outputs
class SeparableConv1D(object):
def __init__(self, stride, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier):
self.stride = stride
self.dilations = dilations
self.out_channel = out_channel
self.k_size = k_size
self.in_channel = int(in_channel)
self.depth_multiplier = depth_multiplier
self.data_format, self.padding = preprocess_1d_format(data_format, padding)
def __call__(self, inputs, depthwise_filters, pointwise_filters):
outputs = F.conv1d(
inputs, weight=depthwise_filters, stride=self.stride, padding=self.padding, dilation=self.dilations,
groups=self.in_channel, data_format=self.data_format
)
outputs = F.conv1d(
outputs, weight=pointwise_filters, stride=1, padding=self.padding, dilation=1, groups=1,
data_format=self.data_format
)
return outputs
class SeparableConv2D(object):
def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel, depth_multiplier):
self.out_channel = out_channel
self.k_size = k_size
self.in_channel = int(in_channel)
self.depth_multiplier = depth_multiplier
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
        if self.data_format == 'NHWC':
self.strides = (strides[1], strides[2])
self.dilations = (dilations[1], dilations[2])
        elif self.data_format == 'NCHW':
self.strides = (strides[2], strides[3])
self.dilations = (dilations[2], dilations[3])
def __call__(self, inputs, depthwise_filters, pointwise_filters):
outputs = F.conv2d(
inputs, weight=depthwise_filters, stride=self.strides, padding=self.padding, dilation=self.dilations,
groups=self.in_channel, data_format=self.data_format
)
outputs = F.conv2d(
outputs, weight=pointwise_filters, stride=1, padding=self.padding, dilation=1, groups=1,
data_format=self.data_format
)
return outputs
class AdaptiveMeanPool1D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_1d_format(data_format, None)
self.output_size = output_size
def __call__(self, input):
if self.data_format == 'NLC':
input = nhwc_to_nchw(input)
output = F.adaptive_avg_pool1d(input, self.output_size)
if self.data_format == 'NLC':
output = nchw_to_nhwc(output)
return output
class AdaptiveMeanPool2D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_2d_format(data_format, None)
self.output_size = output_size
def __call__(self, inputs):
return F.adaptive_avg_pool2d(inputs, output_size=self.output_size, data_format=self.data_format)
class AdaptiveMeanPool3D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_3d_format(data_format, None)
self.output_size = output_size
def __call__(self, inputs):
return F.adaptive_avg_pool3d(inputs, output_size=self.output_size, data_format=self.data_format)
class AdaptiveMaxPool1D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_1d_format(data_format, None)
self.output_size = output_size
def __call__(self, input):
if self.data_format == 'NLC':
input = nhwc_to_nchw(input)
output = F.adaptive_max_pool1d(input, self.output_size)
if self.data_format == 'NLC':
output = nchw_to_nhwc(output)
return output
class AdaptiveMaxPool2D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_2d_format(data_format, None)
self.output_size = output_size
def __call__(self, inputs):
if self.data_format == 'NHWC':
inputs = nhwc_to_nchw(inputs)
output = F.adaptive_max_pool2d(inputs, self.output_size)
if self.data_format == 'NHWC':
output = nchw_to_nhwc(output)
return output
class AdaptiveMaxPool3D(object):
def __init__(self, output_size, data_format):
self.data_format, _ = preprocess_3d_format(data_format, None)
self.output_size = output_size
def __call__(self, inputs):
if self.data_format == 'NDHWC':
inputs = nhwc_to_nchw(inputs)
output = F.adaptive_max_pool3d(inputs, self.output_size)
if self.data_format == 'NDHWC':
output = nchw_to_nhwc(output)
return output
class BinaryConv2D(object):
def __init__(self, strides, padding, data_format, dilations, out_channel, k_size, in_channel):
pass
def __call__(self, inputs, filters):
raise NotImplementedError
class DorefaConv2D(object):
def __init__(self, bitW, bitA, strides, padding, data_format, dilations, out_channel, k_size, in_channel):
pass
def __call__(self, inputs, filters):
raise NotImplementedError
class rnncell(RNNCellBase):
def __init__(self, weight_ih, weight_hh, bias_ih, bias_hh, act):
super(rnncell, self).__init__()
self.weight_ih = weight_ih
self.weight_hh = weight_hh
self.bias_ih = bias_ih
self.bias_hh = bias_hh
self.act_fn = F.relu if act == 'relu' else F.tanh
self.input_size = weight_ih.shape[1]
def forward(self, input, h):
i2h = pd.matmul(input, self.weight_ih, transpose_y=True)
if self.bias_ih is not None:
i2h += self.bias_ih
h2h = pd.matmul(h, self.weight_hh, transpose_y=True)
if self.bias_hh is not None:
h2h += self.bias_hh
h = self.act_fn(i2h + h2h)
return h, h
class lstmcell(RNNCellBase):
def __init__(self, weight_ih, weight_hh, bias_ih, bias_hh, act=None):
super(lstmcell, self).__init__()
self.weight_ih = weight_ih
self.weight_hh = weight_hh
self.bias_ih = bias_ih
self.bias_hh = bias_hh
self.gate_act_fn = F.sigmoid
self.act_fn = F.tanh
self.input_size = weight_ih.shape[1]
def forward(self, inputs, h, c):
gates = pd.matmul(inputs, self.weight_ih, transpose_y=True)
if self.bias_ih is not None:
gates += self.bias_ih
gates += pd.matmul(h, self.weight_hh, transpose_y=True)
if self.bias_hh is not None:
gates += self.bias_hh
gates_slices = pd.split(gates, num_or_sections=4, axis=-1)
i = self.gate_act_fn(gates_slices[0])
f = self.gate_act_fn(gates_slices[1])
o = self.gate_act_fn(gates_slices[3])
c = f * c + i * self.act_fn(gates_slices[2])
h = o * self.act_fn(c)
return h, h, c
class grucell(RNNCellBase):
def __init__(self, weight_ih, weight_hh, bias_ih, bias_hh, act=None):
super(grucell, self).__init__()
self.weight_ih = weight_ih
self.weight_hh = weight_hh
self.bias_ih = bias_ih
self.bias_hh = bias_hh
self.gate_act_fn = F.sigmoid
self.act_fn = F.tanh
self.input_size = weight_ih.shape[1]
def forward(self, input, h):
x_gates = pd.matmul(input, self.weight_ih, transpose_y=True)
if self.bias_ih is not None:
x_gates = x_gates + self.bias_ih
h_gates = pd.matmul(h, self.weight_hh, transpose_y=True)
if self.bias_hh is not None:
h_gates = h_gates + self.bias_hh
x_r, x_z, x_c = pd.split(x_gates, num_or_sections=3, axis=-1)
h_r, h_z, h_c = pd.split(h_gates, num_or_sections=3, axis=-1)
r = self.gate_act_fn(x_r + h_r)
z = self.gate_act_fn(x_z + h_z)
c = self.act_fn(x_c + r * h_c) # apply reset gate after mm
h = (h - c) * z + c
return h, h
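# Hedged example (not in the original file): for input size I and hidden size H
# the stacked gate weights are weight_ih [3*H, I] and weight_hh [3*H, H];
# the sizes and random values below are illustrative.
#
# I, H, B = 4, 6, 2
# cell = grucell(weight_ih=pd.randn([3 * H, I]), weight_hh=pd.randn([3 * H, H]),
#                bias_ih=pd.zeros([3 * H]), bias_hh=pd.zeros([3 * H]))
# out, new_h = cell.forward(pd.randn([B, I]), pd.zeros([B, H]))  # both [B, H]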
def split_states(states, bidirectional=False, state_components=1):
if state_components == 1:
states = pd.unstack(states)
if not bidirectional:
return states
else:
return list(zip(states[::2], states[1::2]))
else:
assert len(states) == state_components
states = tuple([pd.unstack(item) for item in states])
if not bidirectional:
return list(zip(*states))
else:
states = list(zip(*states))
return list(zip(states[::2], states[1::2]))
def concat_states(states, bidirectional=False, state_components=1):
if state_components == 1:
return pd.stack(flatten(states))
else:
states = flatten(states)
        components = []
        for i in range(state_components):
            components.append(states[i::state_components])
        return tuple([pd.stack(item) for item in components])
class rnnbase(LayerList):
def __init__(
self,
mode,
input_size,
hidden_size,
num_layers,
bias,
batch_first,
dropout,
bidirectional,
is_train,
w_ih,
w_hh,
b_ih,
b_hh,
):
super(rnnbase, self).__init__()
self.mode = mode
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.time_major = False if batch_first else True
self.dropout = dropout
self.bidirect = 2 if bidirectional else 1
self.state_components = 2 if mode == 'LSTM' else 1
self.training = is_train
self.w_ih = w_ih
self.w_hh = w_hh
self.b_ih = b_ih
self.b_hh = b_hh
self.bias = bias
RNN = pd.nn.RNN
BiRNN = pd.nn.BiRNN
kwargs = {"weight_ih_attr": None, "weight_hh_attr": None, "bias_ih_attr": self.bias, "bias_hh_attr": self.bias}
act = None
rnn_cls = None
if mode == "LSTM":
rnn_cls = pd.nn.LSTMCell
elif mode == "GRU":
rnn_cls = pd.nn.GRUCell
elif mode == 'RNN_TANH':
rnn_cls = pd.nn.SimpleRNNCell
kwargs["activation"] = 'tanh'
elif mode == 'RNN_RELU':
rnn_cls = pd.nn.SimpleRNNCell
kwargs["activation"] = 'relu'
if not bidirectional:
is_reverse = False
for i in range(self.num_layers):
weight_ih = self.w_ih[i]
weight_hh = self.w_hh[i]
if self.bias:
bias_ih = self.b_ih[i]
bias_hh = self.b_hh[i]
else:
bias_ih = None
bias_hh = None
cell = rnn_cls(input_size=self.input_size, hidden_size=self.hidden_size, **kwargs)
cell.weight_ih = weight_ih
cell.weight_hh = weight_hh
cell.bias_ih = bias_ih
cell.bias_hh = bias_hh
# cell = rnn_cls(weight_ih, weight_hh, bias_ih, bias_hh, act)
self.append(RNN(cell, is_reverse, self.time_major))
else:
for i in range(self.num_layers):
weight_ih_fw = self.w_ih[2 * i]
weight_hh_fw = self.w_hh[2 * i]
weight_ih_bw = self.w_ih[2 * i + 1]
weight_hh_bw = self.w_hh[2 * i + 1]
if self.bias:
bias_ih_fw = self.b_ih[2 * i]
bias_hh_fw = self.b_hh[2 * i]
bias_ih_bw = self.b_ih[2 * i + 1]
bias_hh_bw = self.b_hh[2 * i + 1]
else:
bias_ih_fw = None
bias_hh_fw = None
bias_ih_bw = None
bias_hh_bw = None
layer_input_size = self.input_size if i == 0 else self.hidden_size * self.bidirect
cell_fw = rnn_cls(input_size=layer_input_size, hidden_size=self.hidden_size, **kwargs)
cell_fw.weight_ih = weight_ih_fw
cell_fw.weight_hh = weight_hh_fw
cell_fw.bias_ih = bias_ih_fw
cell_fw.bias_hh = bias_hh_fw
cell_bw = rnn_cls(input_size=layer_input_size, hidden_size=self.hidden_size, **kwargs)
cell_bw.weight_ih = weight_ih_bw
cell_bw.weight_hh = weight_hh_bw
cell_bw.bias_ih = bias_ih_bw
cell_bw.bias_hh = bias_hh_bw
self.append(BiRNN(cell_fw, cell_bw, self.time_major))
self.could_use_cudnn = True
self.could_use_cudnn &= len(self.parameters()) == num_layers * 4 * self.bidirect
param_names = []
for layer in range(self.num_layers):
for direction in range(self.bidirect):
suffix = '_reverse' if direction == 1 else ''
param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
if bias != False: param_names.append('bias_ih_l{}{}')
if bias != False: param_names.append('bias_hh_l{}{}')
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, self.parameters()):
setattr(self, name, param)
self.flatten_parameters()
def flatten_parameters(self):
if self.could_use_cudnn:
params = self.parameters(include_sublayers=False)
shape = [np.prod(param.shape) for param in params]
self._all_weights = [None] * len(params)
for i, param in enumerate(params):
offset = 0 if i % 4 < 2 else (2 * self.num_layers * self.bidirect)
layer_idx = i // 4
self._all_weights[offset + layer_idx * 2 + i % 2] = param
self._flat_weight = [
self.create_parameter(
shape=[np.sum(shape)], dtype=params[0].dtype, default_initializer=I.Constant(0.0)
)
]
self._dropout_state = self.create_variable(dtype=fluid.core.VarDesc.VarType.UINT8)
with fluid.program_guard(fluid.default_startup_program(), fluid.default_startup_program()):
with framework.no_grad():
self._helper.append_op(
type="coalesce_tensor", inputs={"Input": self._all_weights}, outputs={
"Output": self._all_weights,
"FusedOutput": self._flat_weight
}, attrs={
"copy_data": True,
"use_align": False,
"dtype": params[0].dtype
}
)
def _cudnn_impl(self, inputs, initial_states, sequence_length):
if not self.time_major:
inputs = pd.tensor.transpose(inputs, [1, 0, 2])
out = self._helper.create_variable_for_type_inference(inputs.dtype)
state = [self._helper.create_variable_for_type_inference(inputs.dtype) for i in range(self.state_components)]
reserve = self._helper.create_variable_for_type_inference(
dtype=fluid.core.VarDesc.VarType.UINT8, stop_gradient=True
)
inputs = {
'Input': inputs,
'WeightList': self._all_weights,
'PreState': initial_states,
'SequenceLength': sequence_length
}
attrs = {
'dropout_prob': self.dropout,
'is_bidirec': self.bidirect == 2,
'input_size': self.input_size,
'hidden_size': self.hidden_size,
'num_layers': self.num_layers,
'mode': self.mode,
'is_test': not self.training
}
outputs = {
'Out': out,
'State': state,
'Reserve': reserve,
'DropoutState': self._dropout_state,
}
self._helper.append_op(type="rnn", inputs=inputs, outputs=outputs, attrs=attrs)
out = pd.tensor.transpose(out, [1, 0, 2]) if not self.time_major else out
return out, tuple(state) if len(state) > 1 else state[0]
def forward(self, inputs, initial_states=None):
batch_index = 1 if self.time_major else 0
dtype = inputs.dtype
sequence_length = None
if initial_states is None:
state_shape = (self.num_layers * self.bidirect, -1, self.hidden_size)
if self.state_components == 1:
initial_states = pd.fluid.layers.fill_constant_batch_size_like(
inputs, state_shape, dtype, 0, batch_index, 1
)
else:
initial_states = tuple(
[
pd.fluid.layers.fill_constant_batch_size_like(inputs, state_shape, dtype, 0, batch_index, 1)
for _ in range(self.state_components)
]
)
if self.could_use_cudnn:
# Add CPU kernel and dispatch in backend later
return self._cudnn_impl(inputs, initial_states, sequence_length)
states = split_states(initial_states, self.bidirect == 2, self.state_components)
final_states = []
for i, rnn_layer in enumerate(self):
if i > 0:
inputs = F.dropout(inputs, self.dropout, training=self.training, mode="upscale_in_train")
outputs, final_state = rnn_layer(inputs, states[i], sequence_length)
final_states.append(final_state)
inputs = outputs
final_states = concat_states(final_states, self.bidirect == 2, self.state_components)
return outputs, final_states
class layernorm(object):
def __init__(self, normalized_shape, gamma, beta, eps, input_shape):
self.normalized_shape = normalized_shape
self.gamma = gamma
self.beta = beta
self.eps = eps
self.input_shape = input_shape
def __call__(self, input):
scale = pd.flatten(self.gamma)
offset = pd.flatten(self.beta)
output = pd.nn.functional.layer_norm(
input, normalized_shape=self.normalized_shape, weight=scale, bias=offset, epsilon=self.eps
)
return output
class multiheadattention(object):
def __init__(
self,
embed_dim,
num_heads,
dropout,
batch_first,
need_weights,
q_weight,
k_weight,
v_weight,
out_weight,
q_bias,
k_bias,
v_bias,
out_bias,
train,
):
self.embed_dim_check = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.need_weights = need_weights
self.q_weight = q_weight
self.k_weight = k_weight
self.v_weight = v_weight
self.out_weight = out_weight
self.q_bias = q_bias
self.k_bias = k_bias
self.v_bias = v_bias
self.out_bias = out_bias
self.train = train
def __call__(self, q, k, v, attn_mask, key_padding_mask):
#transpose tensor shape
k = q if k is None else k
v = q if v is None else v
if self.batch_first:
q = pd.transpose(q, perm=(1, 0, 2))
k = pd.transpose(k, perm=(1, 0, 2))
v = pd.transpose(v, perm=(1, 0, 2))
#check tensor shape
tgt_len, batch_size, embed_dim = q.shape
src_len, _, _ = k.shape
if embed_dim != self.embed_dim_check:
raise ValueError("Expecting embedding dimension is {}, but got {}".format(self.embed_dim_check, embed_dim))
head_dim = embed_dim // self.num_heads
if head_dim * self.num_heads != embed_dim:
raise ValueError("embedding dimension {} not divisible by num_heads {}".format(embed_dim, self.num_heads))
if k.shape[:2] != v.shape[:2]:
raise ValueError(
"key's sequence length and batch size {} do not match value's {}".format(k.shape[:2], v.shape[:2])
)
#compute q k v linear projection
q = F.linear(q, self.q_weight, self.q_bias)
k = F.linear(k, self.k_weight, self.k_bias)
v = F.linear(v, self.v_weight, self.v_bias)
# check and prep attention mask
if attn_mask is not None:
if convert_dtype(attn_mask.dtype) == 'uint8':
warnings.warn("attn_mask tensor dtype should better be bool.")
attn_mask = pd.cast(attn_mask, dtype='bool')
elif convert_dtype(attn_mask.dtype) not in ('float32', 'float64', 'bool'):
raise TypeError(
"attn_mask tensor dtype should be in ('float32', 'float64', 'bool','uint8'),"
"but got {}".format(attn_mask.dtype)
)
if attn_mask.dim() == 2:
if attn_mask.shape != [tgt_len, src_len]:
raise ValueError(
"The shape of 2D attn_mask should be {}, but got {}.".format(
[tgt_len, src_len], attn_mask.shape
)
)
attn_mask = pd.unsqueeze(attn_mask, axis=0)
elif attn_mask.dim() == 3:
size_3d = [batch_size * self.num_heads, tgt_len, src_len]
if attn_mask.shape != size_3d:
raise ValueError(
"The shape of 3D attn_mask should be {}, but got {}.".format(size_3d, attn_mask.shape)
)
else:
raise ValueError("attn_mask's dimension {} is not supported.".format(attn_mask.dim()))
        # prep multihead q k v
q = pd.transpose(pd.reshape(q, shape=(tgt_len, batch_size * self.num_heads, head_dim)), perm=(1, 0, 2))
k = pd.transpose(pd.reshape(k, shape=(src_len, batch_size * self.num_heads, head_dim)), perm=(1, 0, 2))
v = pd.transpose(pd.reshape(v, shape=(src_len, batch_size * self.num_heads, head_dim)), perm=(1, 0, 2))
#check and prep key padding mask
if key_padding_mask is not None:
if key_padding_mask.shape != [batch_size, src_len]:
raise ValueError(
"Expecting key_padding_mask shape is {}, but got {}.".format(
[batch_size, src_len], key_padding_mask.shape
)
)
if convert_dtype(key_padding_mask.dtype) == 'uint8':
warnings.warn("key_padding_mask tensor dtype should better be bool.")
key_padding_mask = pd.cast(key_padding_mask, dtype='bool')
elif convert_dtype(key_padding_mask.dtype) != 'bool':
raise TypeError(
"key_padding_mask tensor dtype should be 'bool' or 'uint8', but got {}.".format(
key_padding_mask.dtype
)
)
key_padding_mask = key_padding_mask.reshape((batch_size, 1, 1, src_len)).expand(
(-1, self.num_heads, -1, -1)
).reshape((batch_size * self.num_heads, 1, src_len))
if attn_mask is None:
attn_mask = key_padding_mask
elif convert_dtype(attn_mask.dtype) == 'bool':
attn_mask = pd.logical_or(attn_mask, key_padding_mask)
else:
# attn_mask = attn_mask.expand((self.num_heads * batch_size, -1, -1))
# key_padding_mask = key_padding_mask.expand((-1,tgt_len, -1))
# attn_mask = attn_mask.numpy()
# key_padding_mask = key_padding_mask.numpy()
# attn_mask[key_padding_mask] = float('-inf')
# attn_mask = pd.to_tensor(attn_mask, dtype='float32')
key_padding_mask_inf = pd.full_like(key_padding_mask, fill_value=float('-inf'), dtype='float32')
attn_mask = pd.where(key_padding_mask, key_padding_mask_inf, attn_mask)
#convert bool mask to float
if attn_mask is not None and convert_dtype(attn_mask.dtype) == 'bool':
# new_attn_mask = pd.zeros_like(attn_mask, dtype='float32')
# np_new_attn_mask = new_attn_mask.numpy()
# np_attn_mask = attn_mask.numpy()
# np_new_attn_mask[np_attn_mask] = float('-inf')
# attn_mask = pd.to_tensor(np_new_attn_mask, dtype='float32')
new_attn_mask_zero = pd.zeros_like(attn_mask, dtype='float32')
new_attn_mask_inf = pd.ones_like(attn_mask, dtype='float32') * -np.inf
attn_mask = pd.where(attn_mask, new_attn_mask_inf, new_attn_mask_zero)
# attention and out projection
# scale queries by the per-head dimension, as in standard scaled dot-product attention
q = q / math.sqrt(head_dim)
k = pd.transpose(k, perm=(0, 2, 1))
attn = pd.bmm(q, k)
if attn_mask is not None:
attn += attn_mask
attn = pd.nn.functional.softmax(attn)
if self.dropout:
attn = F.dropout(attn, self.dropout, training=self.train, mode="upscale_in_train")
output = pd.bmm(attn, v)
output = pd.reshape(pd.transpose(output, perm=(1, 0, 2)), shape=(tgt_len, batch_size, embed_dim))
output = F.linear(output, weight=self.out_weight, bias=self.out_bias)
if self.batch_first:
output = pd.transpose(output, perm=(1, 0, 2))
if self.need_weights:
attn = pd.reshape(attn, shape=(batch_size, self.num_heads, tgt_len, src_len))
attn = pd.sum(attn, axis=1) / self.num_heads
return output, attn
else:
return output, None
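# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Minimal self-attention call through the `multiheadattention` callable above. It assumes
# `pd` is paddle and `F` is paddle.nn.functional, as those names are used in this file;
# the weight shapes follow the F.linear projections in __call__ (paddle expects
# (in_features, out_features)) and all values below are illustrative assumptions.
def _multiheadattention_usage_example():
    embed_dim, num_heads, batch, seq = 16, 4, 2, 5
    def w():
        return pd.randn([embed_dim, embed_dim])   # square projection weight
    def b():
        return pd.zeros([embed_dim])
    mha = multiheadattention(
        embed_dim=embed_dim, num_heads=num_heads, dropout=0.0,
        batch_first=True, need_weights=True,
        q_weight=w(), k_weight=w(), v_weight=w(), out_weight=w(),
        q_bias=b(), k_bias=b(), v_bias=b(), out_bias=b(), train=False,
    )
    x = pd.randn([batch, seq, embed_dim])          # (batch, seq_len, embed_dim)
    # Self-attention: passing None for k and v makes __call__ reuse q for both.
    return mha(x, None, None, attn_mask=None, key_padding_mask=None)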
class BinaryDense(object):
def __init__(self, weights, bias):
self.weights = weights
self.bias = bias
def __call__(self, inputs):
raise NotImplementedError
class DorefaDense(object):
def __init__(self, weights, bias, bitW, bitA):
self.weights = weights
self.bias = bias
self.bitW = bitW
self.bitA = bitA
def __call__(self, inputs):
raise NotImplementedError
class TernaryDense(object):
def __init__(self, weights, bias):
self.weights = weights
self.bias = bias
def __call__(self, inputs):
raise NotImplementedError
class QuanDense(object):
def __init__(self, weights, bias, bitW, bitA):
self.weights = weights
self.bias = bias
self.bitW = bitW
self.bitA = bitA
def __call__(self, inputs):
raise NotImplementedError
class QuanDenseBn(object):
def __init__(
self, weights, scale_para, offset_para, moving_mean, moving_variance, decay, bitW, bitA, epsilon, is_train
):
self.weights = weights
self.scale_para = scale_para
self.offset_para = offset_para
self.moving_mean = moving_mean
self.moving_variance = moving_variance
self.decay = decay
self.bitW = bitW
self.bitA = bitA
self.epsilon = epsilon
self.is_train = is_train
def __call__(self, inputs):
raise NotImplementedError
class TernaryConv(object):
def __init__(self, weights, strides, padding, data_format, dilations):
self.weights = weights
self.strides = strides
self.dilations = dilations
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
def __call__(self, inputs):
raise NotImplementedError
class QuanConv(object):
def __init__(self, weights, strides, padding, data_format, dilations, bitW, bitA):
self.weights = weights
self.strides = strides
self.dilations = dilations
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
self.bitW = bitW
self.bitA = bitA
def __call__(self, inputs):
raise NotImplementedError
class QuanConvBn(object):
def __init__(
self, weights, scale_para, offset_para, moving_mean, moving_variance, strides, padding, data_format, dilations,
bitW, bitA, decay, epsilon, is_train
):
self.weights = weights
self.strides = strides
self.dilations = dilations
self.data_format, self.padding = preprocess_2d_format(data_format, padding)
self.bitW = bitW
self.bitA = bitA
self.scale_para = scale_para
self.offset_para = offset_para
self.moving_mean = moving_mean
self.moving_variance = moving_variance
self.decay = decay
self.epsilon = epsilon
self.is_train = is_train
def __call__(self, inputs):
raise NotImplementedError
class PReLU(object):
def __init__(self, data_format):
self.data_format, _ = preprocess_2d_format(data_format, None)
def __call__(self, input, weight):
return F.prelu(input, weight, data_format=self.data_format)
def prelu(input, weight, data_format):
data_format, _ = preprocess_2d_format(data_format, None)
return F.prelu(input, weight, data_format=data_format)
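# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Minimal example of the module-level `prelu` wrapper above, assuming `pd` is paddle and
# that preprocess_2d_format accepts the 'channels_first' convention used elsewhere in
# this backend; the shapes and slope value below are illustrative assumptions.
def _prelu_usage_example():
    x = pd.randn([2, 3, 8, 8])         # NCHW feature map
    alpha = pd.full([3], 0.25)         # one negative-slope coefficient per channel
    return prelu(x, alpha, data_format='channels_first')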
| 34.15805
| 148
| 0.619459
|
70e8371498b0e891c36ea2b78808ae580cb9fedc
| 1,047
|
py
|
Python
|
alife/alife_shelter.py
|
flags/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 56
|
2015-04-20T08:31:29.000Z
|
2021-12-19T14:05:18.000Z
|
alife/alife_shelter.py
|
HexDecimal/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 2
|
2018-07-24T11:24:41.000Z
|
2021-05-16T03:04:53.000Z
|
alife/alife_shelter.py
|
HexDecimal/Reactor-3
|
b41a2904c9ec8cc14bcee03611602d0e568acf12
|
[
"MIT"
] | 9
|
2015-11-03T02:56:20.000Z
|
2021-04-28T08:19:57.000Z
|
from globals import *
import life as lfe
import references
import judgement
import chunks
import goals
import maps
import random
STATE = 'shelter'
TIER = TIER_SURVIVAL-.1
def get_tier(life):
if not lfe.execute_raw(life, 'discover', 'desires_shelter') and lfe.execute_raw(life, 'state', 'shelter'):
return TIER_IDLE-.1
return TIER
def tick(life):
if 'shelter' not in life['state_flags']:
life['state_flags']['shelter'] = judgement.get_best_shelter(life)
if life['state_flags']['shelter'] not in life['known_chunks']:
judgement.judge_chunk(life, life['state_flags']['shelter'])
if not chunks.get_flag(life, life['state_flags']['shelter'], 'shelter_cover'):
return False
if not list(life['pos'][:2]) in chunks.get_flag(life, life['state_flags']['shelter'], 'shelter_cover'):
if not lfe.path_dest(life) or (not chunks.position_is_in_chunk(lfe.path_dest(life), life['state_flags']['shelter'])):
_cover = chunks.get_flag(life, life['state_flags']['shelter'], 'shelter_cover')
lfe.walk_to(life, random.choice(_cover))
| 29.083333
| 119
| 0.727794
|
5f1f40fdb37c506c6a6922dc33154cee6361df5a
| 1,434
|
py
|
Python
|
agrc/messaging.py
|
stdavis/agrc.python
|
cd9a25c21f283ee01a0e59b2328f29991e396864
|
[
"MIT"
] | 2
|
2016-09-29T16:52:28.000Z
|
2018-05-07T23:08:51.000Z
|
agrc/messaging.py
|
stdavis/agrc.python
|
cd9a25c21f283ee01a0e59b2328f29991e396864
|
[
"MIT"
] | 4
|
2015-02-17T14:23:17.000Z
|
2016-11-08T15:53:35.000Z
|
agrc/messaging.py
|
agrc/agrc.python
|
0e211baa58c99d55e53568648cd22912a4d466b8
|
[
"MIT"
] | 4
|
2015-09-10T13:39:54.000Z
|
2018-07-23T16:08:21.000Z
|
from smtplib import SMTP
from email.mime.text import MIMEText
class Emailer:
# pass testing=True to the constructor to prevent emails from actually being sent
fromAddress = 'noreply@utah.gov'
server = 'send.state.ut.us'
port = 25
def __init__(self, toAddress, testing=False):
"""
split multiple emails addresses with a ';' (e.g. toAddress='hello@test.com;hello2@test.com')
"""
self.testing = testing
if toAddress is not None:
self.toAddress = toAddress
else:
raise Exception('You must provide a toAddress')
if testing:
print('Emailer: Testing only. No emails will be sent!')
def sendEmail(self, subject, body, toAddress=False):
"""
sends an email from self.fromAddress through the configured SMTP server
"""
if not toAddress:
toAddress = self.toAddress
toAddress = toAddress.split(';')
message = MIMEText(body)
message['Subject'] = subject
message['From'] = self.fromAddress
message['To'] = ','.join(toAddress)
if not self.testing:
s = SMTP(self.server, self.port)
s.sendmail(self.fromAddress, toAddress, message.as_string())
s.quit()
print('email sent')
else:
print('***Begin Test Email Message***')
print(message)
print('***End Test Email Message***')
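# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Shows how the Emailer class above might be driven in testing mode, so no SMTP
# connection is opened; the addresses below are placeholders, not real recipients.
if __name__ == '__main__':
    emailer = Emailer('first@example.com;second@example.com', testing=True)
    emailer.sendEmail('Nightly job finished', 'All steps completed without errors.')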
| 28.68
| 100
| 0.580893
|
0b2f87b299d3b2252148a658feea4abd7016f8c3
| 118
|
py
|
Python
|
secrets.py
|
borisboychev/Instagram-bot
|
d6ea1d6e954d081117bd84459ac871b3bb233bc0
|
[
"MIT"
] | null | null | null |
secrets.py
|
borisboychev/Instagram-bot
|
d6ea1d6e954d081117bd84459ac871b3bb233bc0
|
[
"MIT"
] | null | null | null |
secrets.py
|
borisboychev/Instagram-bot
|
d6ea1d6e954d081117bd84459ac871b3bb233bc0
|
[
"MIT"
] | null | null | null |
""""ENTER YOUR CREDENTIALS HERE"""
username = "" # <- ENTER USERNAME HERE
password = "" # <- ENTER PASSWORD HERE
| 23.6
| 39
| 0.627119
|
7b40f6c26b26e8aa8e5b7b3e58579f15d476c2e6
| 1,370
|
py
|
Python
|
tests/ut_repytests_testsocketclose.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 1
|
2021-08-18T05:58:17.000Z
|
2021-08-18T05:58:17.000Z
|
tests/ut_repytests_testsocketclose.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 3
|
2015-11-17T21:01:03.000Z
|
2016-07-14T09:08:04.000Z
|
tests/ut_repytests_testsocketclose.py
|
SeattleTestbed/repy_v1
|
f40a02e2e398b1ec67fede84b41a264ae7356d2c
|
[
"MIT"
] | 5
|
2015-07-02T13:29:23.000Z
|
2021-09-25T07:48:30.000Z
|
#pragma repy
"""
Author: Armon Dadgar
Description:
This tests the behavior of sockets when close() is called and
there is data in the recv buffer. "Bad" behavior would be if
new_conn throws a "Socket closed" exception. Good behavior is
a clean termination.
When we call close(), it is possible that a TCP RST packet
would be sent, and that the receiver of the RST packet
dumps all the data in the recv buffer. Then, if a read
were to be made on the recv buffer, it is empty and the
connection is closed, so a "Socket closed" exception would
be raised.
However, if we do not send a TCP RST, and only send a FIN
packet, then the data from the recv buffer can be read
without causing a "Socket closed" exception.
This test is to guarantee the "good" behavior, by which
we avoid sending a RST packet.
"""
# Handle a new connection
def new_conn(ip,port,sock,ch1,ch2):
# Values to send
values = "1 2 3 4 5 "
# Send this
sock.send(values)
# Get the response
sleep(0.4)
response = sock.recv(4)
# Done
sock.close()
stopcomm(ch2)
if callfunc == "initialize":
# Get the ip
ip = getmyip()
# Setup listener
waitforconn(ip,<connport>,new_conn)
# Connect
sock = openconn(ip,<connport>)
# Read the first 2 numbers
num1 = sock.recv(2)
num2 = sock.recv(2)
# Close now
sock.send("bad!")
sock.close()
| 22.833333
| 63
| 0.692701
|
f43dc6258f7e9dbcd6962376a790c90f57a1a7b4
| 1,585
|
py
|
Python
|
fbtor/test/BitVecConvertTest.py
|
Aimmig/FloatingBoolector
|
039ca8be5e9527613a26ce423c6e4817a0b3a2cd
|
[
"MIT"
] | null | null | null |
fbtor/test/BitVecConvertTest.py
|
Aimmig/FloatingBoolector
|
039ca8be5e9527613a26ce423c6e4817a0b3a2cd
|
[
"MIT"
] | null | null | null |
fbtor/test/BitVecConvertTest.py
|
Aimmig/FloatingBoolector
|
039ca8be5e9527613a26ce423c6e4817a0b3a2cd
|
[
"MIT"
] | null | null | null |
from fbtor.BitVecConvert import BitVecConvStatic
from fbtor.FBoolectorTypes import FPType, RMode, WIDTH, MAN, EXP
from .test_converter import *
import pytest
"""
Test cases for the string to bitstring implementation:
"""
def test_Zero():
for t in FPType:
for r in RMode:
for sign in ["+","-"]:
res = BitVecConvStatic.convertToBinary(sign+"0",t,r)
signbit = "0"
if sign == "-":
signbit ="1"
assert res == (signbit + (t.value[WIDTH]-1)*"0")
def test_Inf():
for t in FPType:
for r in RMode:
for sign in ["+","-"]:
res = BitVecConvStatic.convertToBinary(sign+"inf",t,r)
signbit = "0"
if sign == "-":
signbit ="1"
assert res == (signbit + t.value[EXP]*"1"+ t.value[MAN]*"0")
@pytest.mark.parametrize('inp,s,e,m,fptype,rmode', subnormal)
def test_subnormal(inp,s,e,m, fptype,rmode):
assert BitVecConvStatic.convertToBinary(inp,fptype,rmode) == s+e+m
@pytest.mark.parametrize('inp,s,e,m,fptype,rmode', small)
def test_small(inp,s,e,m, fptype,rmode):
assert BitVecConvStatic.convertToBinary(inp,fptype,rmode) == s+e+m
@pytest.mark.parametrize('inp,s,e,m,fptype,rmode', large)
def test_large(inp,s,e,m, fptype,rmode):
assert BitVecConvStatic.convertToBinary(inp,fptype,rmode) == s+e+m
@pytest.mark.parametrize('inp,s,e,m,fptype,rmode', normal)
def test_normal(inp,s,e,m, fptype,rmode):
assert BitVecConvStatic.convertToBinary(inp,fptype,rmode) == s+e+m
| 35.222222
| 76
| 0.615773
|
b2e9dbbaa4223a1edded083c8c33627936e2e0aa
| 5,593
|
py
|
Python
|
pxr/usd/usdShade/testenv/testUsdShadeCoordSysAPI.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 3,680
|
2016-07-26T18:28:11.000Z
|
2022-03-31T09:55:05.000Z
|
pxr/usd/usdShade/testenv/testUsdShadeCoordSysAPI.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 1,759
|
2016-07-26T19:19:59.000Z
|
2022-03-31T21:24:00.000Z
|
pxr/usd/usdShade/testenv/testUsdShadeCoordSysAPI.py
|
DougRogers-DigitalFish/USD
|
d8a405a1344480f859f025c4f97085143efacb53
|
[
"BSD-2-Clause"
] | 904
|
2016-07-26T18:33:40.000Z
|
2022-03-31T09:55:16.000Z
|
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import sys, unittest
from pxr import Usd, UsdShade, UsdGeom, Sdf
class TestUsdShadeCoordSysAPI(unittest.TestCase):
def test_CoordSys(self):
stage = Usd.Stage.Open('test.usda')
# Test CanContainPropertyName
self.assertTrue(UsdShade.CoordSysAPI.CanContainPropertyName(
stage.GetPropertyAtPath('/World.coordSys:worldSpace').GetName()))
self.assertFalse(UsdShade.CoordSysAPI.CanContainPropertyName(
stage.GetPropertyAtPath('/World.xformOp:translate').GetName()))
world = stage.GetPrimAtPath('/World')
model = stage.GetPrimAtPath('/World/Model')
geom = stage.GetPrimAtPath('/World/Model/Geom')
box = stage.GetPrimAtPath('/World/Model/Geom/Box')
worldCoords = UsdShade.CoordSysAPI(world)
modelCoords = UsdShade.CoordSysAPI(model)
geomCoords = UsdShade.CoordSysAPI(geom)
boxCoords = UsdShade.CoordSysAPI(box)
# Local Bindings
self.assertEqual(
worldCoords.GetLocalBindings(),
[('worldSpace', '/World.coordSys:worldSpace', '/World/Space')])
self.assertEqual(
modelCoords.GetLocalBindings(),
[('instanceSpace', '/World/Model.coordSys:instanceSpace',
'/World/Model'),
('modelSpace', '/World/Model.coordSys:modelSpace',
'/World/Model/Geom'),
('paintSpace', '/World/Model.coordSys:paintSpace',
'/World/Model/Place3dTexture')])
self.assertEqual(
geomCoords.GetLocalBindings(), [])
self.assertEqual(
boxCoords.GetLocalBindings(), [])
# Full (including inherited) Bindings
self.assertEqual(
worldCoords.FindBindingsWithInheritance(),
[('worldSpace', '/World.coordSys:worldSpace', '/World/Space')])
self.assertEqual(
modelCoords.FindBindingsWithInheritance(),
[('instanceSpace', '/World/Model.coordSys:instanceSpace',
'/World/Model'),
('modelSpace', '/World/Model.coordSys:modelSpace',
'/World/Model/Geom'),
('paintSpace', '/World/Model.coordSys:paintSpace',
'/World/Model/Place3dTexture'),
('worldSpace', '/World.coordSys:worldSpace', '/World/Space')])
self.assertEqual(
geomCoords.FindBindingsWithInheritance(),
[('instanceSpace', '/World/Model.coordSys:instanceSpace',
'/World/Model'),
('modelSpace', '/World/Model.coordSys:modelSpace',
'/World/Model/Geom'),
('paintSpace', '/World/Model.coordSys:paintSpace',
'/World/Model/Place3dTexture'),
('worldSpace', '/World.coordSys:worldSpace', '/World/Space')])
self.assertEqual(
boxCoords.FindBindingsWithInheritance(),
[('instanceSpace', '/World/Model.coordSys:instanceSpace',
'/World/Model'),
('modelSpace', '/World/Model.coordSys:modelSpace',
'/World/Model/Geom'),
('paintSpace', '/World/Model.coordSys:paintSpace',
'/World/Model/Place3dTexture'),
('worldSpace', '/World.coordSys:worldSpace', '/World/Space')])
# Bind
relName = UsdShade.CoordSysAPI.GetCoordSysRelationshipName('boxSpace')
self.assertFalse(geom.HasRelationship(relName))
geomCoords.Bind('boxSpace', box.GetPath())
self.assertTrue(geom.HasRelationship(relName))
self.assertEqual(
geomCoords.GetLocalBindings(),
[('boxSpace', '/World/Model/Geom.coordSys:boxSpace',
box.GetPath())])
# BlockBinding
self.assertTrue(geom.HasRelationship(relName))
self.assertTrue(geom.GetRelationship(relName).HasAuthoredTargets())
self.assertNotEqual(geom.GetRelationship(relName).GetTargets(), [])
geomCoords.BlockBinding('boxSpace')
self.assertEqual(
geomCoords.GetLocalBindings(), [])
self.assertEqual(geom.GetRelationship(relName).GetTargets(), [])
self.assertTrue(geom.GetRelationship(relName).HasAuthoredTargets())
# ClearBinding
geomCoords.ClearBinding('boxSpace', False)
self.assertTrue(geom.HasRelationship(relName))
self.assertFalse(geom.GetRelationship(relName).HasAuthoredTargets())
geomCoords.ClearBinding('boxSpace', True)
self.assertFalse(geom.HasRelationship(relName))
if __name__ == '__main__':
unittest.main()
| 44.744
| 78
| 0.650456
|
47aea8df3617f50ad11ea9a1efac0b4af064b14f
| 6,856
|
py
|
Python
|
pluginsmanager/model/system/system_effect.py
|
SpotlightKid/PluginsManager
|
2dcc9f6a79b48e9c9be82efffd855352fa15c5c7
|
[
"Apache-2.0"
] | 9
|
2017-05-24T09:55:34.000Z
|
2020-06-22T03:55:51.000Z
|
pluginsmanager/model/system/system_effect.py
|
SpotlightKid/PluginsManager
|
2dcc9f6a79b48e9c9be82efffd855352fa15c5c7
|
[
"Apache-2.0"
] | 97
|
2016-11-17T16:30:35.000Z
|
2021-10-09T00:27:56.000Z
|
pluginsmanager/model/system/system_effect.py
|
SpotlightKid/PluginsManager
|
2dcc9f6a79b48e9c9be82efffd855352fa15c5c7
|
[
"Apache-2.0"
] | 3
|
2017-05-21T19:20:38.000Z
|
2019-11-04T23:53:59.000Z
|
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pluginsmanager.model.effect import Effect
from pluginsmanager.model.system.system_input import SystemInput
from pluginsmanager.model.system.system_output import SystemOutput
from pluginsmanager.model.system.system_midi_input import SystemMidiInput
from pluginsmanager.model.system.system_midi_output import SystemMidiOutput
from pluginsmanager.util.dict_tuple import DictTuple
class SystemEffect(Effect):
"""
Representation of the system instance: audio and/or midi interfaces.
System output is equivalent to audio input: you connect the
instrument to the audio card's input, which captures the audio and
sends it to :class:`.SystemOutput` so you can connect it to plugins' inputs.
System input is equivalent to audio output: the audio card receives
the audio processed by your :class:`.SystemInput` and sends it to the
audio card's output, where you connect an amplifier or headset.
Because autodetection of the audio card's existing ports has not been
implemented, you must list them explicitly when creating the
SystemEffect object::
>>> sys_effect = SystemEffect('system', ('capture_1', 'capture_2'), ('playback_1', 'playback_2'))
Unlike ordinary effects, which should be added to the pedalboard, a SystemEffect MUST NOT be added::
>>> builder = Lv2EffectBuilder()
>>> pedalboard = Pedalboard('Rocksmith')
>>> reverb = builder.build('http://calf.sourceforge.net/plugins/Reverb')
>>> pedalboard.append(reverb)
However the pedalboard must have the connections::
>>> pedalboard.connect(sys_effect.outputs[0], reverb.inputs[0])
A bypass example::
>>> pedalboard = Pedalboard('Bypass example')
>>> sys_effect = SystemEffect('system', ('capture_1', 'capture_2'), ('playback_1', 'playback_2'))
>>> pedalboard.connect(sys_effect.outputs[0], sys_effect.inputs[0])
>>> pedalboard.connect(sys_effect.outputs[1], sys_effect.inputs[1])
You can create multiple SystemEffect instances for multiple audio/midi interfaces. In the following example,
Jack provides the system audio ports and two midi ports are provided by a separate ttymidi device::
>>> audio_system = SystemEffect('system', inputs=['playback_1', 'playback_2'])
>>> midi_system = SystemEffect('ttymidi', midi_outputs=['MIDI_in'], midi_inputs=['MIDI_out'])
>>> pedalboard = Pedalboard('MDA-EP')
>>> ep = builder.build('http://moddevices.com/plugins/mda/EPiano')
>>> pedalboard.connect(ep.outputs[0], audio_system.inputs[0])
>>> pedalboard.connect(ep.outputs[1], audio_system.inputs[1])
>>> pedalboard.connect(audio_system.midi_outputs[0], ep.midi_inputs[0])
You can check the audio/midi ports defined in your environment using `jack_lsp`_::
root@zynthian:~ # As example in Zynthian project: http://zynthian.org
root@zynthian:~ jack_lsp -A
system:playback_1
alsa_pcm:hw:0:in1
system:playback_2
alsa_pcm:hw:0:in2
ttymidi:MIDI_in
ttymidi:MIDI_out
Zyncoder:output
Zyncoder:input
.. _jack_lsp: http://manpages.ubuntu.com/manpages/xenial/man1/jack_lsp.1.html
If you prefer, you can use a single SystemEffect by aliasing the ports::
localhost@localdomain:~ jack_alias system:midi_capture1 ttymidi:MIDI_in
localhost@localdomain:~ jack_alias system:midi_playback1 ttymidi:MIDI_out
>>> sys_effect = SystemEffect(
... 'system',
... inputs=['playback_1', 'playback_2'],
... midi_outputs=['midi_capture1'],
... midi_inputs=['midi_playback1']
... )
>>> pedalboard = Pedalboard('MDA-EP')
>>> ep = builder.build('http://moddevices.com/plugins/mda/EPiano')
>>> pedalboard.connect(ep.outputs[0], sys_effect.inputs[0])
>>> pedalboard.connect(ep.outputs[1], sys_effect.inputs[1])
>>> pedalboard.connect(sys_effect.midi_outputs[0], ep.midi_inputs[0])
:param string representation: Audio card representation. Usually 'system'
:param tuple(string) outputs: Tuple of output representations. Usually an output representation
starts with ``capture_``
:param tuple(string) inputs: Tuple of input representations. Usually an input representation
starts with ``playback_``
:param tuple(string) midi_outputs: Tuple of midi outputs representation.
:param tuple(string) midi_inputs: Tuple of midi inputs representation.
"""
def __init__(self, representation, outputs=None, inputs=None, midi_outputs=None, midi_inputs=None):
super(SystemEffect, self).__init__()
self.representation = representation
self._params = tuple()
inputs = inputs if inputs is not None else []
inputs = [SystemInput(self, effect_input) for effect_input in inputs]
self._inputs = DictTuple(inputs, lambda _input: str(_input))
outputs = outputs if outputs is not None else []
outputs = [SystemOutput(self, effect_output) for effect_output in outputs]
self._outputs = DictTuple(outputs, lambda _output: str(_output))
midi_inputs = midi_inputs if midi_inputs is not None else []
midi_inputs = [SystemMidiInput(self, effect_input) for effect_input in midi_inputs]
self._midi_inputs = DictTuple(midi_inputs, lambda _input: str(_input))
midi_outputs = midi_outputs if midi_outputs is not None else []
midi_outputs = [SystemMidiOutput(self, effect_output) for effect_output in midi_outputs]
self._midi_outputs = DictTuple(midi_outputs, lambda _output: str(_output))
def __str__(self):
return self.representation
@property
def __dict__(self):
return {
'technology': 'system',
}
@property
def is_possible_connect_itself(self):
"""
return bool: Is it possible to connect the effect with itself?
"""
return True
@property
def is_unique_for_all_pedalboards(self):
"""
return bool: Is unique for all pedalboards?
Example: :class:`.SystemEffect` is unique for all pedalboards
"""
return True
@property
def use_real_identifier(self):
"""
return bool: For this audio plugin, is it necessary to use the real effect identifier?
"""
return True
| 41.551515
| 103
| 0.695741
|
17f81bc5a26cc05a274106a86bf22043abde1f97
| 329
|
py
|
Python
|
rastervision/v2/learner/learner_pipeline.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2019-11-07T10:02:23.000Z
|
2019-11-07T10:02:23.000Z
|
rastervision/v2/learner/learner_pipeline.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/v2/learner/learner_pipeline.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
from rastervision.v2.core.pipeline import Pipeline
LEARNER_PIPELINE = 'learner_pipeline'
class LearnerPipeline(Pipeline):
commands = ['train']
gpu_commands = ['train']
def train(self):
learner_cfg = self.config.learner
learner = learner_cfg.build(learner_cfg, self.tmp_dir)
learner.main()
| 23.5
| 62
| 0.699088
|
b65b70d840a6989ac50381689635aa979903c55f
| 163
|
py
|
Python
|
artext/__init__.py
|
nlpcl-lab/artext
|
e216fae54932d4f0dc47b251bc67416d687dbcf3
|
[
"MIT"
] | null | null | null |
artext/__init__.py
|
nlpcl-lab/artext
|
e216fae54932d4f0dc47b251bc67416d687dbcf3
|
[
"MIT"
] | 1
|
2019-11-23T05:36:14.000Z
|
2019-12-06T11:16:06.000Z
|
artext/__init__.py
|
nlpcl-lab/artext
|
e216fae54932d4f0dc47b251bc67416d687dbcf3
|
[
"MIT"
] | null | null | null |
from artext import config
from artext import utils
from artext.artext import Artext
version_info = (0, 2, 5)
__version__ = '.'.join(str(c) for c in version_info)
| 23.285714
| 52
| 0.754601
|
68c348959272043b3442a15128f4448b580f83ad
| 52,243
|
py
|
Python
|
src/skmultiflow/metrics/measure_collection.py
|
mertozer94/scikit-multiflow
|
a6e719cad900805a85d17143c05a3da9dd4987e8
|
[
"BSD-3-Clause"
] | null | null | null |
src/skmultiflow/metrics/measure_collection.py
|
mertozer94/scikit-multiflow
|
a6e719cad900805a85d17143c05a3da9dd4987e8
|
[
"BSD-3-Clause"
] | null | null | null |
src/skmultiflow/metrics/measure_collection.py
|
mertozer94/scikit-multiflow
|
a6e719cad900805a85d17143c05a3da9dd4987e8
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from skmultiflow.utils.data_structures import FastBuffer, FastComplexBuffer, ConfusionMatrix, MOLConfusionMatrix
from skmultiflow.utils import check_weights
from timeit import default_timer as timer
class ClassificationMeasurements(object):
""" Class used to keep updated statistics about a classifier, in order
to be able to provide, at any given moment, any relevant metric about
that classifier.
It combines a ConfusionMatrix object, with some additional statistics,
to compute a range of performance metrics.
To keep its statistics updated, the class requires only two pieces of
information: the predictions and the true labels.
At any given moment, it can compute the following statistics: accuracy,
kappa, kappa_t, kappa_m, majority_class and error rate.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
Examples
--------
"""
def __init__(self, targets=None, dtype=np.int64):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 2
self.confusion_matrix = ConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.last_sample = None
self.sample_count = 0
self.majority_classifier = 0
self.correct_no_change = 0
self.targets = targets
def reset(self):
if self.targets is not None:
self.n_targets = len(self.targets)
else:
self.n_targets = 2
self.last_true_label = None
self.last_prediction = None
self.last_sample = None
self.sample_count = 0
self.majority_classifier = 0
self.correct_no_change = 0
self.confusion_matrix.restart(self.n_targets)
def add_result(self, y_true, y_pred, weight=1.0):
""" Updates its statistics with the results of a prediction.
Parameters
----------
y_true: int
The true label.
y_pred: int
The classifier's prediction
weight: float
Sample's weight
"""
check_weights(weight)
true_y = self._get_target_index(y_true, True)
pred = self._get_target_index(y_pred, True)
self.confusion_matrix.update(true_y, pred)
self.sample_count += 1
if self.get_majority_class() == y_true:
self.majority_classifier += weight
if self.last_true_label == y_true:
self.correct_no_change += weight
self.last_true_label = y_true
self.last_prediction = y_pred
def get_last(self):
return self.last_true_label, self.last_prediction
def get_majority_class(self):
""" Computes the true majority class.
Returns
-------
int
The true majority class.
"""
if (self.n_targets is None) or (self.n_targets == 0):
return False
majority_class = 0
max_prob = 0.0
for i in range(self.n_targets):
sum_value = 0.0
for j in range(self.n_targets):
sum_value += self.confusion_matrix.value_at(i, j)
sum_value = sum_value / self.sample_count
if sum_value > max_prob:
max_prob = sum_value
majority_class = i
return majority_class
def get_accuracy(self):
""" Computes the accuracy.
Returns
-------
float
The accuracy.
"""
sum_value = 0.0
n, _ = self.confusion_matrix.shape()
for i in range(n):
sum_value += self.confusion_matrix.value_at(i, i)
try:
return sum_value / self.sample_count
except ZeroDivisionError:
return 0.0
def get_incorrectly_classified_ratio(self):
return 1.0 - self.get_accuracy()
def _get_target_index(self, target, add_label=False):
""" Computes the index of an element in the self.targets list.
Also reshapes the ConfusionMatrix and adds new found targets
if add is True.
Parameters
----------
target: int
A class label.
add_label: bool
Either to add new found labels to the targets list or not.
Returns
-------
int
The target index in the self.targets list.
"""
if (self.targets is None) and add_label:
self.targets = []
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
elif (self.targets is None) and (not add_label):
return None
if (target not in self.targets) and add_label:
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
for i in range(len(self.targets)):
if self.targets[i] == target:
return i
return None
def get_kappa(self):
""" Computes the Cohen's kappa coefficient.
Returns
-------
float
The Cohen's kappa coefficient.
"""
p0 = self.get_accuracy()
pc = 0.0
n_rows, n_cols = self.confusion_matrix.shape()
for i in range(n_rows):
row = self.confusion_matrix.row(i)
column = self.confusion_matrix.column(i)
sum_row = np.sum(row) / self.sample_count
sum_column = np.sum(column) / self.sample_count
pc += sum_row * sum_column
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_t(self):
""" Computes the Cohen's kappa T coefficient. This measures the
temporal correlation between samples.
Returns
-------
float
The Cohen's kappa T coefficient.
"""
p0 = self.get_accuracy()
if self.sample_count != 0:
pc = self.correct_no_change / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_g_mean(self):
""" Compute the G-mean of the classifier.
Returns
-------
float
The G-mean
"""
tn = self.confusion_matrix.value_at(0, 0)
fp = self.confusion_matrix.value_at(0, 1)
if tn + fp == 0:
specificity = 0
else:
specificity = tn / (tn + fp)
sensitivity = self.get_recall()
return np.sqrt((sensitivity * specificity))
def get_f1_score(self):
""" Compute the F1-score of the classifier.
Returns
-------
float
The F1-score
"""
precision = self.get_precision()
recall = self.get_recall()
if recall + precision == 0:
return 0.0
else:
return 2 * (precision * recall) / (precision + recall)
def get_precision(self):
""" compute the precision of the classifier.
Returns
-------
float
The precision
"""
tp = self.confusion_matrix.value_at(1, 1)
fp = self.confusion_matrix.value_at(0, 1)
if tp + fp == 0:
return 0.0
else:
return tp / (tp + fp)
def get_recall(self):
""" Compute the recall.
Returns
-------
float
The recall.
"""
tp = self.confusion_matrix.value_at(1, 1)
fn = self.confusion_matrix.value_at(1, 0)
if tp + fn == 0:
return 0.0
else:
return tp / (tp + fn)
def get_kappa_m(self):
""" Computes the Cohen's kappa M coefficient.
Returns
-------
float
The Cohen's kappa M coefficient.
"""
p0 = self.get_accuracy()
if self.sample_count != 0:
pc = self.majority_classifier / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
@property
def _matrix(self):
return self.confusion_matrix.matrix
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - accuracy: {:.6f}'.format(self.get_accuracy()) + \
' - kappa: {:.6f}'.format(self.get_kappa()) + \
' - kappa_t: {:.6f}'.format(self.get_kappa_t()) + \
' - kappa_m: {:.6f}'.format(self.get_kappa_m()) + \
' - f1-score: {:.6f}'.format(self.get_f1_score()) + \
' - precision: {:.6f}'.format(self.get_precision()) + \
' - recall: {:.6f}'.format(self.get_recall()) + \
' - G-mean: {:.6f}'.format(self.get_g_mean()) + \
' - majority_class: {}'.format(self.get_majority_class())
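# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Minimal example of feeding a prediction stream into ClassificationMeasurements and
# reading back global metrics; the label stream below is made up for illustration.
def _classification_measurements_example():
    measurements = ClassificationMeasurements(targets=[0, 1])
    y_true = [0, 1, 1, 0, 1, 1]
    y_pred = [0, 1, 0, 0, 1, 1]
    for yt, yp in zip(y_true, y_pred):
        measurements.add_result(yt, yp)
    return measurements.get_accuracy(), measurements.get_kappa(), measurements.get_f1_score()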
class WindowClassificationMeasurements(object):
""" This class will maintain a fixed sized window of the newest information
about one classifier. It can provide, as requested, any of the relevant
current metrics about the classifier, measured inside the window.
To keep track of statistics inside a window, the class will use a
ConfusionMatrix object, alongside FastBuffers, to simulate fixed sized
windows of the important classifier's attributes.
Its functionality is somewhat similar to those of the
ClassificationMeasurements class. The difference is that the statistics
kept by this class are local, or partial, while the statistics kept by
the ClassificationMeasurements class are global.
At any given moment, it can compute the following statistics: accuracy,
kappa, kappa_t, kappa_m, majority_class and error rate.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
window_size: int (Default: 200)
The width of the window. Determines how many samples the object
can see.
Examples
--------
"""
def __init__(self, targets=None, dtype=np.int64, window_size=200):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 2
self.confusion_matrix = ConfusionMatrix(self.n_targets, dtype)
self.last_class = None
self.targets = targets
self.window_size = window_size
self.true_labels = FastBuffer(window_size)
self.predictions = FastBuffer(window_size)
self.temp = 0
self.last_prediction = None
self.last_true_label = None
self.last_sample = None
self.majority_classifier = 0
self.correct_no_change = 0
self.majority_classifier_correction = FastBuffer(window_size)
self.correct_no_change_correction = FastBuffer(window_size)
def reset(self):
if self.targets is not None:
self.n_targets = len(self.targets)
else:
self.n_targets = 2
self.true_labels = FastBuffer(self.window_size)
self.predictions = FastBuffer(self.window_size)
self.temp = 0
self.last_prediction = None
self.last_true_label = None
self.last_sample = None
self.majority_classifier = 0
self.correct_no_change = 0
self.confusion_matrix.restart(self.n_targets)
self.majority_classifier_correction = FastBuffer(self.window_size)
self.correct_no_change_correction = FastBuffer(self.window_size)
def add_result(self, y_true, y_pred, weight=1.0):
""" Updates its statistics with the results of a prediction.
If needed it will remove samples from the observation window.
Parameters
----------
y_true: int
The true label.
y_pred: int
The classifier's prediction
weight: float
Sample's weight
"""
check_weights(weight)
true_y = self._get_target_index(y_true, True)
pred = self._get_target_index(y_pred, True)
old_true = self.true_labels.add_element(np.array([y_true]))
old_predict = self.predictions.add_element(np.array([y_pred]))
# Verify if it's needed to decrease the count of any label
# pair in the confusion matrix
if (old_true is not None) and (old_predict is not None):
self.temp += 1
self.confusion_matrix.remove(self._get_target_index(old_true[0]),
self._get_target_index(old_predict[0]))
self.correct_no_change += self.correct_no_change_correction.peek()
self.majority_classifier += self.majority_classifier_correction.peek()
# Update the majority_classifier count for the new sample and queue its window correction
if (self.get_majority_class() == y_true) and (self.get_majority_class() is not None):
self.majority_classifier += weight
self.majority_classifier_correction.add_element([-1])
else:
self.majority_classifier_correction.add_element([0])
# Update the correct_no_change count for the new sample and queue its window correction
if (self.last_true_label == y_true) and (self.last_true_label is not None):
self.correct_no_change += weight
self.correct_no_change_correction.add_element([-1])
else:
self.correct_no_change_correction.add_element([0])
self.confusion_matrix.update(true_y, pred, weight=weight)
self.last_true_label = y_true
self.last_prediction = y_pred
def get_last(self):
return self.last_true_label, self.last_prediction
def get_majority_class(self):
""" Computes the window/current true majority class.
Returns
-------
int
The true window/current majority class.
"""
if (self.n_targets is None) or (self.n_targets == 0):
return None
majority_class = 0
max_prob = 0.0
for i in range(self.n_targets):
sum_value = 0.0
for j in range(self.n_targets):
sum_value += self.confusion_matrix.value_at(i, j)
sum_value = sum_value / self.true_labels.get_current_size()
if sum_value > max_prob:
max_prob = sum_value
majority_class = i
return majority_class
def get_accuracy(self):
""" Computes the window/current accuracy.
Returns
-------
float
The window/current accuracy.
"""
sum_value = 0.0
n, _ = self.confusion_matrix.shape()
for i in range(n):
sum_value += self.confusion_matrix.value_at(i, i)
try:
return sum_value / self.true_labels.get_current_size()
except ZeroDivisionError:
return 0.0
def get_incorrectly_classified_ratio(self):
return 1.0 - self.get_accuracy()
def _get_target_index(self, target, add=False):
""" Computes the index of an element in the self.targets list.
Also reshapes the ConfusionMatrix and adds new found targets
if add is True.
Parameters
----------
target: int
A class label.
add: bool
Either to add new found labels to the targets list or not.
Returns
-------
int
The target index in the self.targets list.
"""
if (self.targets is None) and add:
self.targets = []
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
elif (self.targets is None) and (not add):
return None
if target not in self.targets and add:
self.targets.append(target)
self.n_targets = len(self.targets)
self.confusion_matrix.reshape(len(self.targets), len(self.targets))
for i in range(len(self.targets)):
if self.targets[i] == target:
return i
return None
def get_kappa(self):
""" Computes the window/current Cohen's kappa coefficient.
Returns
-------
float
The window/current Cohen's kappa coefficient.
"""
p0 = self.get_accuracy()
pc = 0.0
n_rows, n_cols = self.confusion_matrix.shape()
for i in range(n_rows):
row = self.confusion_matrix.row(i)
column = self.confusion_matrix.column(i)
sum_row = np.sum(row) / self.true_labels.get_current_size()
sum_column = np.sum(column) / self.true_labels.get_current_size()
pc += sum_row * sum_column
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_t(self):
""" Computes the window/current Cohen's kappa T coefficient. This measures
the temporal correlation between samples.
Returns
-------
float
The window/current Cohen's kappa T coefficient.
"""
p0 = self.get_accuracy()
if self.sample_count != 0:
pc = self.correct_no_change / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_kappa_m(self):
""" Computes the window/current Cohen's kappa M coefficient.
Returns
-------
float
The window/current Cohen's kappa M coefficient.
"""
p0 = self.get_accuracy()
if self.sample_count != 0:
pc = self.majority_classifier / self.sample_count
else:
pc = 0
if pc == 1:
return 1
return (p0 - pc) / (1.0 - pc)
def get_g_mean(self):
""" Compute the G-mean of the classifier.
Returns
-------
float
The G-mean
"""
tn = self.confusion_matrix.value_at(0, 0)
fp = self.confusion_matrix.value_at(0, 1)
if tn + fp == 0:
specificity = 0
else:
specificity = tn / (tn + fp)
sensitivity = self.get_recall()
return np.sqrt((sensitivity * specificity))
def get_f1_score(self):
""" Compute the F1-score of the classifier.
Returns
-------
float
The F1-score
"""
precision = self.get_precision()
recall = self.get_recall()
if recall + precision == 0:
return 0.0
else:
return 2 * (precision * recall) / (precision + recall)
def get_precision(self):
""" compute the precision of the classifier.
Returns
-------
float
The precision
"""
tp = self.confusion_matrix.value_at(1, 1)
fp = self.confusion_matrix.value_at(0, 1)
if tp + fp == 0:
return 0.0
else:
return tp / (tp + fp)
def get_recall(self):
""" Compute the recall.
Returns
-------
float
The recall.
"""
tp = self.confusion_matrix.value_at(1, 1)
fn = self.confusion_matrix.value_at(1, 0)
if tp + fn == 0:
return 0.0
else:
return tp / (tp + fn)
@property
def _matrix(self):
return self.confusion_matrix.matrix
@property
def sample_count(self):
return self.true_labels.get_current_size()
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - window_size: {}'.format(self.window_size) + \
' - accuracy: {:.6f}'.format(self.get_accuracy()) + \
' - kappa: {:.6f}'.format(self.get_kappa()) + \
' - kappa_t: {:.6f}'.format(self.get_kappa_t()) + \
' - kappa_m: {:.6f}'.format(self.get_kappa_m()) + \
' - f1-score: {:.6f}'.format(self.get_f1_score()) + \
' - precision: {:.6f}'.format(self.get_precision()) + \
' - recall: {:.6f}'.format(self.get_recall()) + \
' - G-mean: {:.6f}'.format(self.get_g_mean()) + \
' - majority_class: {}'.format(self.get_majority_class())
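# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Same kind of stream as the global example above, but evaluated inside a sliding window
# of the three most recent samples; the stream values are illustrative only.
def _window_classification_measurements_example():
    window = WindowClassificationMeasurements(targets=[0, 1], window_size=3)
    for yt, yp in zip([0, 1, 1, 0, 1, 1], [0, 1, 0, 0, 1, 1]):
        window.add_result(yt, yp)
    # The reported metrics reflect only the last `window_size` samples.
    return window.get_accuracy(), window.get_kappa_m()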
class MultiTargetClassificationMeasurements(object):
""" This class will keep updated statistics about a multi output classifier,
using a confusion matrix adapted to multi output problems, the
MOLConfusionMatrix, alongside other relevant attributes.
The performance metrics for multi output tasks are different from those used
for normal classification tasks. Thus, the statistics provided by this class
are different from those provided by the ClassificationMeasurements and from
the WindowClassificationMeasurements.
At any given moment, it can compute the following statistics: hamming_loss,
hamming_score, exact_match and j_index.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
Examples
--------
"""
def __init__(self, targets=None, dtype=np.int64):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = MOLConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.sample_count = 0
self.targets = targets
self.exact_match_count = 0
self.j_sum = 0
def reset(self):
if self.targets is not None:
self.n_targets = len(self.targets)
else:
self.n_targets = 0
self.confusion_matrix.restart(self.n_targets)
self.last_true_label = None
self.last_prediction = None
self.sample_count = 0
self.exact_match_count = 0
self.j_sum = 0
def add_result(self, y_true, y_pred):
""" Updates its statistics with the results of a prediction.
Adds the result to the MOLConfusionMatrix and update exact_matches and
j-index sum counts.
Parameters
----------
y_true: list or numpy.ndarray
The true label.
y_pred: list or numpy.ndarray
The classifier's prediction
"""
self.last_true_label = y_true
self.last_prediction = y_pred
m = 0
if isinstance(y_true, np.ndarray):
m = y_true.size
elif isinstance(y_true, list):
m = len(y_true)
self.n_targets = m
equal = True
for i in range(m):
self.confusion_matrix.update(i, y_true[i], y_pred[i])
# update exact_match count
if y_true[i] != y_pred[i]:
equal = False
# update exact_match
if equal:
self.exact_match_count += 1
# update j_index count
inter = sum((y_true * y_pred) > 0) * 1.
union = sum((y_true + y_pred) > 0) * 1.
if union > 0:
self.j_sum += inter / union
elif np.sum(y_true) == 0:
self.j_sum += 1
self.sample_count += 1
def get_last(self):
return self.last_true_label, self.last_prediction
def get_hamming_loss(self):
""" Computes the Hamming loss, which is the complement of the
Hamming score metric.
Returns
-------
float
The hamming loss.
"""
return 1.0 - self.get_hamming_score()
def get_hamming_score(self):
""" Computes the Hamming score, defined as the number of correctly
classified labels divided by the total number of labels classified.
Returns
-------
float
The Hamming score.
"""
try:
return self.confusion_matrix.get_sum_main_diagonal() / (self.sample_count * self.n_targets)
except ZeroDivisionError:
return 0.0
def get_exact_match(self):
""" Computes the exact match metric.
This is the most strict multi output metric, defined as the number of
samples that have all their labels correctly classified, divided by the
total number of samples.
Returns
-------
float
The exact match metric.
"""
return self.exact_match_count / self.sample_count
def get_j_index(self):
""" Computes the Jaccard index, also known as the intersection over union
metric. It is calculated by dividing the number of correctly classified
labels by the union of predicted and true labels.
Returns
-------
float
The Jaccard index.
"""
return self.j_sum / self.sample_count
def get_total_sum(self):
return self.confusion_matrix.get_total_sum()
@property
def _matrix(self):
return self.confusion_matrix.matrix
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - hamming_loss: {:.6f}'.format(self.get_hamming_loss()) + \
' - hamming_score: {:.6f}'.format(self.get_hamming_score()) + \
' - exact_match: {:.6f}'.format(self.get_exact_match()) + \
' - j_index: {:.6f}'.format(self.get_j_index())
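# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Small multi-label example: each sample carries three binary labels; the arrays below
# are made-up values used only to illustrate the metric calls.
def _multi_target_classification_measurements_example():
    measurements = MultiTargetClassificationMeasurements()
    y_true = np.array([[1, 0, 1], [0, 1, 1]])
    y_pred = np.array([[1, 0, 0], [0, 1, 1]])
    for yt, yp in zip(y_true, y_pred):
        measurements.add_result(yt, yp)
    return (measurements.get_hamming_score(),
            measurements.get_exact_match(),
            measurements.get_j_index())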
class WindowMultiTargetClassificationMeasurements(object):
""" This class will maintain a fixed sized window of the newest information
about one classifier. It can provide, as requested, any of the relevant
current metrics about the classifier, measured inside the window.
This class will keep updated statistics about a multi output classifier,
using a confusion matrix adapted to multi output problems, the
MOLConfusionMatrix, alongside other of the classifier's relevant
attributes stored in ComplexFastBuffer objects, which will simulate
fixed sized windows.
Its functionality is somewhat similar to that of the
MultiTargetClassificationMeasurements class. The difference is that the statistics
kept by this class are local, or partial, while the statistics kept by
the MultiTargetClassificationMeasurements class are global.
At any given moment, it can compute the following statistics: hamming_loss,
hamming_score, exact_match and j_index.
Parameters
----------
targets: list
A list containing the possible labels.
dtype: data type (Default: numpy.int64)
The data type of the existing labels.
window_size: int (Default: 200)
The width of the window. Determines how many samples the object
can see.
Examples
--------
"""
def __init__(self, targets=None, dtype=np.int64, window_size=200):
super().__init__()
if targets is not None:
self.n_targets = len(targets)
else:
self.n_targets = 0
self.confusion_matrix = MOLConfusionMatrix(self.n_targets, dtype)
self.last_true_label = None
self.last_prediction = None
self.targets = targets
self.window_size = window_size
self.exact_match_count = 0
self.j_sum = 0
self.true_labels = FastComplexBuffer(window_size, self.n_targets)
self.predictions = FastComplexBuffer(window_size, self.n_targets)
def reset(self):
if self.targets is not None:
self.n_targets = len(self.targets)
else:
self.n_targets = 0
self.confusion_matrix.restart(self.n_targets)
self.last_true_label = None
self.last_prediction = None
self.exact_match_count = 0
self.j_sum = 0
self.true_labels = FastComplexBuffer(self.window_size, self.n_targets)
self.predictions = FastComplexBuffer(self.window_size, self.n_targets)
def add_result(self, y_true, y_pred):
""" Updates its statistics with the results of a prediction.
Adds the result to the MOLConfusionMatrix, and updates the
ComplexFastBuffer objects.
Parameters
----------
y_true: list or numpy.ndarray
The true label.
y_pred: list or numpy.ndarray
The classifier's prediction
"""
self.last_true_label = y_true
self.last_prediction = y_pred
m = 0
if hasattr(y_true, 'size'):
m = y_true.size
elif hasattr(y_true, 'append'):
m = len(y_true)
self.n_targets = m
for i in range(m):
self.confusion_matrix.update(i, y_true[i], y_pred[i])
old_true = self.true_labels.add_element(y_true)
old_predict = self.predictions.add_element(y_pred)
if (old_true is not None) and (old_predict is not None):
for i in range(m):
self.confusion_matrix.remove(old_true[0][i], old_predict[0][i])
def get_last(self):
return self.last_true_label, self.last_prediction
def get_hamming_loss(self):
""" Computes the window/current Hamming loss, which is the
complement of the Hamming score metric.
Returns
-------
float
The window/current hamming loss.
"""
return 1.0 - self.get_hamming_score()
def get_hamming_score(self):
""" Computes the window/current Hamming score, defined as the number of
correctly classified labels divided by the total number of labels
classified.
Returns
-------
float
The window/current hamming score.
"""
return hamming_score(self.true_labels.get_queue(), self.predictions.get_queue())
def get_exact_match(self):
""" Computes the window/current exact match metric.
This is the most strict multi output metric, defined as the number of
samples that have all their labels correctly classified, divided by the
total number of samples.
Returns
-------
float
The window/current exact match metric.
"""
return exact_match(self.true_labels.get_queue(), self.predictions.get_queue())
def get_j_index(self):
""" Computes the window/current Jaccard index, also known as the intersection
over union metric. It is calculated by dividing the number of correctly
classified labels by the union of predicted and true labels.
Returns
-------
float
The window/current Jaccard index.
"""
return j_index(self.true_labels.get_queue(), self.predictions.get_queue())
def get_total_sum(self):
return self.confusion_matrix.get_total_sum()
@property
def matrix(self):
return self.confusion_matrix.matrix
@property
def sample_count(self):
return self.true_labels.get_current_size()
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - hamming_loss: {:.6f}'.format(self.get_hamming_loss()) + \
' - hamming_score: {:.6f}'.format(self.get_hamming_score()) + \
' - exact_match: {:.6f}'.format(self.get_exact_match()) + \
' - j_index: {:.6f}'.format(self.get_j_index())
class RegressionMeasurements(object):
""" This class is used to keep updated statistics over a regression
learner in a regression problem context.
It will keep track of global metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: MSE (mean square error) and MAE (mean absolute error).
"""
def __init__(self):
super().__init__()
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def reset(self):
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def add_result(self, y_true, y_pred):
""" Use the true value and the prediction to update the statistics.
Parameters
----------
y_true: float
The true value.
y_pred: float
The predicted value.
"""
self.last_true_label = y_true
self.last_prediction = y_pred
self.total_square_error += (y_true - y_pred) * (y_true - y_pred)
self.average_error += np.absolute(y_true - y_pred)
self.sample_count += 1
def get_mean_square_error(self):
""" Computes the mean square error.
Returns
-------
float
The mean square error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.total_square_error / self.sample_count
def get_average_error(self):
""" Computes the average error.
Returns
-------
float
The average error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.average_error / self.sample_count
def get_last(self):
return self.last_true_label, self.last_prediction
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - mean_square_error: {:.6f}'.format(self.get_mean_square_error()) + \
' - mean_absolute_error: {:.6f}'.format(self.get_average_error())
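# --- Hedged usage sketch (added for illustration; not part of the original source) ---
# Minimal regression example; the target/prediction pairs are made-up values used only
# to show how the squared and absolute errors are accumulated.
def _regression_measurements_example():
    measurements = RegressionMeasurements()
    for y_true, y_pred in [(3.0, 2.5), (5.0, 5.5), (7.0, 6.0)]:
        measurements.add_result(y_true, y_pred)
    return measurements.get_mean_square_error(), measurements.get_average_error()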
class WindowRegressionMeasurements(object):
""" This class is used to keep updated statistics over a regression
learner in a regression problem context inside a fixed sized window.
It uses FastBuffer objects to simulate the fixed sized windows.
It will keep track of partial metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: MSE (mean square error) and MAE (mean absolute error).
"""
def __init__(self, window_size=200):
super().__init__()
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(window_size)
self.average_error_correction = FastBuffer(window_size)
self.window_size = window_size
def reset(self):
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(self.window_size)
self.average_error_correction = FastBuffer(self.window_size)
def add_result(self, y_true, y_pred):
""" Use the true value and the prediction to update the statistics.
Parameters
----------
y_true: float
The true value.
y_pred: float
The predicted value.
"""
self.last_true_label = y_true
self.last_prediction = y_pred
self.total_square_error += (y_true - y_pred) * (y_true - y_pred)
self.average_error += np.absolute(y_true - y_pred)
old_square = self.total_square_error_correction.add_element(
np.array([-1 * ((y_true - y_pred) * (y_true - y_pred))]))
old_average = self.average_error_correction.add_element(np.array([-1 * (np.absolute(y_true - y_pred))]))
if (old_square is not None) and (old_average is not None):
self.total_square_error += old_square[0]
self.average_error += old_average[0]
def get_mean_square_error(self):
""" Computes the window/current mean square error.
Returns
-------
float
The window/current mean square error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.total_square_error / self.sample_count
def get_average_error(self):
""" Computes the window/current average error.
Returns
-------
float
The window/current average error.
"""
if self.sample_count == 0:
return 0.0
else:
return self.average_error / self.sample_count
def get_last(self):
return self.last_true_label, self.last_prediction
@property
def sample_count(self):
return self.total_square_error_correction.get_current_size()
def get_info(self):
return '{}:'.format(type(self).__name__) + \
' - sample_count: {}'.format(self.sample_count) + \
' - mean_square_error: {:.6f}'.format(self.get_mean_square_error()) + \
' - mean_absolute_error: {:.6f}'.format(self.get_average_error())
class MultiTargetRegressionMeasurements(object):
""" This class is used to keep updated statistics over a multi-target regression
learner in a multi-target regression problem context.
It will keep track of global metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: AMSE (average mean square error), AMAE (average mean absolute error),
and ARMSE (average root mean square error).
"""
def __init__(self):
super().__init__()
self.n_targets = 0
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def reset(self):
self.n_targets = 0
self.total_square_error = 0.0
self.average_error = 0.0
self.sample_count = 0
self.last_true_label = None
self.last_prediction = None
def add_result(self, y, prediction):
""" Use the true value and the prediction to update the statistics.
Parameters
----------
y: float or list or np.ndarray
The true value(s).
prediction: float or list or np.ndarray
The predicted value(s).
"""
self.last_true_label = y
self.last_prediction = prediction
m = 0
if hasattr(y, 'size'):
m = y.size
elif hasattr(y, 'append'):
m = len(y)
self.n_targets = m
self.total_square_error += (y - prediction) ** 2
self.average_error += np.absolute(y - prediction)
self.sample_count += 1
def get_average_mean_square_error(self):
""" Computes the average mean square error.
Returns
-------
float
The average mean square error.
"""
if self.sample_count == 0:
return 0.0
else:
return np.sum(self.total_square_error / self.sample_count) / self.n_targets
def get_average_absolute_error(self):
""" Computes the average mean absolute error.
Returns
-------
float
The average absolute error.
"""
if self.sample_count == 0:
return 0.0
else:
return np.sum(self.average_error / self.sample_count) \
/ self.n_targets
def get_average_root_mean_square_error(self):
""" Computes the mean square error.
Returns
-------
float
The average mean square error.
"""
if self.sample_count == 0:
return 0.0
else:
return np.sum(np.sqrt(self.total_square_error /
self.sample_count)) \
/ self.n_targets
def get_last(self):
return self.last_true_label, self.last_prediction
@property
def _sample_count(self):
return self.sample_count
def get_info(self):
return 'MultiTargetRegressionMeasurements: sample_count: ' + \
str(self._sample_count) + ' - average_mean_square_error: ' + \
str(self.get_average_mean_square_error()) + ' - average_mean_absolute_error: ' + \
str(self.get_average_absolute_error()) + ' - average_root_mean_square_error: ' + \
str(self.get_average_root_mean_square_error())
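# --- Illustrative usage sketch (added for documentation; not part of the original
# module). It feeds one multi-target sample into MultiTargetRegressionMeasurements
# and reads back the averaged metrics; np refers to the numpy import used above.
def _multi_target_regression_usage_example():
    measurements = MultiTargetRegressionMeasurements()
    measurements.add_result(np.array([1.0, 2.0]), np.array([0.5, 2.5]))
    return (measurements.get_average_mean_square_error(),
            measurements.get_average_absolute_error(),
            measurements.get_average_root_mean_square_error())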
class WindowMultiTargetRegressionMeasurements(object):
""" This class is used to keep updated statistics over a multi-target regression
learner in a multi-target regression problem context inside a fixed sized
window. It uses FastBuffer objects to simulate the fixed sized windows.
It will keep track of partial metrics, that can be provided at
any moment. The relevant metrics kept by an instance of this class
are: AMSE (average mean square error), AMAE (average mean absolute error),
and ARMSE (average root mean square error).
"""
def __init__(self, window_size=200):
super().__init__()
self.n_targets = 0
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(window_size)
self.average_error_correction = FastBuffer(window_size)
self.window_size = window_size
def reset(self):
self.n_targets = 0
self.total_square_error = 0.0
self.average_error = 0.0
self.last_true_label = None
self.last_prediction = None
self.total_square_error_correction = FastBuffer(self.window_size)
self.average_error_correction = FastBuffer(self.window_size)
def add_result(self, y, prediction):
""" Use the true value and the prediction to update the statistics.
Parameters
----------
y: float or list or np.ndarray
The true value(s).
prediction: float or list or np.ndarray
The predicted value(s).
"""
self.last_true_label = y
self.last_prediction = prediction
m = 0
if hasattr(y, 'size'):
m = y.size
elif hasattr(y, 'append'):
m = len(y)
self.n_targets = m
self.total_square_error += (y - prediction) ** 2
self.average_error += np.absolute(y - prediction)
old_square = self.total_square_error_correction.add_element(
np.array([-1 * ((y - prediction) ** 2)])
)
old_average = self.average_error_correction.add_element(
np.array([-1 * (np.absolute(y - prediction))])
)
if (old_square is not None) and (old_average is not None):
self.total_square_error += old_square[0]
self.average_error += old_average[0]
def get_average_mean_square_error(self):
""" Computes the window/current average mean square error.
Returns
-------
float
The window/current average mean square error.
"""
if self._sample_count == 0:
return 0.0
else:
return np.sum(self.total_square_error / self._sample_count) \
/ self.n_targets
def get_average_absolute_error(self):
""" Computes the window/current average mean absolute error.
Returns
-------
float
The window/current average mean absolute error.
"""
if self._sample_count == 0:
return 0.0
else:
return np.sum(self.average_error / self._sample_count) \
/ self.n_targets
def get_average_root_mean_square_error(self):
""" Computes the mean square error.
Returns
-------
float
The average mean square error.
"""
if self._sample_count == 0:
return 0.0
else:
return np.sum(np.sqrt(self.total_square_error /
self._sample_count)) \
/ self.n_targets
def get_last(self):
return self.last_true_label, self.last_prediction
@property
def _sample_count(self):
return self.total_square_error_correction.get_current_size()
def get_info(self):
return 'MultiTargetRegressionMeasurements: sample_count: ' + \
str(self._sample_count) + ' - average_mean_square_error: ' + \
str(self.get_average_mean_square_error()) + ' - average_mean_absolute_error: ' + \
str(self.get_average_absolute_error()) + ' - average_root_mean_square_error: ' + \
str(self.get_average_root_mean_square_error())
class RunningTimeMeasurements(object):
""" Class used to compute the running time for each evaluated prediction
model.
The training, prediction, and total time are considered separately. The
class accounts for the amount of time each model effectively spent on
training and testing. To do so, timers for each of the actions are
considered.
Besides the property getters, the compute-time methods
must be used as follows:
- `compute_{training, testing}_time_begin`
- Perform the training/testing action
- `compute_{training, testing}_time_end`
Additionally, the `update_time_measurements` method updates the total
running time accounting, as well as the total number of seen samples.
"""
def __init__(self):
super().__init__()
self._training_start = None
self._testing_start = None
self._training_time = 0
self._testing_time = 0
self._sample_count = 0
self._total_time = 0
def reset(self):
self._training_time = 0
self._testing_time = 0
self._sample_count = 0
self._total_time = 0
def compute_training_time_begin(self):
""" Initiates the training time accounting.
"""
self._training_start = timer()
def compute_training_time_end(self):
""" Finishes the training time accounting. Updates current total
training time.
"""
self._training_time += timer() - self._training_start
def compute_testing_time_begin(self):
""" Initiates the testing time accounting.
"""
self._testing_start = timer()
def compute_testing_time_end(self):
""" Finishes the testing time accounting. Updates current total
testing time.
"""
self._testing_time += timer() - self._testing_start
def update_time_measurements(self, increment=1):
""" Updates the current total running time. Updates the number of seen
samples by `increment`.
"""
if increment > 0:
self._sample_count += increment
else:
self._sample_count += 1
self._total_time = self._training_time + self._testing_time
def get_current_training_time(self):
return self._training_time
def get_current_testing_time(self):
return self._testing_time
def get_current_total_running_time(self):
return self._total_time
def get_info(self):
return 'RunningTimeMeasurements: sample_count: ' + \
str(self._sample_count) + ' - Total running time: ' + \
str(self.get_current_total_running_time()) + \
' - training_time: ' + \
str(self.get_current_training_time()) + \
' - testing_time: ' + \
str(self.get_current_testing_time())
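# --- Illustrative usage sketch (added for documentation; not part of the original
# module). It follows the begin/end protocol described in the RunningTimeMeasurements
# docstring for a single test-then-train step; `model` is a hypothetical estimator
# exposing predict/partial_fit and is not defined in this module.
def _running_time_usage_example(model, X, y):
    timings = RunningTimeMeasurements()
    timings.compute_testing_time_begin()
    model.predict(X)
    timings.compute_testing_time_end()
    timings.compute_training_time_begin()
    model.partial_fit(X, y)
    timings.compute_training_time_end()
    timings.update_time_measurements(increment=len(X))
    return timings.get_current_total_running_time()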
def hamming_score(true_labels, predicts):
""" Computes de hamming score, which is known as the label-based accuracy,
designed for multi-label problems. It's defined as the number of correctly
predicted labels divided by all classified labels.
Parameters
----------
true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the true labels for all the classification tasks and for
n_samples.
predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the predictions for all the classification tasks and for
n_samples.
Returns
-------
float
The hamming score, or label-based accuracy, for the given sets.
Examples
--------
>>> from skmultiflow.metrics import hamming_score
>>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
>>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
>>> hamming_score(true_labels, predictions)
0.75
"""
if not hasattr(true_labels, 'shape'):
true_labels = np.asarray(true_labels)
if not hasattr(predicts, 'shape'):
predicts = np.asarray(predicts)
N, L = true_labels.shape
return np.sum((true_labels == predicts) * 1.) / N / L
def j_index(true_labels, predicts):
""" Computes the Jaccard Index of the given set, which is also called the
'intersection over union' in multi-label settings. It's defined as the
intersection between the true label's set and the prediction's set,
divided by the sum, or union, of those two sets.
Parameters
----------
true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the true labels for all the classification tasks and for
n_samples.
predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the predictions for all the classification tasks and for
n_samples.
Returns
-------
float
The J-index, or 'intersection over union', for the given sets.
Examples
--------
>>> from skmultiflow.metrics import j_index
>>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
>>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
>>> j_index(true_labels, predictions)
0.66666666666666663
"""
if not hasattr(true_labels, 'shape'):
true_labels = np.asarray(true_labels)
if not hasattr(predicts, 'shape'):
predicts = np.asarray(predicts)
N, L = true_labels.shape
s = 0.0
for i in range(N):
inter = sum((true_labels[i, :] * predicts[i, :]) > 0) * 1.
union = sum((true_labels[i, :] + predicts[i, :]) > 0) * 1.
if union > 0:
s += inter / union
elif np.sum(true_labels[i, :]) == 0:
s += 1.
return s * 1. / N
def exact_match(true_labels, predicts):
""" This is the most strict metric for the multi label setting. It's defined
as the percentage of samples that have all their labels correctly classified.
Parameters
----------
true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the true labels for all the classification tasks and for
n_samples.
predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
A matrix with the predictions for all the classification tasks and for
n_samples.
Returns
-------
float
The exact match percentage between the given sets.
Examples
--------
>>> from skmultiflow.metrics import exact_match
>>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
>>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
>>> exact_match(true_labels, predictions)
0.5
"""
if not hasattr(true_labels, 'shape'):
true_labels = np.asarray(true_labels)
if not hasattr(predicts, 'shape'):
predicts = np.asarray(predicts)
N, L = true_labels.shape
return np.sum(np.sum((true_labels == predicts) * 1, axis=1) == L) * 1. / N
| 31.720097
| 112
| 0.59763
|
5a6b89aee2cdfa442c021d4973c5e1b801e88a2f
| 308
|
py
|
Python
|
tests/runner.py
|
BinyaminSharet/Mtp
|
2683f521123edd1a40e73204eceaa5cb2d5e78aa
|
[
"MIT"
] | 8
|
2016-07-24T12:02:17.000Z
|
2021-08-18T06:14:54.000Z
|
tests/runner.py
|
BinyaminSharet/Mtp
|
2683f521123edd1a40e73204eceaa5cb2d5e78aa
|
[
"MIT"
] | 1
|
2017-04-18T23:00:40.000Z
|
2017-04-18T23:03:55.000Z
|
tests/runner.py
|
BinyaminSharet/Mtp
|
2683f521123edd1a40e73204eceaa5cb2d5e78aa
|
[
"MIT"
] | 5
|
2016-06-22T11:44:39.000Z
|
2019-08-21T13:17:26.000Z
|
#!/usr/bin/env python
import os
import unittest
from mtp_device_tests import *
from mtp_api_tests import *
from mtp_msg_tests import *
from mtp_property_tests import *
from mtp_object_tests import *
if __name__ == '__main__':
if not os.path.exists('logs'):
os.mkdir('logs')
unittest.main()
| 20.533333
| 34
| 0.730519
|
d9cb1564c24b5070bd04cb6dcae06149381f8ea3
| 191
|
py
|
Python
|
test/validate.py
|
mm333444/ProxyTool
|
aad1a2d900646b68b3dd2097c8abe7357a09ec6c
|
[
"BSD-2-Clause"
] | 6
|
2017-09-19T04:32:00.000Z
|
2018-01-25T08:18:55.000Z
|
test/validate.py
|
mm333444/ProxyTool
|
aad1a2d900646b68b3dd2097c8abe7357a09ec6c
|
[
"BSD-2-Clause"
] | 2
|
2021-03-22T17:12:39.000Z
|
2021-06-01T21:42:27.000Z
|
test/validate.py
|
aox-lei/ProxyTool
|
aad1a2d900646b68b3dd2097c8abe7357a09ec6c
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding:utf-8 -*-
import os
import sys
sys.path.append(os.path.abspath(os.getcwd()))
from app.validate.request_web import request_web
request_web().run('http', '222.73.68.144', 8090)
| 19.1
| 48
| 0.712042
|
ece30b3a8b4e4963d61979af9763fe25def49cf7
| 109
|
py
|
Python
|
leetcode/__init__.py
|
Gongzq5/leetcode-helper
|
9a6f79db6fbefa921b852064a269e274e81a1e28
|
[
"MIT"
] | null | null | null |
leetcode/__init__.py
|
Gongzq5/leetcode-helper
|
9a6f79db6fbefa921b852064a269e274e81a1e28
|
[
"MIT"
] | null | null | null |
leetcode/__init__.py
|
Gongzq5/leetcode-helper
|
9a6f79db6fbefa921b852064a269e274e81a1e28
|
[
"MIT"
] | null | null | null |
from . import array
from . import list
from . import reporter
from . import tree
from . import simple_drawer
| 18.166667
| 27
| 0.770642
|
ea81a6feb07b37fae5b45b9ea6f965b36333c3bc
| 2,046
|
py
|
Python
|
trio/tests/common_linear.py
|
psandahl/trio
|
34d1c20e6e9bd3cd18bca1a9f2a53518452bd0ff
|
[
"MIT"
] | null | null | null |
trio/tests/common_linear.py
|
psandahl/trio
|
34d1c20e6e9bd3cd18bca1a9f2a53518452bd0ff
|
[
"MIT"
] | null | null | null |
trio/tests/common_linear.py
|
psandahl/trio
|
34d1c20e6e9bd3cd18bca1a9f2a53518452bd0ff
|
[
"MIT"
] | null | null | null |
import numpy as np
import unittest
from trio.common.camera import Camera
from trio.common.linear import triangulate, solve_dlt
from trio.common.math import euclidean, homogeneous
from trio.common.matrix import matrix_look_at, matrix_decompose_ypr
from .utils import equal_arrays
class CommonLinearTestCase(unittest.TestCase):
def test_linear_triangulate(self):
c0 = Camera(np.array([4, 3, 0]),
np.radians((-90, 0, 0)), np.radians((30, 20)))
c1 = Camera(np.array([3, 3, 0]),
np.radians((-90, 0, 0)), np.radians((30, 20)))
xyz = np.array([3.5, 0.3, 0.35])
uv0 = c0.project(xyz)
uv1 = c1.project(xyz)
self.assertTrue(equal_arrays(xyz, triangulate(
c0.projection_matrix, uv0, c1.projection_matrix, uv1)))
def test_linear_simple_dlt(self):
# Setup camera.
ypr = matrix_decompose_ypr(
matrix_look_at(np.array((10, 10, 10)),
np.array((0, 0, 0)),
np.array((0, 0, 1))))
c = Camera(np.array((10, 10, 10)), np.array(ypr), np.radians((50, 45)))
# Produce points.
xyzs = [np.array((-3.2, 1.3, 1.1)),
np.array((-1.6, -2, 0.8)),
np.array((0, 0, -1)),
np.array((1.8, -1.6, -0.1)),
np.array((1.2, 2.1, -0.6)),
np.array((3.1, -2.7, 1.5)),
np.array((3.3, 2.7, 1.8))]
points = list()
for xyz in xyzs:
px = c.project(xyz)
point = dict()
point["x"] = xyz[0]
point["y"] = xyz[1]
point["z"] = xyz[2]
point["u"] = px[0]
point["v"] = px[1]
points.append(point)
# Run dlt.
res, p = solve_dlt(points)
self.assertTrue(res)
# Compare projections.
for xyz in xyzs:
px1 = c.project(xyz)
px2 = euclidean(p @ homogeneous(xyz))
self.assertTrue(equal_arrays(px1, px2))
| 30.537313
| 79
| 0.50782
|
6f5550a6f8209cedb5f586302b6b70ca4643bb71
| 26,967
|
py
|
Python
|
pandas/tests/reshape/test_crosstab.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-09-03T13:24:57.000Z
|
2020-09-03T13:24:57.000Z
|
pandas/tests/reshape/test_crosstab.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/reshape/test_crosstab.py
|
cgangwar11/pandas
|
972f491cb7fdcc3c1c2cb9f05644128f13457f87
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-11-20T18:19:33.000Z
|
2020-11-20T18:19:33.000Z
|
import numpy as np
import pytest
from pandas import CategoricalIndex, DataFrame, Index, MultiIndex, Series, crosstab
import pandas._testing as tm
class TestCrosstab:
def setup_method(self, method):
df = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df["A"], df["C"])
expected = df.groupby(["A", "C"]).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df["A"], [df["B"], df["C"]])
expected = df.groupby(["A", "B", "C"]).size()
expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df["B"], df["C"]], df["A"])
expected = df.groupby(["B", "C", "A"]).size()
expected = expected.unstack("A").fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"))
expected = crosstab(df["a"], [df["b"], df["c"]])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c"))
expected = crosstab([df["b"], df["c"]], df["a"])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df["A"].values, self.df["C"].values)
assert result.index.name == "row_0"
assert result.columns.name == "col_0"
def test_crosstab_non_aligned(self):
# GH 17005
a = Series([0, 1, 1], index=["a", "b", "c"])
b = Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"])
c = np.array([3, 4, 3])
expected = DataFrame(
[[1, 0], [1, 1]],
index=Index([0, 1], name="row_0"),
columns=Index([3, 4], name="col_0"),
)
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True)
assert result.index.names == ("a",)
assert result.columns.names == ["b", "c"]
all_cols = result["All", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
# to keep index.name
exp_margin = Series([len(df)], index=Index(["All"], name="a"))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ("All", "")
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc["All"]
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
exp_rows = exp_rows.append(Series([len(df)], index=[("All", "")]))
exp_rows.name = "All"
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(
a,
[b, c],
rownames=["a"],
colnames=("b", "c"),
margins=True,
margins_name="TOTAL",
)
assert result.index.names == ("a",)
assert result.columns.names == ["b", "c"]
all_cols = result["TOTAL", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
# to keep index.name
exp_margin = Series([len(df)], index=Index(["TOTAL"], name="a"))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ("TOTAL", "")
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc["TOTAL"]
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
exp_rows = exp_rows.append(Series([len(df)], index=[("TOTAL", "")]))
exp_rows.name = "TOTAL"
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
msg = "margins_name argument must be a string"
for margins_name in [666, None, ["a", "b"]]:
with pytest.raises(ValueError, match=msg):
crosstab(
a,
[b, c],
rownames=["a"],
colnames=("b", "c"),
margins=True,
margins_name=margins_name,
)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab(
[a, b], c, values, aggfunc=np.sum, rownames=["foo", "bar"], colnames=["baz"]
)
df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values})
expected = df.pivot_table(
"values", index=["foo", "bar"], columns="baz", aggfunc=np.sum
)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object)
c = np.array(
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
)
res = crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False)
m = MultiIndex.from_tuples(
[("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")],
names=["b", "c"],
)
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
# GH 10291
s1 = Series([1, 2, 3], index=[1, 2, 3])
s2 = Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
# pivot_table counts null into margin ('All')
# when margins=true and dropna=true
df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
actual = crosstab(df.a, df.b, margins=True, dropna=True)
expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
)
actual = crosstab(df.a, df.b, margins=True, dropna=True)
expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3.0, 4.0, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]}
)
actual = crosstab(df.a, df.b, margins=True, dropna=True)
expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
# GH 12642
# _add_margins raises KeyError: Level None not found
# when margins=True and dropna=False
df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
actual = crosstab(df.a, df.b, margins=True, dropna=False)
expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
)
actual = crosstab(df.a, df.b, margins=True, dropna=False)
expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3.0, 4.0, "All"], name="b")
tm.assert_frame_equal(actual, expected)
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object)
c = np.array(
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
)
actual = crosstab(
a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False
)
m = MultiIndex.from_arrays(
[
["one", "one", "two", "two", "All"],
["dull", "shiny", "dull", "shiny", ""],
],
names=["b", "c"],
)
expected = DataFrame(
[[1, 0, 1, 0, 2], [2, 0, 1, 1, 5], [3, 0, 2, 1, 7]], columns=m
)
expected.index = Index(["bar", "foo", "All"], name="a")
tm.assert_frame_equal(actual, expected)
actual = crosstab(
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False
)
m = MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
names=["a", "b"],
)
expected = DataFrame(
[[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 2, 7]], index=m
)
expected.columns = Index(["dull", "shiny", "All"], name="c")
tm.assert_frame_equal(actual, expected)
actual = crosstab(
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True
)
m = MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
names=["a", "b"],
)
expected = DataFrame(
[[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m
)
expected.columns = Index(["dull", "shiny", "All"], name="c")
tm.assert_frame_equal(actual, expected)
def test_crosstab_normalize(self):
# Issue 12578
df = DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
)
rindex = Index([1, 2], name="a")
cindex = Index([3, 4], name="b")
full_normal = DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex)
row_normal = DataFrame([[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex)
col_normal = DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex)
# Check all normalize args
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="all"), full_normal)
tm.assert_frame_equal(crosstab(df.a, df.b, normalize=True), full_normal)
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="index"), row_normal)
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="columns"), col_normal)
tm.assert_frame_equal(
crosstab(df.a, df.b, normalize=1),
crosstab(df.a, df.b, normalize="columns"),
)
tm.assert_frame_equal(
crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index"),
)
row_normal_margins = DataFrame(
[[1.0, 0], [0.25, 0.75], [0.4, 0.6]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4], name="b", dtype="object"),
)
col_normal_margins = DataFrame(
[[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
index=Index([1, 2], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b", dtype="object"),
)
all_normal_margins = DataFrame(
[[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b", dtype="object"),
)
tm.assert_frame_equal(
crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins
)
tm.assert_frame_equal(
crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins,
)
tm.assert_frame_equal(
crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins
)
# Test arrays
crosstab(
[np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])
)
# Test with aggfunc
norm_counts = DataFrame(
[[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b"),
)
test_case = crosstab(
df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_counts)
df = DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]}
)
norm_sum = DataFrame(
[[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],
index=Index([1, 2, "All"], name="a", dtype="object"),
columns=Index([3, 4, "All"], name="b", dtype="object"),
)
test_case = crosstab(
df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_sum)
def test_crosstab_with_empties(self):
# Check handling of empties
df = DataFrame(
{
"a": [1, 2, 2, 2, 2],
"b": [3, 3, 4, 4, 4],
"c": [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
empty = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
index=Index([1, 2], name="a", dtype="int64"),
columns=Index([3, 4], name="b"),
)
for i in [True, "index", "columns"]:
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i)
tm.assert_frame_equal(empty, calculated)
nans = DataFrame(
[[0.0, np.nan], [0.0, 0.0]],
index=Index([1, 2], name="a", dtype="int64"),
columns=Index([3, 4], name="b"),
)
calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False)
tm.assert_frame_equal(nans, calculated)
def test_crosstab_errors(self):
# Issue 12578
df = DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
)
error = "values cannot be used without an aggfunc."
with pytest.raises(ValueError, match=error):
crosstab(df.a, df.b, values=df.c)
error = "aggfunc cannot be used without values"
with pytest.raises(ValueError, match=error):
crosstab(df.a, df.b, aggfunc=np.mean)
error = "Not a valid normalize argument"
with pytest.raises(ValueError, match=error):
crosstab(df.a, df.b, normalize="42")
with pytest.raises(ValueError, match=error):
crosstab(df.a, df.b, normalize=42)
error = "Not a valid margins argument"
with pytest.raises(ValueError, match=error):
crosstab(df.a, df.b, normalize="all", margins=42)
def test_crosstab_with_categorial_columns(self):
# GH 8860
df = DataFrame(
{
"MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"],
"MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"],
}
)
categories = ["Sedan", "Electric", "Pickup"]
df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories)
result = crosstab(df["MAKE"], df["MODEL"])
expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE")
expected_columns = CategoricalIndex(
categories, categories=categories, ordered=False, name="MODEL"
)
expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
expected = DataFrame(
expected_data, index=expected_index, columns=expected_columns
)
tm.assert_frame_equal(result, expected)
def test_crosstab_with_numpy_size(self):
# GH 4003
df = DataFrame(
{
"A": ["one", "one", "two", "three"] * 6,
"B": ["A", "B", "C"] * 8,
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
"D": np.random.randn(24),
"E": np.random.randn(24),
}
)
result = crosstab(
index=[df["A"], df["B"]],
columns=[df["C"]],
margins=True,
aggfunc=np.size,
values=df["D"],
)
expected_index = MultiIndex(
levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]],
codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
names=["A", "B"],
)
expected_column = Index(["bar", "foo", "All"], dtype="object", name="C")
expected_data = np.array(
[
[2.0, 2.0, 4.0],
[2.0, 2.0, 4.0],
[2.0, 2.0, 4.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[12.0, 12.0, 24.0],
]
)
expected = DataFrame(
expected_data, index=expected_index, columns=expected_column
)
tm.assert_frame_equal(result, expected)
def test_crosstab_dup_index_names(self):
# GH 13279
s = Series(range(3), name="foo")
result = crosstab(s, s)
expected_index = Index(range(3), name="foo")
expected = DataFrame(
np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
def test_crosstab_tuple_name(self, names):
s1 = Series(range(3), name=names[0])
s2 = Series(range(1, 4), name=names[1])
mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names)
expected = Series(1, index=mi).unstack(1, fill_value=0)
result = crosstab(s1, s2)
tm.assert_frame_equal(result, expected)
def test_crosstab_both_tuple_names(self):
# GH 18321
s1 = Series(range(3), name=("a", "b"))
s2 = Series(range(3), name=("c", "d"))
expected = DataFrame(
np.eye(3, dtype="int64"),
index=Index(range(3), name=("a", "b")),
columns=Index(range(3), name=("c", "d")),
)
result = crosstab(s1, s2)
tm.assert_frame_equal(result, expected)
def test_crosstab_unsorted_order(self):
df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
result = crosstab(df.index, [df.b, df.a])
e_idx = Index(["A", "B", "C"], name="row_0")
e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"])
expected = DataFrame(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
)
tm.assert_frame_equal(result, expected)
def test_crosstab_normalize_multiple_columns(self):
# GH 15150
df = DataFrame(
{
"A": ["one", "one", "two", "three"] * 6,
"B": ["A", "B", "C"] * 8,
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
"D": [0] * 24,
"E": [0] * 24,
}
)
result = crosstab(
[df.A, df.B],
df.C,
values=df.D,
aggfunc=np.sum,
normalize=True,
margins=True,
)
expected = DataFrame(
np.array([0] * 29 + [1], dtype=float).reshape(10, 3),
columns=Index(["bar", "foo", "All"], dtype="object", name="C"),
index=MultiIndex.from_tuples(
[
("one", "A"),
("one", "B"),
("one", "C"),
("three", "A"),
("three", "B"),
("three", "C"),
("two", "A"),
("two", "B"),
("two", "C"),
("All", ""),
],
names=["A", "B"],
),
)
tm.assert_frame_equal(result, expected)
def test_margin_normalize(self):
# GH 27500
df = DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
# normalize on index
result = crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
)
expected = DataFrame(
[[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
)
expected.index = MultiIndex(
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
names=["A", "B"],
)
expected.columns = Index(["large", "small"], dtype="object", name="C")
tm.assert_frame_equal(result, expected)
# normalize on columns
result = crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
)
expected = DataFrame(
[
[0.25, 0.2, 0.222222],
[0.25, 0.2, 0.222222],
[0.5, 0.2, 0.333333],
[0, 0.4, 0.222222],
]
)
expected.columns = Index(
["large", "small", "Sub-Total"], dtype="object", name="C"
)
expected.index = MultiIndex(
levels=[["bar", "foo"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["A", "B"],
)
tm.assert_frame_equal(result, expected)
# normalize on both index and column
result = crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
)
expected = DataFrame(
[
[0.111111, 0.111111, 0.222222],
[0.111111, 0.111111, 0.222222],
[0.222222, 0.111111, 0.333333],
[0.000000, 0.222222, 0.222222],
[0.444444, 0.555555, 1],
]
)
expected.columns = Index(
["large", "small", "Sub-Total"], dtype="object", name="C"
)
expected.index = MultiIndex(
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
names=["A", "B"],
)
tm.assert_frame_equal(result, expected)
def test_margin_normalize_multiple_columns(self):
# GH 35144
# use multiple columns with margins and normalization
df = DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
result = crosstab(
index=df.C,
columns=[df.A, df.B],
margins=True,
margins_name="margin",
normalize=True,
)
expected = DataFrame(
[
[0.111111, 0.111111, 0.222222, 0.000000, 0.444444],
[0.111111, 0.111111, 0.111111, 0.222222, 0.555556],
[0.222222, 0.222222, 0.333333, 0.222222, 1.0],
],
index=["large", "small", "margin"],
)
expected.columns = MultiIndex(
levels=[["bar", "foo", "margin"], ["", "one", "two"]],
codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]],
names=["A", "B"],
)
expected.index.name = "C"
tm.assert_frame_equal(result, expected)
| 36.148794
| 88
| 0.457559
|
6581562855be0290e4b8bc7b5d28d9403415a1c1
| 1,514
|
py
|
Python
|
gapipy/resources/geo/place.py
|
wmak/gapipy
|
b6849606d4f6af24b9f871f65e87aaf0d0c013cc
|
[
"MIT"
] | 4
|
2015-03-19T02:10:35.000Z
|
2018-10-22T19:51:44.000Z
|
gapipy/resources/geo/place.py
|
wmak/gapipy
|
b6849606d4f6af24b9f871f65e87aaf0d0c013cc
|
[
"MIT"
] | 69
|
2015-03-11T20:58:04.000Z
|
2021-10-12T18:39:15.000Z
|
gapipy/resources/geo/place.py
|
wmak/gapipy
|
b6849606d4f6af24b9f871f65e87aaf0d0c013cc
|
[
"MIT"
] | 1
|
2016-08-31T15:22:43.000Z
|
2016-08-31T15:22:43.000Z
|
# Python 2 and 3
from __future__ import unicode_literals
from ..base import Resource
from .country import Country
from .feature import Feature
from .state import State
from .timezone import Timezone
from ..dossier import PlaceDossier
class Place(Resource):
_resource_name = 'places'
_as_is_fields = [
'id', 'href', 'name', 'ascii_name', 'population', 'elevation',
'latitude', 'longitude', 'bounding_box', 'alternate_names',
'admin_divisions', 'dossier_frequency',
]
_date_time_fields_utc = ['date_created', 'date_last_modified']
_resource_fields = [
('country', Country),
('state', State),
('feature', Feature),
('timezone', Timezone),
]
_model_fields = [
('place_dossier', PlaceDossier),
]
_model_collection_fields = [
('places_of_interest', 'Place')
]
def __init__(self, *args, **kwargs):
super(Place, self).__init__(*args, **kwargs)
self._set_admin_divisions()
def _set_admin_divisions(self):
"""Transform the raw json list of `admin_divisions` into a list of thecd ~?
corresponding Place (stub) instances.
"""
if 'admin_divisions' in self._raw_data:
raw_admin_divisions = self._raw_data['admin_divisions'] or []
admin_divisions = [
self.__class__(d, client=self._client, stub=True)
for d in raw_admin_divisions
]
self.admin_divisions = admin_divisions
| 28.566038
| 83
| 0.629458
|
6a4995a43ee6406642160c38d86831d3e8eff9bc
| 1,364
|
py
|
Python
|
sknano/generators/tests/test_mwnt_generator.py
|
haidi-ustc/scikit-nano
|
ef9b24165ba37918b3f520657f7311ba139b3e7d
|
[
"BSD-2-Clause"
] | 21
|
2016-06-08T18:27:20.000Z
|
2022-03-22T08:27:46.000Z
|
sknano/generators/tests/test_mwnt_generator.py
|
haidi-ustc/scikit-nano
|
ef9b24165ba37918b3f520657f7311ba139b3e7d
|
[
"BSD-2-Clause"
] | 8
|
2016-06-24T19:45:58.000Z
|
2021-03-25T21:42:29.000Z
|
sknano/generators/tests/test_mwnt_generator.py
|
scikit-nano/scikit-nano
|
ef9b24165ba37918b3f520657f7311ba139b3e7d
|
[
"BSD-2-Clause"
] | 9
|
2016-12-08T16:35:52.000Z
|
2021-06-23T17:13:44.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import *
from sknano.generators import MWNTGenerator
from sknano.testing import GeneratorTestFixtures
class TestCase(GeneratorTestFixtures):
def test1(self):
mwnt = MWNTGenerator(max_walls=3, Lz=1.0)
print(mwnt)
print(mwnt.todict())
mwnt.save()
self.tmpdata.append(mwnt.fname)
mwnt.save(structure_format='data')
self.tmpdata.append(mwnt.fname)
def test2(self):
mwnt = MWNTGenerator(Ch=[(10, 10), (50, 50)], Lz=1.0)
print(mwnt)
print(mwnt.todict())
assert_equal(mwnt.Nwalls, 2)
assert_equal(mwnt.chiral_set, set(['armchair']))
mwnt.save()
self.tmpdata.append(mwnt.fname)
mwnt.save(structure_format='data')
self.tmpdata.append(mwnt.fname)
def test3(self):
mwnt = MWNTGenerator(Ch=[(5, 5), (10, 10), (20, 20)], Lz=1.0)
print(mwnt)
print(mwnt.todict())
assert_equal(mwnt.Nwalls, 3)
assert_equal(mwnt.chiral_set, set(['armchair']))
mwnt.save()
self.tmpdata.append(mwnt.fname)
mwnt.save(structure_format='data')
self.tmpdata.append(mwnt.fname)
if __name__ == '__main__':
nose.runmodule()
| 28.416667
| 69
| 0.63563
|
a2c52ac1c2b89662c1ea3f0033ad911b35123a33
| 13,319
|
py
|
Python
|
ABC_doc.py
|
Ally-Financial/abc-doc
|
20e6d3867774cf8729bbc763d6c2dc7b684fabce
|
[
"Apache-2.0"
] | null | null | null |
ABC_doc.py
|
Ally-Financial/abc-doc
|
20e6d3867774cf8729bbc763d6c2dc7b684fabce
|
[
"Apache-2.0"
] | null | null | null |
ABC_doc.py
|
Ally-Financial/abc-doc
|
20e6d3867774cf8729bbc763d6c2dc7b684fabce
|
[
"Apache-2.0"
] | null | null | null |
'''
* Copyright 2021 Ally Financial, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
'''
#library to capture Mouse Events
from pynput.mouse import Listener
from pynput import mouse
import logging
#library to capture Screenshot
import pyautogui
#library to prepare and work on word File
from docx import Document
from docx.shared import Inches
#library to Build Application GUI
import PySimpleGUI as sg
#library to get and Set system details
import os
from datetime import datetime
#library to work on images and merge 2 images
from PIL import Image, ImageDraw
#import pii_DetectBlur_Image
#nexus
v_enable_logging = True
# for padding white spaces for better layout
vSzInput=(3,10)
vSzButton=(25,3)
vSzIcon=(3,3)
# parameters to be changed for individual or organization needs
vFinalFilenameStartWith = 'Ally - '
vFilename = 'Ally - '
vContinueProcessCapture = False
# parameters to be changed for individual or organization needs
vScreenTextFont = 'Calibri 12'
vDesktopPath = (os.environ['USERPROFILE'] + '\Desktop')
# convert images or logo to a base64 string for easy portability
vLogoOnScreen = b'iVBORw0KGgoAAAANSUhEUgAAADIAAAAtCAYAAADsvzj/AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFuoAABbqAeWOQxAAAAbCSURBVGhD7VlpbFRVFB6KSFQWi2Aps7xlBjAoEG2MGIlEZakUqW1nee9u0xaD+gPj8kfi0kQQBNJEgtEQBU0MEreEaExUjBL+mBgDBmIBg8aYKEESRHahMH7n9RaG8dF5M51WTfiS023ePfd8557tvoau4Ar+J0iFUkNT41Ij3Fp3bEOkISxrZUzaMsbDPJI1suMbYg3VT0SS1+jH/1sgw+UEOckJi3uZLVq5zZ+XllwrrexGfH8HRN6VdvZtZan1+H0VN/iTwsg+yCPp23mM13aEOqq0qn8H8+HhTDQzV5ryOWGKrdySv3JLnMnGW3PtiUU9Er8oixIP5VrjbTlhyZy01J+Q3dyUb4JYe9pMT09GZgzuSbHRrNqJ8QYY8QaM+o2Ma4u354iAsrM5YcuiQs/RGiJLxHCSXzOLPdUSdW7GFgN/QkmLTeW2XAPPH+g13s/QUoV0ybjCz3wbt5Wbqk6N1ltWHEOZwe7npviSvNlaIQKF0pYAIVsd5hZflrJTMb13ZVAXqhsmDOEIW+zzYtzHgEoKnTLI5JB3G1zbnajN6D+4oVwk8k9Ze2BOwU9UPJtTVjaHPNyYjAlLm1I+kpYzByR2ZxODR6JXPDIIY5TsNXKkvEGbVDpQWuPCUp9WKqHLEb33EW7yh+tCi4dp00rCEGmKF5WtTpJnCjcYTGlNeL1nJ4uxu7RtweFYYg63RVc5p0GJSuS9hLXlecg5EgqTvL+XJEQGTu1sNBqv1yYWx5TQlKt5XLwGBTAg+KYeARiLnDqO3/cgUT93Tb6JWXwjyukmlO6t+LwLz52gZ0shpCeC/SIq5mgziyNluzMxPuwq5TSomWHNWSTmd8wSKxEGs1VEhRfruK4P1Q9vCjdFMlE+FyPJGhi1C9Ld0wT9dRZKjz1qOR9TP8oztBhQv5+Ft44G9Zj27kkMi++lDT5Dq+kT6Qi7mxtiC8icggN89RaK1/1ttZ1F2FSt5vJIjm8bh0n1fQoRP2WFokl0M1NtaJ/sTNBqAiFTkzGlyTcLU54hPX7688Xby1LHHEsltYrLI2nIO1FydwQNK2qSzJKfuUbzTVpFSUiH09PhtG1BiJCQXcxmHXW1dddqFf5A/LpZW/0chAiRQGX7I2MwppeXBfSIJQixQ0HyhaoXisnmTE2jqZf7w7X50/DOkSAeopjlaJhJMzlZLy8LNLojvLYHCWeqXtJUXzlhZ5pe7g9mik54J1DMtmJSxfMrRY24Ti8vF1VUooMQIefhuR0Iyb6Limu4uI4Gi1fyjmPwx/XSfoEbkkryuWIOpJDHM3uo6uml/mCGeJ2mTj8lhUIe5DG5RC/tF1hMLIcD/wpCBPvuUwa7Ry/1BxJvHTzjlTo/RflCx+wa/Jm6uvKGuXwgRAMTESCCS17fRNCVX8A19mQQIj3DnHprYbS9pP7hh4oTQVdfDGUHgpRCrXQvM1TfSgOg4kQQWnORI11Br7Q9lUZ0No4uYSr1QcWJNIebJ0LhF0FKIYk3lpvqIKpOO71l1GpKRsWJhEIdVa7F10JpoF4COU+nh267H/m1aHb17LJe45RStQISwaXKSLnKkr+UcjP0Om4cJ2PJVY7p3EHvfmeFZl2lVRaFG+MrKk4kGUmGpSk/ChpevUKbeG9aLPU9t+U6umezKGvkCdxBfMSNu/Nc252vopk4NURUy9MVJUJAh38UHjpYzn2dTkffs7ulLQ/j+++kq1Dw96OYnPeKmGhBOFNnL1r2SybCxzbVwkMflkMkX2i9RyxP6NTopTam7BMCfYvykpmsA88XzcuSiRBwXV2gLLWn1BArJkQO9/ljSPCO1LhZI7y9Kl+1LkGVMMRSxPuR/p5Mr5AeGHuCGfyllH3xRfVAEwklRyXH4LL1CuL3dCXCDMaehtGdbu2CsXoLDwNOhIAKE5EWX4/ELftlnbcO9xyE1KuO5dRo1RcwGESG0JcmJD8S82Vly0OUM8U2zBea2zD2dCtbbeBhHvG0FmBQTgTwyCycvHAkhsrHsOFOSLeO939sli/683PCVJuaY8z2tPmgpM5uqrKJXAIZdWfCgxsQKvux+Rkqpz3T8qVGkFGemOoDOSEzSS/3hWvyFTjps2TohXUFQvq9d1uW+oHH+H16af9Qn6gf7kbdBzhyByHzDUKHGtzZ3k17+wW897GIiFv0sssCBWUZdBwnx8AJpy4nPaGluipyIvmgibfFapkmDfkI5qxXuSk+ESb/Ft79EYZtScf4bfrRPiEjch5um6scm3WyOFvtL87qDAbatOUszSRUXC8dGIiaphudhHOrY4j6ZE2y//9pCoRQ6G8oxV0jL1garQAAAABJRU5ErkJggg=='
vAppLogo = b'iVBORw0KGgoAAAANSUhEUgAAAEgAAABICAMAAABiM0N1AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAzUExURQAAACCPjxiXhxuVihyXhx2WiRuXihuWiRyXiR2YixuYihyYihyYihyYihuYihyYihyYinvhKU4AAAAQdFJOUwAQIDBAUGBwgI+fr7/P3+8jGoKKAAAACXBIWXMAAA7DAAAOwwHHb6hkAAADHUlEQVRYR+2X25asIAxERfCCN/z/rz0VSBAEWvusNW+9H6btFiNJVYDpfvz4e9Q88+fS+4v/ZTvPHRHUep4H//Qlyv+dTuCm8aDP6fr9Pdqd69gZRwEEZ5RZ3Gl4yDuQSouVh7zD51LH8ZBXaH6oysvclDG9+ZAZ5DNaDzy6jcrqC459tXbdilyfbDXwOI/bJs2/d2qCpxKe5pRUx83BNErL22e+QzwF6nnceW6YjDLzHnI9LNW4X/wX4qnkMiM3UjZ5Zcjahq+fAoleDjM3e7i+OBAdnvfssXolGg0QcH2nLF9nWMwzTnNtzSoqj/lQv9fYVKpHvYUV3z3PsRkHkZI6tcwkkqAtq3kFcDcZWIXN6HRuyzvIO141CLdtpwq9PFxDtD87s9UmoYpQzC+MBUfPCc8ypYZqY3gjEq+uRgfeHyJhSlwlv/reiDrNiSoJFEfWhqEb/SdsWRogdlGf9aYQ4nDtlssqS3g6QQTHrlPJLItDucll2PYy+NYu4mF87LY8DjmaF6dKapL/ElcS+ImfvcdB+lz2aueGAtrYS2Q3/3QRB7qHQJXEQHCPTWfkny/jIFAQpCZ/14car5ciIdJaxkFqQWSYt0B8hO4OFyCW4BYHxebRkOaOmAfSXvJzpHucxCJtH0VFPFKnHBhSFsFyJYmjp2wRQaQiDvSUIXR4KuCevzXt5cqIkxK5qb7WhmbFsPoyEsEyIs3bwN8ll1W67QJvYmXq87mW2v7zUjt3PZca21wVWZEgaVxUSrC1iazwXBWRlHLPjx4J6HdeSkFNMxiA7wJo3ogEvWXT9kWoYuLDqGdqywurLlu5raka6s2jyNG3swhw6HbNv+66pVkgbuz0ttxP3n7x6P1hNp7rhGCpAIPlB91qEEbJVyrjZ5KjBglN9DjF+jSSMM8ntnxPw78RghrWJMxzaokHAvu6zNZufJSMtJRPGIwW7ers2gzmOY7nOttWeEoq45ZICpn1PR+atnVMqwPxtknlErpBDZs/OH+FN5zffN08+Uz9tqpeVvkO5KP1vafPr2pzR1n0OzH7DffHjz+l6/4Bmz911Xhw3/QAAAAASUVORK5CYII='
def add_to_log(text):
if v_enable_logging:
# log with the current time, not the document-creation timestamp
print(str(datetime.now().strftime("%m-%d-%Y %H%M%S")) + ' : ' + text)
logging.info(text)
# Add your new theme colors and settings
sg.LOOK_AND_FEEL_TABLE['Ally'] = {'BACKGROUND': '#FFFFFF',
'TEXT': '#000000',
'INPUT': '#FFF7F0',
'TEXT_INPUT': '#000000',
'SCROLL': '#FFF7F0',
'BUTTON': ('#FFFFFF' , '#8A3575'),
'PROGRESS': "#FFF7F0",
'SCROLL': '#FFF7F0',
'BORDER': 1,
'SLIDER_DEPTH':0,
'PROGRESS_DEPTH':0
}
sg.ChangeLookAndFeel('Ally')
column_input_padding = [[sg.Text('', size=vSzInput)]]
column_button_padding = [[sg.Text('', size=vSzButton)]]
column_icon_padding = [[sg.Text('', size=vSzIcon)]]
column_input = [[sg.Text('Save Document here', size=(20, 1), justification='left'),
sg.InputText(vDesktopPath), sg.FolderBrowse( size=(8,1))],
[sg.Text('Process Name ', size=(20, 1), justification='left'),sg.InputText('')],
[sg.Text('Process Description ', size=(20, 1), justification='left'),
sg.MLine(default_text='', size=(45, 3),no_scrollbar = True)],
[sg.Text('Systems Impacted ', size=(20, 1), justification='left'),
sg.InputText(default_text='', size=(45, 3))]]
column_button = [[sg.Button("Start", size =(8,1)),
sg.Button("Pause",disabled=True, size =(8,1)),
sg.Button("Continue",disabled=True, size =(8,1)),
sg.Button("Stop",disabled=True, size =(8,1))]]
column_ally_logo = [[sg.Image(data=vLogoOnScreen)]]
#Preparing layout with input fields and button
layout = [[
[
sg.Column(column_input_padding, element_justification='c' ),
sg.Column(column_input, element_justification='l')],
[sg.Column(column_button_padding, element_justification='c' ),
sg.Column(column_button, element_justification='l')],
[sg.Column(column_ally_logo, element_justification='l')
]
]]
#Function to capture mouse Move
def on_move(x, y):
pass
#Function to paste image 2(fg_img) on image 1(bg_img)
# alpha represent opacity
# Box represent position on Image 1 where image 2 will be added
def trans_paste(fg_img,bg_img,alpha=1.0,box=(0,0)):
fg_img_trans = Image.new("RGBA",fg_img.size)
fg_img_trans = Image.blend(fg_img_trans,fg_img,alpha)
bg_img.paste(fg_img_trans,box,fg_img_trans)
return bg_img
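# Illustrative sketch (added for documentation; not executed by the tool): how
# trans_paste could be called on two Pillow images. 'screenshot.png' is a
# hypothetical file name; 'Mouse Icon.png' is the cursor icon used further below.
def _trans_paste_example():
    background = Image.open('screenshot.png').convert('RGBA')
    overlay = Image.open('Mouse Icon.png').convert('RGBA')
    return trans_paste(overlay, background, alpha=0.8, box=(100, 200))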
#Function to prepare word file with inputs provided by users
def prepare_word_file(document,values):
try:
add_to_log('Started preparing word file in current template folder')
document.add_heading('Process Name', level=1)
document.add_paragraph(values[1])
#document.add_page_break()
document.add_heading('Process Description', level=1)
document.add_paragraph(values[2])
#document.add_page_break()
document.add_heading('System Impacted', level=1)
document.add_paragraph(values[3])
#document.add_page_break()
document.add_heading('As-Is Process', level=1)
document.add_paragraph('Process screenshot will be added below')
except Exception as e:
add_to_log(' Error while preparing Word file '+ str(e))
#Function to capture Screenshot and Add to word file
def capture_and_save(x, y):
add_to_log('Mouse Clicked ')
myScreenshot = pyautogui.screenshot()
now = datetime.now()
timestamp = datetime.timestamp(now)
path_to_save=vFilePath+r'/file_name'+str(timestamp)+'.png'
path_to_save_bg=vFilePath+r'/file_name_bg'+str(timestamp)+'.png'
path_to_save_fg=vFilePath+r'/file_name_fg'+str(timestamp)+'.png'
myScreenshot.save(path_to_save)
add_to_log('Screenshot Saved ')
#path_to_save_ret = pii_DetectBlur_Image.main(path_to_save)
path_to_save_ret = path_to_save
bg_img = Image.open(path_to_save_ret)
fg_img = Image.open('Mouse Icon.png')
add_to_log('Screenshot Saved ')
p = trans_paste(fg_img.convert('RGBA'),bg_img.convert('RGBA'),1,(x,y))
#p = p.crop((left, top, right, bottom))
p.save(path_to_save_bg)
add_to_log('Image Saved ')
document.add_picture(path_to_save_bg, width=Inches(6.25))
document.save(vFilePath+r'/'+vFilename)
add_to_log('Image Added to file ')
if os.path.exists(path_to_save):
os.remove(path_to_save)
if os.path.exists(path_to_save_ret):
os.remove(path_to_save_ret)
if os.path.exists(path_to_save_bg):
os.remove(path_to_save_bg)
add_to_log('Image deleted ')
#Function to capture on click event and add it to screenshot
def on_click(x, y, button, pressed):
if pressed and vContinueProcessCapture:
capture_and_save(x, y)
#Function to skip scroll event( can be extended in Future)
def on_scroll(x, y, dx, dy):
pass
#Create Window and add fetch input
window = sg.Window(' ABC Doc - Automated Business process Capture and Documentation ',
layout,
default_element_size=(45, 22),
grab_anywhere=False,
icon = vAppLogo,
progress_bar_color='red',
font = vScreenTextFont,
element_padding=5
)
#Create Base Document
try:
document = Document()
document.add_heading('Process Definition Document (PDD)', 0)
docnow = datetime.now()
doctimestamp = docnow.strftime("%m-%d-%Y %H%M")
vFilename = vFilename+' '+ str(doctimestamp)+'.docx'
vFilePrepared = False
except Exception as e:
add_to_log(' Error while getting date , time and preparing file name '+ str(e))
# While the window exists, fetch events from it
while True:
try:
#Read window events and input values
event, values = window.read()
vFilePath = values[0]
add_to_log(str(values ))
if vFilePrepared == False:
add_to_log(' 3 ')
prepare_word_file(document,values)
add_to_log(' 4 ')
vFilePrepared = True
add_to_log(' 5 ')
except Exception as e:
add_to_log(' Error while Preparing window '+ str(e))
# Steps to be performed on Click "Start"
if event == "Start":
vContinueProcessCapture=True
window.Minimize()
add_to_log('Start Clicked')
listener = mouse.Listener(on_move=on_move,on_click=on_click,on_scroll=on_scroll)
listener.start()
window.find_element("Pause").Update(disabled=False)
window.find_element("Stop").Update(disabled=False)
window.find_element("Start").Update(disabled=True)
window.find_element("Continue").Update(disabled=True)
window.Refresh()
# Steps to be performed on Click "Stop or Window Closed"
if event == "Stop" or event == sg.WIN_CLOSED:
try:
os.rename(vFilePath+r'/'+vFilename, vFilePath+r'/'+vFinalFilenameStartWith+values[1]+' '+str(doctimestamp)+'.docx')
add_to_log('Stop Clicked')#listener.stop()
break
except:
break
if event == "Pause":
vContinueProcessCapture=False
add_to_log('Pause Clicked')
window.find_element("Continue").Update(disabled=False)
window.find_element("Pause").Update(disabled=True)
window.Refresh()
if event == "Continue":
vContinueProcessCapture=True
add_to_log('Continue Clicked')
window.find_element("Continue").Update(disabled=True)
window.find_element("Pause").Update(disabled=False)
window.Refresh()
window.close()
| 53.063745
| 2,471
| 0.736617
|
186abbbb52a54ec3c6aabd3c8a9e35ea862462d1
| 1,086
|
py
|
Python
|
examples/simple_example.py
|
ri-gilfanov/aiohttp-sqlalchemy
|
5324f753f76ea31424e5e5b95e4f92ca68b781f9
|
[
"MIT"
] | 5
|
2021-04-14T15:08:59.000Z
|
2021-12-01T08:05:27.000Z
|
examples/simple_example.py
|
ri-gilfanov/aiohttp-sqlalchemy
|
5324f753f76ea31424e5e5b95e4f92ca68b781f9
|
[
"MIT"
] | 5
|
2021-06-29T08:17:26.000Z
|
2021-07-12T08:17:33.000Z
|
examples/simple_example.py
|
ri-gilfanov/aiohttp-sqlalchemy
|
5324f753f76ea31424e5e5b95e4f92ca68b781f9
|
[
"MIT"
] | 2
|
2021-06-07T23:23:08.000Z
|
2021-06-21T20:12:48.000Z
|
from datetime import datetime
import sqlalchemy as sa
from aiohttp import web
from sqlalchemy import orm
import aiohttp_sqlalchemy
metadata = sa.MetaData()
Base = orm.declarative_base(metadata=metadata)
class MyModel(Base):
__tablename__ = 'my_table'
pk = sa.Column(sa.Integer, primary_key=True)
timestamp = sa.Column(sa.DateTime(), default=datetime.now)
async def main(request):
sa_session = aiohttp_sqlalchemy.get_session(request)
async with sa_session.begin():
sa_session.add(MyModel())
result = await sa_session.execute(sa.select(MyModel))
result = result.scalars()
data = {
instance.pk: instance.timestamp.isoformat()
for instance in result
}
return web.json_response(data)
async def app_factory():
app = web.Application()
aiohttp_sqlalchemy.setup(app, [
aiohttp_sqlalchemy.bind('sqlite+aiosqlite:///'),
])
await aiohttp_sqlalchemy.init_db(app, metadata)
app.add_routes([web.get('/', main)])
return app
if __name__ == '__main__':
web.run_app(app_factory())
| 22.163265
| 62
| 0.69337
|
053aabd5d2d7fdc51444b85991535ec5437c701f
| 964
|
py
|
Python
|
src/sentry/api/endpoints/organization_config_integrations.py
|
apragacz/sf-sentry
|
2fdd6c1195c29a1d401d1cd538c22ea68556699a
|
[
"BSD-3-Clause"
] | 1
|
2021-04-04T07:26:13.000Z
|
2021-04-04T07:26:13.000Z
|
src/sentry/api/endpoints/organization_config_integrations.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 1
|
2018-08-22T16:49:48.000Z
|
2018-08-22T16:49:48.000Z
|
src/sentry/api/endpoints/organization_config_integrations.py
|
pkaminski/sentry
|
27e948283e27d93ca5192ca7b580830e092c25c7
|
[
"BSD-3-Clause"
] | 1
|
2018-07-02T09:46:44.000Z
|
2018-07-02T09:46:44.000Z
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import integrations
from sentry.api.bases.organization import OrganizationEndpoint
class OrganizationConfigIntegrationsEndpoint(OrganizationEndpoint):
def get(self, request, organization):
providers = []
for provider in integrations.all():
providers.append(
{
'key': provider.key,
'name': provider.name,
'config': provider.get_config(),
'setupDialog': dict(
url='/organizations/{}/integrations/{}/setup/'.format(
organization.slug,
provider.key,
),
**provider.setup_dialog_config
)
}
)
return Response({
'providers': providers,
})
| 31.096774
| 78
| 0.51556
|
467b693f0eec7842205b50b0d4285f5205329cf9
| 908
|
py
|
Python
|
service-3/app.py
|
PhilipL1/project-2
|
e59890708585f5c99dfcdd15080c2411db935cbb
|
[
"Unlicense"
] | null | null | null |
service-3/app.py
|
PhilipL1/project-2
|
e59890708585f5c99dfcdd15080c2411db935cbb
|
[
"Unlicense"
] | null | null | null |
service-3/app.py
|
PhilipL1/project-2
|
e59890708585f5c99dfcdd15080c2411db935cbb
|
[
"Unlicense"
] | null | null | null |
from flask import Flask,request
import random
from os import getenv
import datetime
import calendar
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv("DATABASE_URI")
@app.route('/get_day',methods=['GET'])
def get_day():
start_date = datetime.date(2022, 6, 1)
end_date = datetime.date(2022, 7, 1)
time_between_dates = end_date - start_date
days_between_dates = time_between_dates.days
random_number_of_days = random.randrange(days_between_dates)
random_date = start_date + datetime.timedelta(days=random_number_of_days)
date = str(random_date)
#print(date)
day = datetime.datetime.strptime(date, '%Y-%m-%d').weekday()
#print(day)
answer_day = calendar.day_name[day]
return_value =str(answer_day) + "("+str(date) + ")"
return return_value
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| 28.375 | 77 | 0.714758 |
b64297c8889d3169b9cc4ede4d3352e12f403ed2 | 5,585 | py | Python | rutypograph/rule.py | nanopony/rutypograph | c7b02c3d39dedffca403a486c93410320bd204ef | ["MIT"] | null | null | null | rutypograph/rule.py | nanopony/rutypograph | c7b02c3d39dedffca403a486c93410320bd204ef | ["MIT"] | null | null | null | rutypograph/rule.py | nanopony/rutypograph | c7b02c3d39dedffca403a486c93410320bd204ef | ["MIT"] | null | null | null |
import logging
import re
import sys
from typing import Dict, Union, Callable, Match
from typing import List
from typing import Tuple
from .environment import TypographEnvironment
logger = logging.getLogger(__name__)
def compile_pcre_pattern(pattern, additional_flags=0):
"""
    Compile a Perl Compatible Regular Expression into a Python pattern object,
    parsing the "/.../flags" literal form when present.
    :param pattern: plain Python regex, or a PCRE-style "/.../flags" literal
    :param additional_flags: extra re flags OR-ed into the compiled pattern
    :return: compiled regular expression object
"""
try:
if pattern[0] != '/':
return re.compile(pattern, additional_flags)
parts = pattern.split('/')
modifiers = parts[-1]
newpattern = pattern[1:-1 - len(modifiers)]
flags_lut = {'i': re.I, 's': re.S, 'm': re.M, 'u': re.U}
flags = re.U
for i in modifiers:
if i in flags_lut:
flags |= flags_lut[i]
flags |= additional_flags
return re.compile(newpattern, flags)
except Exception as e:
raise type(e)(str(e) + "\nFailed pattern: %s, %s" % (pattern, newpattern)) \
.with_traceback(sys.exc_info()[2])
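# A minimal usage sketch (added for illustration, not part of the original module):
# both plain Python regexes and PCRE-style "/.../flags" literals are accepted; the
# pattern strings and sample text below are invented.
def _compile_pcre_pattern_example() -> None:
    plain = compile_pcre_pattern(r"\d+")       # plain Python regex, compiled as-is
    pcre = compile_pcre_pattern("/hello/i")    # the trailing 'i' modifier maps to re.I
    assert plain.findall("a1b22") == ["1", "22"]
    assert pcre.search("Say HELLO") is not None  # case-insensitive match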
class Rule: # pylint: disable=too-few-public-methods
"""
Base class for a rule of text conversion
"""
def __init__(self) -> None:
self.rule_id = "N/A"
self.disabled = False
self.debug = False
self.description = ""
self.doctests = [] # type: List[Tuple[str, str]]
def apply(self, text: str, environment: TypographEnvironment) -> str:
"""
apply specific rule to text with environment
:param text:
:param environment:
:return:
"""
return text
def __str__(self):
return '[%s] %s' % (self.rule_id, self.description)
def __repr__(self):
return self.__str__()
class RuleRegex(Rule): # pylint: disable=too-few-public-methods, too-many-instance-attributes
def __init__(self, patterns: Union[List[str], str],
replacements: List[Union[str, Callable[[Match[str], TypographEnvironment], str]]] = None,
pcre_keys=0, cycled=False) -> None:
"""
Set of Regex rules of text transformation
:param patterns: Search pattern
:param replacements: Replace pattern
:param pcre_keys: PCRE-compatible regex keys
:param cycled: Run multiple passes until the string is unchanged
"""
super().__init__()
if not isinstance(patterns, list):
patterns = [patterns]
if not isinstance(replacements, list):
replacements = [replacements]
self.compiled_patterns = [compile_pcre_pattern(pattern, pcre_keys) for pattern in patterns]
if len(replacements) == 1:
self.replacements = replacements * len(patterns)
elif len(replacements) == len(patterns):
self.replacements = replacements
else:
            raise ValueError("Number of patterns and replacements don't match!")
self.cycled = cycled
def apply(self, text: str, environment: TypographEnvironment) -> str:
for pattern, replacement in zip(self.compiled_patterns, self.replacements):
while True:
text_dry = text
if callable(replacement):
text_wet = pattern.sub(lambda match: replacement(match, environment), text_dry) # pylint: disable=cell-var-from-loop
else:
text_wet = pattern.sub(replacement, text_dry)
logger.debug("pattern: %s || dry: %s || result: %s", pattern, text_dry , text_wet)
text = text_wet
if not self.cycled:
break
if text_dry == text:
break
return text
class RuleFunction(Rule): # pylint: disable=too-few-public-methods
"""
Rule for pass text through function
"""
def __init__(self, fn) -> None:
super().__init__()
self.fn = fn
def apply(self, text: str, environment: TypographEnvironment) -> str:
return self.fn(text, environment)
class RuleReplace(RuleRegex): # pylint: disable=too-few-public-methods
"""
simple replace rule
"""
def __init__(self, pattern, replacement, icase) -> None:
"""
:param pattern: search pattern
:param replacement: replacement pattern
:param icase: ignore case
"""
super().__init__(re.escape(pattern), replacement, re.IGNORECASE if icase else 0)
def _ruledef_to_rule(ruledef) -> Rule:
fn = ruledef.get('function', None)
if fn is not None:
# @todo check signature
return RuleFunction(fn)
elif ruledef.get('simple_replace'):
icase = ruledef.get('case_sensitive', False)
return RuleReplace(ruledef['pattern'], ruledef['replacement'], icase)
cycled = ruledef.get('cycled', False)
return RuleRegex(ruledef['pattern'], ruledef['replacement'], cycled=cycled)
def _parse_ruledef(ruledef) -> Rule:
"""
    :param ruledef: dict, in a format compatible with mdash's own definitions
:return: Rule
"""
rule = _ruledef_to_rule(ruledef)
if rule is not None:
rule.rule_id = ruledef.get('rule_id', 'N/A')
rule.disabled = ruledef.get('disabled', False)
rule.description = ruledef.get('description', '-')
rule.debug = ruledef.get('debug', False)
rule.doctests = ruledef.get('doctests', [])
return rule
def convert_ruledefs_to_rules(ruledefs: List[Dict]) -> List[Rule]:
rules = [_parse_ruledef(ruledef) for ruledef in ruledefs]
return rules
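# A minimal usage sketch (added for illustration, not part of the original module):
# build a rule from an mdash-style ruledef and apply it. The ruledef and sample text
# are invented; a real caller would pass a TypographEnvironment instead of None, but
# the environment is only consulted by callable replacements.
def _convert_ruledefs_example() -> None:
    ruledefs = [{
        'rule_id': 'demo.ellipsis',
        'description': 'collapse three dots into an ellipsis character',
        'pattern': r'/\.{3}/',
        'replacement': '\u2026',
    }]
    rules = convert_ruledefs_to_rules(ruledefs)
    assert rules[0].apply('Wait...', None) == 'Wait\u2026'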
| 31.027778 | 137 | 0.610743 |
7b330f58c85ab3d35d646dca4622c0e89854faf8 | 391 | bzl | Python | tools/build_defs/shell_toolchain/toolchains/function_and_call.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | ["Apache-2.0"] | 2 | 2021-03-18T04:14:56.000Z | 2021-03-18T05:11:09.000Z | tools/build_defs/shell_toolchain/toolchains/function_and_call.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | ["Apache-2.0"] | null | null | null | tools/build_defs/shell_toolchain/toolchains/function_and_call.bzl | slsyy/rules_foreign_cc | 34ab7f86a3ab1b2381cb4820d08a1c892f55bf54 | ["Apache-2.0"] | 1 | 2021-03-01T17:51:22.000Z | 2021-03-01T17:51:22.000Z |
# buildifier: disable=module-docstring
# buildifier: disable=name-conventions
FunctionAndCall = provider(
doc = "Wrapper to pass function definition and (if custom) function call",
fields = {
"call": "How to call defined function, if different from <function-name> <arg1> ...<argn>",
"text": "Function body, without wrapping function <name>() {} fragment.",
},
)
| 39.1 | 99 | 0.675192 |
fc5a5e1186141eea8bc2e301d3cef9461b9ce8f9 | 124 | py | Python | 29_Comparison_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | ["MIT"] | null | null | null | 29_Comparison_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | ["MIT"] | null | null | null | 29_Comparison_Operator/main.py | jmmedel/Python-Tutorials- | 243ae9a6b51a4fce03dd90c02da13b859cbfbe5f | ["MIT"] | null | null | null |
# Operator    Name     Example
# ==          Equal    x == y
x = 5
y = 3
print(x == y)  # False
x = 3
y = 3
print(x == y)  # True
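# A couple of further comparison operators in the same style (added for illustration,
# not part of the original tutorial file).
# !=          Not equal       x != y
# >           Greater than    x > y
x = 5
y = 3
print(x != y)  # True
print(x > y)   # True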
| 10.333333 | 24 | 0.459677 |
6abdff12170dcfc180bbe59be8cb5158d9b760f7 | 30,223 | py | Python | nuitka/MainControl.py | leojay/Nuitka | 131aaca9b858279233dd60f546d0eb8e37c3828f | ["Apache-2.0"] | null | null | null | nuitka/MainControl.py | leojay/Nuitka | 131aaca9b858279233dd60f546d0eb8e37c3828f | ["Apache-2.0"] | null | null | null | nuitka/MainControl.py | leojay/Nuitka | 131aaca9b858279233dd60f546d0eb8e37c3828f | ["Apache-2.0"] | null | null | null |
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" This is the main actions of Nuitka.
This can do all the steps to translate one module to a target language using
the Python C/API, to compile it to either an executable or an extension
module, potentially with bytecode included and used libraries copied into
a distribution folder.
"""
import os
import shutil
import subprocess
import sys
from logging import info, warning
from nuitka.finalizations.FinalizeMarkups import getImportedNames
from nuitka.importing import Importing, Recursion
from nuitka.Options import getPythonFlags
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import (
getPythonABI,
getSupportedPythonVersions,
isUninstalledPython,
python_version,
python_version_str
)
from nuitka.tree import SyntaxErrors
from nuitka.utils import Execution, InstanceCounters, MemoryUsage, Utils
from nuitka.utils.AppDirs import getCacheDir
from nuitka.utils.FileOperations import (
deleteFile,
hasFilenameExtension,
listDir,
makePath,
removeDirectory
)
from . import ModuleRegistry, Options, TreeXML
from .build import SconsInterface
from .codegen import CodeGeneration, ConstantCodes, Reports
from .finalizations import Finalization
from .freezer.BytecodeModuleFreezer import generateBytecodeFrozenCode
from .freezer.Standalone import copyUsedDLLs, detectEarlyImports
from .optimizations import Optimization
from .tree import Building
def createNodeTree(filename):
""" Create a node tree.
Turn that source code into a node tree structure. If recursion into
imported modules is available, more trees will be available during
optimization, or immediately through recursed directory paths.
"""
# First, build the raw node tree from the source code.
main_module = Building.buildModuleTree(
filename = filename,
package = None,
is_top = True,
is_main = not Options.shallMakeModule()
)
ModuleRegistry.addRootModule(main_module)
# First remove old object files and old generated files, old binary or
# module, and standalone mode program directory if any, they can only do
# harm.
source_dir = getSourceDirectoryPath(main_module)
if not Options.shallOnlyExecCCompilerCall():
cleanSourceDirectory(source_dir)
# Prepare the ".dist" directory, throwing away what was there before.
if Options.isStandaloneMode():
standalone_dir = getStandaloneDirectoryPath(main_module)
removeDirectory(
path = standalone_dir,
ignore_errors = True
)
makePath(standalone_dir)
deleteFile(
path = getResultFullpath(main_module),
must_exist = False
)
# Second, do it for the directories given.
for plugin_filename in Options.getShallFollowExtra():
Recursion.checkPluginPath(
plugin_filename = plugin_filename,
module_package = None
)
for pattern in Options.getShallFollowExtraFilePatterns():
Recursion.checkPluginFilenamePattern(
pattern = pattern
)
for package_name in Options.getMustIncludePackages():
package_package, package_directory, kind = Importing.findModule(
importing = None,
module_name = package_name,
parent_package = None,
level = 0,
warn = False
)
if kind != "absolute":
sys.exit("Error, failed to locate package %r." % package_name)
Recursion.checkPluginPath(
plugin_filename = package_directory,
module_package = package_package
)
for module_name in Options.getMustIncludeModules():
module_package, module_filename, kind = Importing.findModule(
importing = None,
module_name = module_name,
parent_package = None,
level = 0,
warn = False
)
if kind != "absolute":
sys.exit("Error, failed to locate module %r." % module_name)
Recursion.checkPluginSinglePath(
plugin_filename = module_filename,
module_package = module_package
)
# Then optimize the tree and potentially recursed modules.
Optimization.optimize(main_module.getOutputFilename())
if Options.isExperimental("check_xml_persistence"):
for module in ModuleRegistry.getRootModules():
if module.isMainModule():
return module
assert False
else:
# Main module might change behind our back, look it up again.
return main_module
def dumpTreeXML(tree):
xml_root = tree.asXml()
TreeXML.dump(xml_root)
def getTreeFilenameWithSuffix(tree, suffix):
return tree.getOutputFilename() + suffix
def getSourceDirectoryPath(main_module):
assert main_module.isCompiledPythonModule()
return Options.getOutputPath(
path = os.path.basename(
getTreeFilenameWithSuffix(main_module, ".build")
)
)
def getStandaloneDirectoryPath(main_module):
return Options.getOutputPath(
path = os.path.basename(
getTreeFilenameWithSuffix(main_module, ".dist")
)
)
def getResultBasepath(main_module):
assert main_module.isCompiledPythonModule()
if Options.isStandaloneMode():
return os.path.join(
getStandaloneDirectoryPath(main_module),
os.path.basename(
getTreeFilenameWithSuffix(main_module, "")
)
)
else:
return Options.getOutputPath(
path = os.path.basename(
getTreeFilenameWithSuffix(main_module, "")
)
)
def getResultFullpath(main_module):
""" Get the final output binary result full path.
"""
result = getResultBasepath(main_module)
if Options.shallMakeModule():
result += Utils.getSharedLibrarySuffix()
else:
if Options.getOutputFilename() is not None:
result = Options.getOutputFilename()
elif Utils.getOS() == "Windows":
result += ".exe"
elif not Options.isStandaloneMode():
result += ".bin"
return result
def cleanSourceDirectory(source_dir):
extensions = (
".bin", ".c", ".cpp", ".exp", ".h",
".lib", ".manifest", ".o", ".obj",
".os", ".rc", ".res", ".S", ".txt"
)
if os.path.isdir(source_dir):
for path, _filename in listDir(source_dir):
if hasFilenameExtension(path, extensions):
deleteFile(path, must_exist = True)
else:
makePath(source_dir)
def pickSourceFilenames(source_dir, modules):
collision_filenames = set()
seen_filenames = set()
# Our output.
module_filenames = {}
def getFilenames(module):
base_filename = os.path.join(
source_dir,
"module." + module.getFullName()
if not module.isInternalModule() else
module.getFullName()
)
        # Note: Could detect if the file system is case sensitive in source_dir
# or not, but that's probably not worth the effort. False positives do
# no harm at all.
collision_filename = os.path.normcase(base_filename)
return base_filename, collision_filename
# First pass, check for collisions.
for module in modules:
if module.isPythonShlibModule():
continue
        base_filename, collision_filename = getFilenames(module)
if collision_filename in seen_filenames:
collision_filenames.add(collision_filename)
seen_filenames.add(collision_filename)
# Count up for colliding filenames.
collision_counts = {}
# Second pass, this time sorted, so we get deterministic results. We will
# apply an @1/@2 to disambiguate the filenames.
for module in sorted(modules, key = lambda x : x.getFullName()):
if module.isPythonShlibModule():
continue
        base_filename, collision_filename = getFilenames(module)
if collision_filename in collision_filenames:
collision_counts[ collision_filename ] = \
collision_counts.get(collision_filename, 0) + 1
hash_suffix = "@%d" % collision_counts[ collision_filename ]
else:
hash_suffix = ""
base_filename += hash_suffix
module_filenames[module] = base_filename + ".c"
return module_filenames
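# Illustration (added comment, not in the original source): on Windows, where
# os.path.normcase folds case, two hypothetical modules "Foo.bar" and "foo.Bar"
# collide in the first pass above, so the second pass emits "module.Foo.bar@1.c"
# and "module.foo.Bar@2.c" instead of letting one overwrite the other.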
standalone_entry_points = []
def makeSourceDirectory(main_module):
""" Get the full list of modules imported, create code for all of them.
"""
# We deal with a lot of details here, but rather one by one, and split makes
# no sense, pylint: disable=too-many-branches,too-many-locals,too-many-statements
assert main_module.isCompiledPythonModule()
# The global context used to generate code.
global_context = CodeGeneration.makeGlobalContext()
# assert main_module in ModuleRegistry.getDoneModules()
# We might have chosen to include it as bytecode, and only compiled it for
# fun, and to find its imports. In this case, now we just can drop it. Or
# a module may shadow a frozen module, but be a different one, then we can
# drop the frozen one.
# TODO: This really should be done when the compiled module comes into
# existence.
for module in ModuleRegistry.getDoneUserModules():
if module.isCompiledPythonModule():
uncompiled_module = ModuleRegistry.getUncompiledModule(
module_name = module.getFullName(),
module_filename = module.getCompileTimeFilename()
)
if uncompiled_module is not None:
# We now need to decide which one to keep, compiled or uncompiled
# module. Some uncompiled modules may have been asked by the user
# or technically required. By default, frozen code if it exists
# is preferred, as it will be from standalone mode adding it.
if uncompiled_module.isUserProvided() or \
uncompiled_module.isTechnical():
ModuleRegistry.removeDoneModule(module)
else:
ModuleRegistry.removeUncompiledModule(uncompiled_module)
# Lets check if the recurse-to modules are actually present, and warn the
# user if one of those was not found.
for any_case_module in Options.getShallFollowModules():
if '*' in any_case_module or '{' in any_case_module:
continue
for module in ModuleRegistry.getDoneUserModules():
if module.getFullName() == any_case_module:
break
else:
warning(
"Didn't recurse to '%s', apparently not used." % \
any_case_module
)
# Prepare code generation, i.e. execute finalization for it.
for module in ModuleRegistry.getDoneModules():
if module.isCompiledPythonModule():
Finalization.prepareCodeGeneration(module)
# Pick filenames.
source_dir = getSourceDirectoryPath(main_module)
module_filenames = pickSourceFilenames(
source_dir = source_dir,
modules = ModuleRegistry.getDoneModules()
)
# First pass, generate code and use constants doing so, but prepare the
# final code generation only, because constants code will be added at the
# end only.
prepared_modules = {}
for module in ModuleRegistry.getDoneModules():
if module.isCompiledPythonModule():
c_filename = module_filenames[module]
prepared_modules[c_filename] = CodeGeneration.prepareModuleCode(
global_context = global_context,
module = module,
module_name = module.getFullName(),
)
# Main code constants need to be allocated already too.
if module is main_module and not Options.shallMakeModule():
prepared_modules[c_filename][1].getConstantCode(0)
# Second pass, generate the actual module code into the files.
for module in ModuleRegistry.getDoneModules():
if module.isCompiledPythonModule():
c_filename = module_filenames[module]
template_values, module_context = prepared_modules[c_filename]
source_code = CodeGeneration.generateModuleCode(
module_context = module_context,
template_values = template_values
)
writeSourceCode(
filename = c_filename,
source_code = source_code
)
if Options.isShowInclusion():
info("Included compiled module '%s'." % module.getFullName())
elif module.isPythonShlibModule():
target_filename = os.path.join(
getStandaloneDirectoryPath(main_module),
*module.getFullName().split('.')
)
if Utils.getOS() == "Windows":
target_filename += ".pyd"
else:
target_filename += ".so"
target_dir = os.path.dirname(target_filename)
if not os.path.isdir(target_dir):
makePath(target_dir)
shutil.copyfile(
module.getFilename(),
target_filename
)
standalone_entry_points.append(
(
module.getFilename(),
target_filename,
module.getPackage()
)
)
elif module.isUncompiledPythonModule():
pass
else:
assert False, module
writeSourceCode(
filename = os.path.join(
source_dir,
"__constants.c"
),
source_code = ConstantCodes.getConstantsDefinitionCode(
context = global_context
)
)
helper_decl_code, helper_impl_code = CodeGeneration.generateHelpersCode(
ModuleRegistry.getDoneUserModules()
)
writeSourceCode(
filename = os.path.join(
source_dir,
"__helpers.h"
),
source_code = helper_decl_code
)
writeSourceCode(
filename = os.path.join(
source_dir,
"__helpers.c"
),
source_code = helper_impl_code
)
def _asBoolStr(value):
return "true" if value else "false"
def runScons(main_module, quiet):
# Scons gets transported many details, that we express as variables, and
# have checks for them, leading to many branches and statements,
# pylint: disable=too-many-branches,too-many-statements
options = {
"name" : os.path.basename(
getTreeFilenameWithSuffix(main_module, "")
),
"result_name" : getResultBasepath(main_module),
"source_dir" : getSourceDirectoryPath(main_module),
"debug_mode" : _asBoolStr(Options.isDebug()),
"python_debug" : _asBoolStr(Options.isPythonDebug()),
"unstripped_mode" : _asBoolStr(Options.isUnstripped()),
"module_mode" : _asBoolStr(Options.shallMakeModule()),
"full_compat" : _asBoolStr(Options.isFullCompat()),
"experimental" : ','.join(Options.getExperimentalIndications()),
"trace_mode" : _asBoolStr(Options.shallTraceExecution()),
"python_version" : python_version_str,
"target_arch" : Utils.getArchitecture(),
"python_prefix" : sys.prefix,
"nuitka_src" : SconsInterface.getSconsDataPath(),
"nuitka_cache" : getCacheDir(),
"module_count" : "%d" % (
1 + \
len(ModuleRegistry.getDoneUserModules()) + \
len(ModuleRegistry.getUncompiledNonTechnicalModules())
)
}
if not Options.shallMakeModule():
options["result_exe"] = getResultFullpath(main_module)
# Ask Scons to cache on Windows, except where the directory is thrown
    # away. On non-Windows you should use ccache instead.
if not Options.isRemoveBuildDir() and Utils.getOS() == "Windows":
options["cache_mode"] = "true"
if Options.isLto():
options["lto_mode"] = "true"
if Options.shallDisableConsoleWindow():
options["win_disable_console"] = "true"
if Options.isStandaloneMode():
options["standalone_mode"] = "true"
if not Options.isStandaloneMode() and \
not Options.shallMakeModule() and \
isUninstalledPython():
options["uninstalled_python"] = "true"
if ModuleRegistry.getUncompiledTechnicalModules():
options["frozen_modules"] = str(
len(ModuleRegistry.getUncompiledTechnicalModules())
)
if Options.isShowScons():
options["show_scons"] = "true"
if Options.isMingw64():
options["mingw_mode"] = "true"
if Options.getMsvcVersion():
msvc_version = Options.getMsvcVersion()
msvc_version = msvc_version.replace("exp", "Exp")
if '.' not in msvc_version:
msvc_version += ".0"
options["msvc_version"] = msvc_version
if Options.isClang():
options["clang_mode"] = "true"
if Options.getIconPath():
options["icon_path"] = Options.getIconPath()
if Options.isProfile():
options["profile_mode"] = "true"
if "no_warnings" in getPythonFlags():
options["no_python_warnings"] = "true"
if python_version < 300 and sys.flags.py3k_warning:
options["python_sysflag_py3k_warning"] = "true"
if python_version < 300 and (sys.flags.division_warning or sys.flags.py3k_warning):
options["python_sysflag_division_warning"] = "true"
if sys.flags.bytes_warning:
options["python_sysflag_bytes_warning"] = "true"
if int(os.environ.get("NUITKA_SITE_FLAG", "no_site" in Options.getPythonFlags())):
options["python_sysflag_no_site"] = "true"
if "trace_imports" in Options.getPythonFlags():
options["python_sysflag_verbose"] = "true"
if python_version < 300 and sys.flags.unicode:
options["python_sysflag_unicode"] = "true"
if python_version >= 370 and sys.flags.utf8_mode:
options["python_sysflag_utf8"] = "true"
abiflags = getPythonABI()
if abiflags:
options["abiflags"] = abiflags
return SconsInterface.runScons(options, quiet), options
def writeSourceCode(filename, source_code):
# Prevent accidental overwriting. When this happens the collision detection
# or something else has failed.
assert not os.path.isfile(filename), filename
if python_version >= 300:
with open(filename, "wb") as output_file:
output_file.write(source_code.encode("latin1"))
else:
with open(filename, 'w') as output_file:
output_file.write(source_code)
def writeBinaryData(filename, binary_data):
# Prevent accidental overwriting. When this happens the collision detection
# or something else has failed.
assert not os.path.isfile(filename), filename
assert type(binary_data) is bytes
with open(filename, "wb") as output_file:
output_file.write(binary_data)
def callExecPython(args, clean_path, add_path):
old_python_path = os.environ.get("PYTHONPATH", None)
if clean_path and old_python_path is not None:
os.environ["PYTHONPATH"] = ""
if add_path:
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] += ':' + Options.getOutputDir()
else:
os.environ["PYTHONPATH"] = Options.getOutputDir()
# We better flush these, "os.execl" won't do it anymore.
sys.stdout.flush()
sys.stderr.flush()
# Add the main arguments, previous separated.
args += Options.getPositionalArgs()[1:] + Options.getMainArgs()
Execution.callExec(args)
def executeMain(binary_filename, clean_path):
args = (binary_filename, binary_filename)
if Options.shallRunInDebugger():
gdb_path = Execution.getExecutablePath("gdb")
if gdb_path is None:
sys.exit("Error, no 'gdb' binary found in path.")
args = (gdb_path, "gdb", "-ex=run", "-ex=where", "--args", binary_filename)
callExecPython(
clean_path = clean_path,
add_path = False,
args = args
)
def executeModule(tree, clean_path):
python_command = "__import__('%s')" % tree.getName()
args = (
sys.executable,
"python",
"-c",
python_command,
)
callExecPython(
clean_path = clean_path,
add_path = True,
args = args
)
def compileTree(main_module):
source_dir = getSourceDirectoryPath(main_module)
if not Options.shallOnlyExecCCompilerCall():
# Now build the target language code for the whole tree.
makeSourceDirectory(
main_module = main_module
)
frozen_code = generateBytecodeFrozenCode()
if frozen_code is not None:
writeSourceCode(
filename = os.path.join(
source_dir,
"__frozen.c"
),
source_code = frozen_code
)
writeBinaryData(
filename = os.path.join(
source_dir,
"__constants.bin"
),
binary_data = ConstantCodes.stream_data.getBytes()
)
else:
source_dir = getSourceDirectoryPath(main_module)
if not os.path.isfile(os.path.join(source_dir, "__helpers.h")):
sys.exit("Error, no previous build directory exists.")
if Options.isShowProgress() or Options.isShowMemory():
info(
"Total memory usage before running scons: {memory}:".format(
memory = MemoryUsage.getHumanReadableProcessMemoryUsage()
)
)
if Options.isShowMemory():
InstanceCounters.printStats()
if Options.isDebug():
Reports.doMissingOptimizationReport()
if Options.shallNotDoExecCCompilerCall():
return True, {}
# Run the Scons to build things.
result, options = runScons(
main_module = main_module,
quiet = not Options.isShowScons()
)
return result, options
def handleSyntaxError(e):
# Syntax or indentation errors, output them to the user and abort. If
# we are not in full compat, and user has not specified the Python
# versions he wants, tell him about the potential version problem.
error_message = SyntaxErrors.formatOutput(e)
if not Options.isFullCompat():
if python_version < 300:
suggested_python_version_str = getSupportedPythonVersions()[-1]
else:
suggested_python_version_str = "2.7"
error_message += """
Nuitka is very syntax compatible with standard Python. It is currently running
with Python version '%s', you might want to specify more clearly with the use
of the precise Python interpreter binary and '-m nuitka', e.g. use this
'python%s -m nuitka' option, if that's not the one the program expects.
""" % (python_version_str, suggested_python_version_str)
sys.exit(error_message)
data_files = []
def main():
""" Main program flow of Nuitka
At this point, options will be parsed already, Nuitka will be executing
in the desired version of Python with desired flags, and we just get
to execute the task assigned.
We might be asked to only re-compile generated C, dump only an XML
representation of the internal node tree after optimization, etc.
"""
# Main has to fulfill many options, leading to many branches and statements
# to deal with them. pylint: disable=too-many-branches
filename = Options.getPositionalArgs()[0]
# Inform the importing layer about the main script directory, so it can use
# it when attempting to follow imports.
Importing.setMainScriptDirectory(
main_dir = os.path.dirname(os.path.abspath(filename))
)
# Detect to be frozen modules if any, so we can consider to not recurse
# to them.
if Options.isStandaloneMode():
for module in detectEarlyImports():
ModuleRegistry.addUncompiledModule(module)
if module.getName() == "site":
origin_prefix_filename = os.path.join(
os.path.dirname(module.getCompileTimeFilename()),
"orig-prefix.txt"
)
if os.path.isfile(origin_prefix_filename):
data_files.append(
(filename, "orig-prefix.txt")
)
# Turn that source code into a node tree structure.
try:
main_module = createNodeTree(
filename = filename
)
except (SyntaxError, IndentationError) as e:
handleSyntaxError(e)
if Options.shallDumpBuiltTreeXML():
# XML output only.
for module in ModuleRegistry.getDoneModules():
dumpTreeXML(module)
else:
# Make the actual compilation.
result, options = compileTree(
main_module = main_module
)
# Exit if compilation failed.
if not result:
sys.exit(1)
if Options.shallNotDoExecCCompilerCall():
if Options.isShowMemory():
MemoryUsage.showMemoryTrace()
sys.exit(0)
if Options.isStandaloneMode():
binary_filename = options["result_exe"]
standalone_entry_points.insert(
0,
(binary_filename, binary_filename, None)
)
dist_dir = getStandaloneDirectoryPath(main_module)
for module in ModuleRegistry.getDoneUserModules():
standalone_entry_points.extend(
Plugins.considerExtraDlls(dist_dir, module)
)
for module in ModuleRegistry.getUncompiledModules():
standalone_entry_points.extend(
Plugins.considerExtraDlls(dist_dir, module)
)
copyUsedDLLs(
source_dir = getSourceDirectoryPath(main_module),
dist_dir = dist_dir,
standalone_entry_points = standalone_entry_points
)
for module in ModuleRegistry.getDoneModules():
data_files.extend(
Plugins.considerDataFiles(module)
)
for source_filename, target_filename in data_files:
target_filename = os.path.join(
getStandaloneDirectoryPath(main_module),
target_filename
)
makePath(os.path.dirname(target_filename))
shutil.copy2(
source_filename,
target_filename
)
# Remove the source directory (now build directory too) if asked to.
if Options.isRemoveBuildDir():
removeDirectory(
path = getSourceDirectoryPath(main_module),
ignore_errors = False
)
# Modules should not be executable, but Scons creates them like it, fix
# it up here. TODO: Move inside scons file and avoid subprocess call.
if Utils.getOS() != "Windows" and Options.shallMakeModule():
subprocess.call(
(
"chmod",
"-x",
getResultFullpath(main_module)
)
)
if Options.shallMakeModule() and Options.shallCreatePyiFile():
pyi_filename = getResultBasepath(main_module) + ".pyi"
with open(pyi_filename, 'w') as pyi_file:
pyi_file.write(
"""\
# This file was generated by Nuitka and describes the types of the
# created shared library.
# At this time it lists only the imports made and can be used by the
# tools that bundle libraries, including Nuitka itself. For instance
# standalone mode usage of the created library will need it.
# In the future, this will also contain type information for values
# in the module, so IDEs will use this. Therefore please include it
# when you make software releases of the extension module that it
# describes.
%(imports)s
# This is not Python source even if it looks so. Make it clear for
# now. This was decided by PEP 484 designers.
__name__ = ...
""" % {
"imports" : '\n'.join(
"import %s" % module_name
for module_name in
getImportedNames()
)
}
)
# Execute the module immediately if option was given.
if Options.shallExecuteImmediately():
if Options.shallMakeModule():
executeModule(
tree = main_module,
clean_path = Options.shallClearPythonPathEnvironment()
)
else:
executeMain(
binary_filename = getResultFullpath(main_module),
clean_path = Options.shallClearPythonPathEnvironment()
)
| 32.497849 | 87 | 0.625881 |
75fd85527ec7798b7a07ac20d1a5b91cbfab1b46 | 14,090 | py | Python | NLI_task/roberta_large_nli_LF.py | jizhi-zhang/Counterfactual_Reasoning_Model | 3c4eb3e022e66e8626facc6fc772141a0079b807 | ["MIT"] | 7 | 2021-07-21T07:11:56.000Z | 2022-01-10T13:01:14.000Z | NLI_task/roberta_large_nli_LF.py | jizhi-zhang/Counterfactual_Reasoning_Model | 3c4eb3e022e66e8626facc6fc772141a0079b807 | ["MIT"] | 1 | 2021-07-28T08:58:49.000Z | 2021-08-02T08:04:03.000Z | NLI_task/roberta_large_nli_LF.py | jizhi-zhang/Counterfactual_Reasoning_Model | 3c4eb3e022e66e8626facc6fc772141a0079b807 | ["MIT"] | 1 | 2021-07-30T13:52:52.000Z | 2021-07-30T13:52:52.000Z |
from transformers import AutoTokenizer
# from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
#from pytorch_pretrained_bert import BertTokenizer, BertModel
from transformers import BertModel,BertTokenizer, BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup
import argparse
import numpy as np
import sys
import torch.optim as optim
from torch import nn
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from collections import namedtuple
from tqdm import tqdm
import os
import torch.nn as nn
import matplotlib.pyplot as plt
import random
import copy
parser = argparse.ArgumentParser()
parser.add_argument("--device", type= int, default= 0)
parser.add_argument("--train_file", type =str, default= "./dataset/NLI/all_combined/train.tsv")
parser.add_argument("--val_file", type =str, default= "./dataset/NLI/all_combined/dev.tsv")
parser.add_argument("--test_file", type=str, default= "./dataset/NLI/all_combined/test.tsv")
parser.add_argument("--orig_train_file", type =str, default= "./dataset/NLI/original/train.tsv")
parser.add_argument("--orig_val_file", type =str, default= "./dataset/NLI/original/dev.tsv")
parser.add_argument("--orig_test_file", type=str, default= "./dataset/NLI/original/test.tsv")
parser.add_argument("--revised_train_file", type =str, default= "./dataset/NLI/revised_combined/train.tsv")
parser.add_argument("--revised_val_file", type =str, default= "./dataset/NLI/revised_combined/dev.tsv")
parser.add_argument("--revised_test_file", type=str, default= "./dataset/NLI/revised_combined/test.tsv")
parser.add_argument("--lr", type=float, default= 1e-3)
parser.add_argument("--batchsize", type=int , default= 8)
parser.add_argument("--epochs", type=int , default= 20)
parser.add_argument("--run_seed", type = int, default= 4)
parser.add_argument("--save_folder", type=str, default ="./NLI_tasks/roberta_large_nli_EF/")
parser.add_argument("--log_name", type= str, default= "cf_inference_out.log")
parser.add_argument("--plot_name", type = str, default= "result_plot.jpg")
parser.add_argument("--cf_model_folder", type = str, default="./NLI_tasks/roberta_large_nli_cf/")
args = parser.parse_args()
device = torch.device("cuda:"+str(args.device))
tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
class cf_conv_linear_net (nn.Module):
    def __init__(self, hidden_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(1, hidden_channels, (3,1))
        self.fc = nn.Linear(hidden_channels * 3, 3)
def forward(self, x):
out = torch.flatten(self.conv1(x), start_dim= 1)
return self.fc(out)
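# Shape sketch (added for illustration, not part of the original script): the network
# consumes a batch of 1x3x3 "logit grids" (rows of real, counterfactual and delta
# logits, three NLI classes per row) and produces three class logits per example.
def _cf_net_shape_example() -> None:
    net = cf_conv_linear_net(10)
    dummy = torch.zeros(8, 1, 3, 3)  # batch of 8, one channel, 3x3 logit grid
    assert net(dummy).shape == (8, 3)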
def get_label(text):
if text == "neutral":
return 1
elif text == "contradiction":
return 0
elif text == "entailment":
return 2
def create_batch_with_delta_cf(orig_data, cf_data, batchsize, model, tokenizer):
model.eval()
with torch.no_grad():
count = 0
batch_list = []
data_indexs = [i for i in range(len(orig_data))]
random.shuffle(data_indexs)
for index in tqdm(data_indexs):
if count == 0:
label =torch.stack([torch.tensor(get_label(orig_data["gold_label"][index]))])
sent_list = [(orig_data["sentence1"][index],orig_data['sentence2'][index])]
for i in range(4*index, 4 * index + 4):
sent_list.append((cf_data["sentence1"][i],cf_data['sentence2'][i]))
delta_embed, output =calc_cf_sent_list(sent_list, model, tokenizer)
delta_embed_list = [delta_embed]
output = torch.cat(output)
output_list = [output]
count = count + 1
else:
# label =torch.stack([torch.tensor(get_label(orig_data["gold_label"][index]))])
sent_list = [(orig_data["sentence1"][index],orig_data['sentence2'][index])]
for i in range(4*index, 4 * index + 4):
sent_list.append((cf_data["sentence1"][i],cf_data['sentence2'][i]))
delta_embed, output =calc_cf_sent_list(sent_list, model, tokenizer)
delta_embed_list.append(delta_embed)
output = torch.cat(output)
output_list.append(output)
label = torch.cat([label, torch.stack([torch.tensor(get_label(orig_data["gold_label"][index]))])])
count = count + 1
if count == batchsize:
count = 0
# embed_list = torch.stack([torch.stack([j]) for j in embed_list])
batch_list.append((label, delta_embed_list, output_list))
if count != 0:
# embed_list = torch.stack([torch.stack([j]) for j in embed_list])
batch_list.append((label, delta_embed_list, output_list))
return batch_list
def calc_cf_sent_list(sent_list, model, tokenizer):
model.eval()
with torch.no_grad():
real_out = model(**tokenizer(sent_list[:1], padding=True, truncation=True, max_length=512, return_tensors='pt' ).to(device)).logits.detach()
cf_out = model(**tokenizer(sent_list[1:5], padding=True, truncation=True, max_length=512, return_tensors='pt').to(device)).logits.detach()
delta_embed = model.roberta(**tokenizer(sent_list[1:5], padding=True, truncation=True, max_length=512, return_tensors='pt').to(device)).last_hidden_state.detach()[:,:1,:]\
- torch.cat([model.roberta(**tokenizer(sent_list[:1], padding=True, truncation=True, max_length=512, return_tensors='pt').to(device)).last_hidden_state.detach()[:,:1,:] for k in range(4)])
# delta_out = model.classifier(delta_embed).detach()
return delta_embed, [cf_out, real_out]
def isNan_2(a):
return a != a
def mk_dir(path):
try:
os.mkdir(path)
except:
pass
train_data = pd.read_csv(args.train_file, sep= "\t")
val_data = pd.read_csv(args.val_file, sep ="\t")
test_data = pd.read_csv(args.test_file, sep = "\t")
orig_train_data = pd.read_csv(args.orig_train_file, sep= "\t")
orig_val_data = pd.read_csv(args.orig_val_file, sep ="\t")
orig_test_data = pd.read_csv(args.orig_test_file, sep = "\t")
revised_train_data = pd.read_csv(args.revised_train_file, sep= "\t")
revised_val_data = pd.read_csv(args.revised_val_file, sep ="\t")
revised_test_data = pd.read_csv(args.revised_test_file, sep = "\t")
def model_test(batch_train, classifier, cf_net):
cf_net = cf_net.eval()
classifier = classifier.eval()
correct=0
total=0
with torch.no_grad():
for index in tqdm(range(len(batch_train))):
label = batch_train[index][0].to(device)
# encoder = tokenizer(batch_train[index][1], padding=True, truncation=True, max_length=512, return_tensors='pt' )
out= classifier(torch.cat(batch_train[index][1])).view(len(label),4,3).mean(1)
output = cf_net(torch.cat([torch.stack(batch_train[index][2]).view(len(label),2,3),out.view(len(label),1,3)], dim=1).view(len(label),1,3,3))
# output = out_net(output)
_,predict = torch.max(output,1)
total+=label.size(0)
correct += (predict == label).sum().item()
return 100 * correct/total
def model_test_for_option2(batch_train, classifier, cf_net):
classifier = classifier.eval()
cf_net = cf_net.eval()
total = 0
correct = 0
with torch.no_grad():
for index in tqdm(range(len(batch_train))):
label = batch_train[index][0].to(device)
delta_classifier_out = classifier(torch.cat(batch_train[index][1])).view(len(label),4,3)
for k in range(len(batch_train[index][2])):
label_temp = label[k].view(1)
real_classifier1_out = batch_train[index][2][k][-1]
cf_classifier1_out = batch_train[index][2][k][0:4]
delta_classifier_out_temp = delta_classifier_out[k]
for m in range(4):
input_for_conv_network = torch.cat([real_classifier1_out,cf_classifier1_out[m],delta_classifier_out_temp[m]]).view(1,1,3,3)
if m==0:
conv_network_out = cf_net(input_for_conv_network)
else:
conv_network_out += cf_net(input_for_conv_network)
conv_network_out = conv_network_out/4
total += 1
_,predict = torch.max(conv_network_out,1)
if predict == label_temp:
correct += 1
return correct/total * 100
def shuffle_from_bs_1(batch_train_bs_1, batchsize):
batch_train_bs = copy.deepcopy(batch_train_bs_1)
count = 0
batch_list = []
index_list = [i for i in range(len(batch_train_bs))]
random.shuffle(index_list)
for index in index_list:
item = batch_train_bs[index]
if count == 0:
label_1 = item[0]
delta_1 = item[1]
out_1 = item[2]
count += 1
else:
label_1 = torch.cat([label_1, item[0]])
delta_1 += item[1]
out_1 += item[2]
count += 1
if count >= batchsize:
batch_list.append((label_1, delta_1, out_1))
count = 0
if count != 0:
batch_list.append((label_1, delta_1, out_1))
return batch_list
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
seed= args.run_seed
max_val_acc = 0
final_test_acc = 0
setup_seed(seed)
model = torch.load(args.cf_model_folder +str(seed) + "/roberta-large-mnli.pt", map_location= device)
batch_train_bs_1 = create_batch_with_delta_cf(orig_train_data, revised_train_data, 1, model, tokenizer)
batch_val = create_batch_with_delta_cf(orig_val_data, revised_val_data, args.batchsize, model, tokenizer)
batch_test = create_batch_with_delta_cf(orig_test_data, revised_test_data, args.batchsize, model, tokenizer)
classifier = copy.deepcopy(model.classifier).to(device)
cf_net = cf_conv_linear_net(10).to(device)
optimizer = optim.Adam([{"params":cf_net.parameters(),"lr":args.lr},
{"params":classifier.parameters(),"lr":args.lr}])
Loss = nn.CrossEntropyLoss()
acc_train_list = []
acc_val_list = []
acc_test_list = []
mk_dir(args.save_folder)
final_saving_folder = args.save_folder + "/" + str(seed) + "/"
mk_dir(final_saving_folder)
for i in range(0, args.epochs):
print("epoch:" + str(i))
loss_total = 0
batch_train = shuffle_from_bs_1(batch_train_bs_1, args.batchsize)
with open(final_saving_folder + "/" + args.log_name,"a+") as f:
if i == 0:
f.write("settings:\n")
f.write("lr:" + str(args.lr) + "\n")
f.write("net_struc:" + "\n")
print(cf_net, file=f)
print(classifier, file=f)
cf_net = cf_net.train()
classifier = classifier.train()
for index in tqdm(range(len(batch_train))):
loss = 0
# encoder = tokenizer(batch_train[index][1], padding=True, truncation=True, max_length=512, return_tensors='pt' )
label = batch_train[index][0].to(device)
delta_classifier_out = classifier(torch.cat(batch_train[index][1])).view(len(label),4,3)
for k in range(len(batch_train[index][2])):
label_temp = label[k]
real_classifier1_out = batch_train[index][2][k][-1]
cf_classifier1_out = batch_train[index][2][k][0:4]
delta_classifier_out_temp = delta_classifier_out[k]
for m in range(4):
input_for_conv_network = torch.cat([real_classifier1_out,cf_classifier1_out[m],delta_classifier_out_temp[m]]).view(1,1,3,3)
if m==0:
conv_network_out = cf_net(input_for_conv_network)
else:
conv_network_out += cf_net(input_for_conv_network)
conv_network_out = conv_network_out/4
loss += Loss(conv_network_out, label_temp.view(1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_total += loss.item()
print(loss_total/len(batch_train))
# batch_train = create_batch(train_data, 64)
acc1 = model_test_for_option2(batch_train, classifier, cf_net)
acc2 = model_test_for_option2(batch_val, classifier, cf_net)
acc3 = model_test_for_option2(batch_test, classifier, cf_net)
acc_train_list.append(acc1)
acc_val_list.append(acc2)
acc_test_list.append(acc3)
# acc4 = model_test(orig_batch_val, model)
# acc5 = model_test(orig_batch_test, model)
# print(loss_total/len(batch_train))
print(acc1, acc2, acc3)
# torch.save(model, args.save_folder + "roberta-large-mnli" + "save_epoch_" + str(i) + ".pt")
with open(final_saving_folder + "/" + args.log_name,"a+") as f:
if i == 0:
f.write("settings:\n")
f.write("net_struc:" + "\n")
print(cf_net, file=f)
f.write("epoch:" + str(i) + " train_acc:" + str(acc1) + " val_acc:" + str(acc2) + " test_acc:" + str(acc3) + "\n")
if acc2 > max_val_acc:
max_val_acc = acc2
final_test_acc = acc3
torch.save(classifier, final_saving_folder + "/classifier.pt")
torch.save(cf_net, final_saving_folder + "/cf_net.pt")
with open(args.save_folder + "/final_acc", "a+") as f:
f.write("random seed:" + str(seed) + "max_val_acc: " + str(max_val_acc) + " final_test_acc: " + str(final_test_acc) + "\n")
x = [i for i in range(len(acc_train_list))]
p1 = plt.plot(x, acc_train_list, "b", marker = "o", label = "train")
p2 = plt.plot(x, acc_val_list, "g", marker = "v", label = "val")
p3 = plt.plot(x, acc_test_list, "y", marker = "^", label = "test")
plt.xlabel("epochs")
plt.ylabel("acc")
plt.title("cf_net result")
# plt.legend([p1,p2,p3], ["train", "val", "test"])
# plt.title("cf_net result")
plt.legend(labels = ["train", "val", "test"])
plt.savefig(final_saving_folder + args.plot_name)
plt.cla()
# end
| 45.305466 | 200 | 0.650887 |
5e65d9ee6f7ad6cab9622de3cff18b6627565a78 | 2,240 | py | Python | conans/test/functional/old/user_info_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | ["MIT"] | 1 | 2019-01-09T14:14:58.000Z | 2019-01-09T14:14:58.000Z | conans/test/functional/old/user_info_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | ["MIT"] | null | null | null | conans/test/functional/old/user_info_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | ["MIT"] | null | null | null |
import os
import unittest
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient
from conans.util.files import load
class UserInfoTest(unittest.TestCase):
def test_user_info_propagation(self):
client = TestClient()
def export_lib(name, requires, infolines):
base = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
name = "%s"
version = "0.1"
requires = "%s"
def build(self):
pass
def package_info(self):
%s
'''
client.save({CONANFILE: base % (name, requires, infolines)}, clean_first=True)
client.run("export . lasote/stable")
export_lib("LIB_A", "", "self.user_info.VAR1=2")
export_lib("LIB_B", "LIB_A/0.1@lasote/stable", "self.user_info.VAR1=2\n "
"self.user_info.VAR2=3")
export_lib("LIB_C", "LIB_B/0.1@lasote/stable", "self.user_info.VAR1=2")
export_lib("LIB_D", "LIB_C/0.1@lasote/stable", "self.user_info.var1=2")
reuse = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
name = "reuse"
version = "0.1"
requires = "LIB_D/0.1@lasote/stable"
def build(self):
assert(self.deps_user_info["LIB_A"].VAR1=="2")
assert(self.deps_user_info["LIB_B"].VAR1=="2")
assert(self.deps_user_info["LIB_B"].VAR2=="3")
assert(self.deps_user_info["LIB_C"].VAR1=="2")
assert(self.deps_user_info["LIB_D"].var1=="2")
'''
client.save({CONANFILE: reuse}, clean_first=True)
client.run("export . lasote/stable")
client.run('install reuse/0.1@lasote/stable --build -g txt')
# Assert generator TXT
txt_contents = client.load("conanbuildinfo.txt")
self.assertIn("[USER_LIB_A]%sVAR1=2" % os.linesep, txt_contents)
self.assertIn("[USER_LIB_B]%sVAR1=2%sVAR2=3" % (os.linesep, os.linesep), txt_contents)
self.assertIn("[USER_LIB_C]%sVAR1=2" % os.linesep, txt_contents)
self.assertIn("[USER_LIB_D]%svar1=2" % os.linesep, txt_contents)
# Now try local command with a consumer
client.run('install . --build')
client.run("build .")
| 32 | 94 | 0.61875 |
b4cf8733fafbdf11e19d7a3997e47b55865facc5 | 6,080 | py | Python | pygame_gui/core/ui_container.py | halfninja/pygame_gui | 71b1150cb0c789339a9f8d781da15bdfad604f6c | ["MIT"] | null | null | null | pygame_gui/core/ui_container.py | halfninja/pygame_gui | 71b1150cb0c789339a9f8d781da15bdfad604f6c | ["MIT"] | null | null | null | pygame_gui/core/ui_container.py | halfninja/pygame_gui | 71b1150cb0c789339a9f8d781da15bdfad604f6c | ["MIT"] | null | null | null |
import pygame
from pygame_gui.core.ui_element import UIElement
class UIContainer(UIElement):
"""
A UI Container holds any number of other UI elements inside of a rectangle. When we move the UIContainer
all the UI elements contained within it can be moved as well.
    This class helps us make UI Windows, but will likely have wider uses as the GUI system develops.
:param relative_rect: A pygame.Rect whose position is relative to whatever UIContainer it is inside of, if any.
:param manager: The UIManager that manages this UIElement.
:param container: The UIContainer that this UIElement is contained within.
:param parent_element: The element this element 'belongs to' in the theming hierarchy.
:param object_id: A custom defined ID for fine tuning of theming.
"""
def __init__(self, relative_rect, manager,
container=None, parent_element=None, object_id=None):
self._layer = 0
self.ui_manager = manager
new_element_ids, new_object_ids = self.create_valid_ids(parent_element=parent_element,
object_id=object_id,
element_id='container')
super().__init__(relative_rect, manager, container,
object_ids=new_object_ids,
element_ids=new_element_ids,
starting_height=1,
layer_thickness=1)
self.sprite_group = self.ui_manager.get_sprite_group()
self.image = pygame.Surface((0, 0))
self.elements = []
self.layer_thickness = 1
self.hovered = False
def add_element(self, element):
"""
Add a UIElement to the container. The UI's relative_rect parameter will be relative to this container.
:param element: A UIElement to add to this container.
"""
element.change_layer(self._layer + element.starting_height)
self.elements.append(element)
self.recalculate_container_layer_thickness()
def remove_element(self, element):
"""
Remove a UIElement from this container.
:param element: A UIElement to remove from this container.
"""
if element in self.elements:
self.elements.remove(element)
self.recalculate_container_layer_thickness()
def recalculate_container_layer_thickness(self):
"""
This function will iterate through the elements in our container and determine the maximum 'height'
that they reach in the 'layer stack'. We then use that to determine the overall 'thickness' of this container.
The thickness value is used to determine where to place overlapping windows in the layers
"""
max_element_top_layer = 0
for element in self.elements:
if element.top_layer > max_element_top_layer:
max_element_top_layer = element.top_layer
self.layer_thickness = max_element_top_layer - self._layer
def change_container_layer(self, new_layer):
"""
Change the layer of this container. Layers are used by the GUI to control the order in which things are drawn
and which things should currently be interactive (so you can't interact with things behind other things).
This particular method is most often used to shift the visible contents of a window in front of any others when
it is moved to the front of the window stack.
:param new_layer: The layer to move our container to.
"""
if new_layer != self._layer:
self._layer = new_layer
self.sprite_group.change_layer(self, self._layer)
for element in self.elements:
element.change_layer(self._layer + element.starting_height)
def update_containing_rect_position(self):
"""
This function is called when we move the container to update all the contained UI Elements to move as well.
"""
super().update_containing_rect_position()
for element in self.elements:
element.update_containing_rect_position()
def get_top_layer(self):
"""
Assuming we have correctly calculated the 'thickness' of this container, this method will return the 'highest'
layer in the LayeredUpdates UI Group.
:return int: An integer representing the current highest layer being used by this container.
"""
return self._layer + self.layer_thickness
def kill(self):
"""
Overrides the standard kill method of UI Elements (and pygame sprites beyond that) to also call the kill method
on all contained UI Elements.
"""
self.clear()
super().kill()
def clear(self):
"""
Removes and kills all the UI elements inside this container.
"""
while len(self.elements) > 0:
self.elements.pop().kill()
# noinspection PyUnusedLocal
def check_hover(self, time_delta, hovered_higher_element):
"""
A method that helps us to determine which, if any, UI Element is currently being hovered by the mouse.
:param time_delta: A float, the time in seconds between the last call to this function and now (roughly).
:param hovered_higher_element: A boolean, representing whether we have already hovered a 'higher' element.
:return bool: A boolean that is true if we have hovered a UI element, either just now or before this method.
"""
if self.alive():
mouse_x, mouse_y = pygame.mouse.get_pos()
if self.rect.collidepoint(mouse_x, mouse_y) and not hovered_higher_element:
if not self.hovered:
self.hovered = True
hovered_higher_element = True
else:
if self.hovered:
self.hovered = False
elif self.hovered:
self.hovered = False
return hovered_higher_element
| 41.360544 | 119 | 0.647204 |
2e3b0c91ea33c0cfebfc04c6f62c3f693a887a63 | 399 | py | Python | tests/python/pants_test/projects/test_python_testprojects_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | ["Apache-2.0"] | null | null | null | tests/python/pants_test/projects/test_python_testprojects_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | ["Apache-2.0"] | 1 | 2020-01-21T16:34:02.000Z | 2020-01-21T16:34:02.000Z | tests/python/pants_test/projects/test_python_testprojects_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants_test.projects.projects_test_base import ProjectsTestBase
class TestPythonTestprojectsIntegration(ProjectsTestBase):
def test_python_testprojects(self) -> None:
self.assert_valid_projects("testprojects/src/python::", "testprojects/tests/python::")
| 36.272727 | 90 | 0.807018 |
f006cbf907ac4349fa410e629f50e2b482ba6b80 | 1,911 | py | Python | app/game/game_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | ["Apache-2.0"] | null | null | null | app/game/game_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | ["Apache-2.0"] | null | null | null | app/game/game_repository.py | hmajid2301/banter-bus-management-api | d51a40c2d5254d4197cbe5bb84aa576df2c24893 | ["Apache-2.0"] | null | null | null |
import abc
from typing import List
from omnibus.database.repository import AbstractRepository
from pymongo.errors import DuplicateKeyError
from app.game.game_exceptions import GameExistsException, GameNotFound
from app.game.game_models import Game
class AbstractGameRepository(AbstractRepository[Game]):
@abc.abstractmethod
async def update_enable_status(self, game_name: str, enabled: bool) -> Game:
raise NotImplementedError
@abc.abstractmethod
async def get_all_game_names(self, enabled: bool = None) -> List[str]:
raise NotImplementedError
class GameRepository(AbstractGameRepository):
@staticmethod
async def add(game: Game):
try:
await Game.insert(game)
except DuplicateKeyError:
raise GameExistsException(f"game {game.name=} already exists")
@staticmethod
async def get(game_name: str) -> Game:
game = await Game.find_one(Game.name == game_name)
if not game:
raise GameNotFound(game_name=game_name)
return game
async def remove(self, game_name: str):
game = await self.get(game_name=game_name)
await game.delete()
async def get_all_game_names(self, enabled: bool = None) -> List[str]:
if enabled is not None:
games = await Game.find(Game.enabled == enabled).to_list()
else:
games = await Game.find().to_list()
return self._get_game_names(games)
# TODO: use projection https://roman-right.github.io/beanie/tutorial/finding-documents/
@staticmethod
def _get_game_names(games: List[Game]):
game_names: List[str] = [game.name for game in games]
return game_names
async def update_enable_status(self, game_name: str, enabled: bool) -> Game:
game = await self.get(game_name=game_name)
game.enabled = enabled
await game.save()
return game
| 32.948276 | 91 | 0.685505 |
6f278f2ea0df76471a5f4d7877ff42e07a5cdf52 | 7,109 | py | Python | src/python/photosphere_parallax_pyopenvr.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | 1 | 2017-01-29T21:15:23.000Z | 2017-01-29T21:15:23.000Z | src/python/photosphere_parallax_pyopenvr.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | 2 | 2017-01-29T20:34:39.000Z | 2017-01-29T23:26:05.000Z | src/python/photosphere_parallax_pyopenvr.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | null | null | null |
#!/bin/env python
# Example program for viewing a 360 photosphere in a virtual reality headset
# using parallax shifting to place the ground plane on the floor
import os
from textwrap import dedent
import numpy
from OpenGL import GL
from OpenGL.GL.shaders import compileShader, compileProgram
try:
from PIL import Image
except ImportError:
import Image
from openvr.glframework.glfw_app import GlfwApp
from openvr.gl_renderer import OpenVrGlRenderer
class SphericalPanorama(object):
def __init__(self, image):
self.image = image
self.shader = None
self.vao = None
self.texture_handle = None
def init_gl(self):
self.vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(self.vao)
# Set up photosphere image texture for OpenGL
self.texture_handle = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture_handle);
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR);
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR);
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT);
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_MIRRORED_REPEAT);
GL.glTexImage2D(GL.GL_TEXTURE_2D,
0,
GL.GL_RGB8,
self.image.shape[1], # width
self.image.shape[0], # height
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
self.image);
GL.glBindTexture(GL.GL_TEXTURE_2D, 0);
# Set up shaders for rendering
vertex_shader = compileShader(dedent(
"""#version 450 core
#line 52
layout(location = 1) uniform mat4 projection = mat4(1);
layout(location = 2) uniform mat4 model_view = mat4(1);
out vec3 viewDir;
out vec3 camPos;
// projected screen quad
const vec4 SCREEN_QUAD[4] = vec4[4](
vec4(-1, -1, 1, 1),
vec4( 1, -1, 1, 1),
vec4( 1, 1, 1, 1),
vec4(-1, 1, 1, 1));
const int TRIANGLE_STRIP_INDICES[4] = int[4](
0, 1, 3, 2);
void main()
{
int vertexIndex = TRIANGLE_STRIP_INDICES[gl_VertexID];
gl_Position = vec4(SCREEN_QUAD[vertexIndex]);
mat4 xyzFromNdc = inverse(projection * model_view);
vec4 campos4 = xyzFromNdc * vec4(0, 0, 0, 1);
vec4 vpos = xyzFromNdc * SCREEN_QUAD[vertexIndex];
camPos = campos4.xyz /campos4.w;
viewDir = vpos.xyz/vpos.w - camPos;
}
"""),
GL.GL_VERTEX_SHADER)
fragment_shader = compileShader(dedent(
"""#version 450 core
#line 84
const vec3 original_cam_pos = vec3(0, 2.0, 0);
const float floor_level = -0.40;
const vec4 ground_plane = vec4(0, 1, 0, -floor_level);
layout(binding = 0) uniform sampler2D image;
in vec3 viewDir;
in vec3 camPos;
out vec4 pixelColor;
const float PI = 3.1415926535897932384626433832795;
// this function abstracts away equirectangular vs cubemap fetch
vec4 color_for_original_direction(in vec3 d) {
float longitude = 0.5 * atan(d.z, d.x) / PI + 0.5; // range [0-1]
float r = length(d.xz);
float latitude = -atan(d.y, r) / PI + 0.5; // range [0-1]
return texture(image, vec2(longitude, latitude));
}
vec3 intersect_plane(vec3 ray_point, vec3 ray_direction, vec4 plane) {
// intersection of view direction and plane
// http://math.stackexchange.com/questions/400268/equation-for-a-line-through-a-plane-in-homogeneous-coordinates
const vec3 w = plane.xyz;
const float e = plane.w;
vec3 l = ray_direction;
vec3 m = cross(ray_point, l);
// r is the point on the floor we are looking at
vec3 r = (cross(w, m) - e*l) / dot(w,l);
return r;
}
// intersect_proxy_geometry() will change depending on nature of proxy geometry
vec3 intersect_proxy_geometry(vec3 ray_point, vec3 ray_direction)
{
return intersect_plane(ray_point, ray_direction, ground_plane);
}
vec4 color_for_direction(in vec3 d) {
if (d.y < 0) {
// below the horizon, shift parallax to infinite plane at finite distance
vec3 r = intersect_proxy_geometry(camPos, viewDir);
vec3 dir2 = r - original_cam_pos;
return color_for_original_direction(dir2);
}
else {
// above the horizon, view to infinity in all directions
return color_for_original_direction(d);
}
}
void main()
{
pixelColor = color_for_direction(viewDir);
}
"""),
GL.GL_FRAGMENT_SHADER)
self.shader = compileProgram(vertex_shader, fragment_shader)
def display_gl(self, modelview, projection):
GL.glBindVertexArray(self.vao)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.texture_handle)
GL.glUseProgram(self.shader)
GL.glUniformMatrix4fv(1, 1, False, projection)
GL.glUniformMatrix4fv(2, 1, False, modelview)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
def dispose_gl(self):
GL.glDeleteTextures([self.texture_handle,])
if self.shader is not None:
GL.glDeleteProgram(self.shader)
GL.glDeleteVertexArrays(1, [self.vao,])
if __name__ == "__main__":
# Open equirectangular photosphere
src_folder = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(src_folder, '../../assets/images/_0010782_stitch2.jpg')
img = Image.open(img_path)
arr = numpy.array(img)
actor = SphericalPanorama(arr)
renderer = OpenVrGlRenderer(actor)
with GlfwApp(renderer, "parallax shifted photosphere test") as glfwApp:
glfwApp.run_loop()
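# --- Illustrative sketch, not part of the original sample -------------------
# The fragment shader above intersects each view ray with the ground plane
# using Pluecker coordinates: r = (cross(w, m) - e*l) / dot(w, l) with
# m = ray_point x ray_direction.  The helper below mirrors that formula in
# numpy so it can be sanity-checked on the CPU; it is defined but never called.
def intersect_plane_cpu(ray_point, ray_direction, plane):
    """Return the point where the ray hits the plane (plane[:3], plane[3])."""
    w = numpy.asarray(plane[:3], dtype=float)
    e = float(plane[3])
    l = numpy.asarray(ray_direction, dtype=float)
    m = numpy.cross(numpy.asarray(ray_point, dtype=float), l)
    return (numpy.cross(w, m) - e * l) / numpy.dot(w, l)

# Looking straight down from 2 m above the origin should land on the shader's
# floor plane y = -0.40:
#   intersect_plane_cpu([0, 2, 0], [0, -1, 0], [0, 1, 0, 0.40]) -> [0., -0.4, 0.]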
| 41.573099
| 133
| 0.522999
|
c744054939c4515bec3835bc476c25562dfd15c4
| 261
|
py
|
Python
|
retangulos de #.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
retangulos de #.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
retangulos de #.py
|
knapoli/programas-python
|
51c3f8f84c3237866a20f37859ef0bcfe9850d15
|
[
"MIT"
] | null | null | null |
# Draw a solid rectangle of '#' characters with the given width and height.
largura = int(input('Digite a largura: '))   # width
altura = int(input('Digite a altura: '))     # height
guardar = largura                            # remember the width for each new row
while altura > 0:
    largura = guardar
    while largura > 0:
        print('#', end='')
        largura = largura - 1
    altura = altura - 1
    print()
| 21.75
| 42
| 0.574713
|
1a285827407684171f5dedb4508ae47f04a484b9
| 20,091
|
py
|
Python
|
salt/modules/systemd.py
|
petersanchez/salt
|
0e8f1a15b4bb7299220001f3cab6d2c002d4689c
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/systemd.py
|
petersanchez/salt
|
0e8f1a15b4bb7299220001f3cab6d2c002d4689c
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/systemd.py
|
petersanchez/salt
|
0e8f1a15b4bb7299220001f3cab6d2c002d4689c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Provide the service module for systemd
'''
# Import python libs
from __future__ import absolute_import
import glob
import logging
import os
import re
import shlex
# Import 3rd-party libs
import salt.ext.six as six
import salt.utils.itertools
import salt.utils.systemd
from salt.exceptions import CommandExecutionError, CommandNotFoundError
log = logging.getLogger(__name__)
__func_alias__ = {
'reload_': 'reload'
}
LOCAL_CONFIG_PATH = '/etc/systemd/system'
LEGACY_INIT_SCRIPT_PATH = '/etc/init.d'
VALID_UNIT_TYPES = ('service', 'socket', 'device', 'mount', 'automount',
'swap', 'target', 'path', 'timer')
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
'''
Only work on systems that have been booted with systemd
'''
if __grains__['kernel'] == 'Linux' \
and salt.utils.systemd.booted(__context__):
return __virtualname__
return (
False,
'The systemd execution module failed to load: only available on Linux '
'systems which have been booted with systemd.'
)
def _canonical_unit_name(name):
'''
Build a canonical unit name treating unit names without one
of the valid suffixes as a service.
'''
if not isinstance(name, six.string_types):
name = str(name)
if any(name.endswith(suffix) for suffix in VALID_UNIT_TYPES):
return name
return '{0}.service'.format(name)
def _systemctl_cmd(action, name=None):
'''
Build a systemctl command line. Treat unit names without one
of the valid suffixes as a service.
'''
ret = ['systemctl']
ret.extend(shlex.split(action))
if name:
ret.append(_canonical_unit_name(name))
if 'status' in ret:
ret.extend(['-n', '0'])
return ret
def _get_all_units():
'''
Get all units and their state. Units ending in .service
are normalized so that they can be referenced without a type suffix.
'''
rexp = re.compile(r'(?m)^(?P<name>.+)\.(?P<type>' +
'|'.join(VALID_UNIT_TYPES) +
r')\s+loaded\s+(?P<active>[^\s]+)')
out = __salt__['cmd.run_stdout'](
_systemctl_cmd('--all --full --no-legend --no-pager list-units'),
python_shell=False
)
ret = {}
for match in rexp.finditer(out):
name = match.group('name')
if match.group('type') != 'service':
name += '.' + match.group('type')
ret[name] = match.group('active')
return ret
def _get_all_unit_files():
'''
Get all unit files and their state. Unit files ending in .service
are normalized so that they can be referenced without a type suffix.
'''
rexp = re.compile(r'(?m)^(?P<name>.+)\.(?P<type>' +
'|'.join(VALID_UNIT_TYPES) +
r')\s+(?P<state>.+)$')
out = __salt__['cmd.run_stdout'](
_systemctl_cmd(
            '--full --no-legend --no-pager list-unit-files'
),
python_shell=False
)
ret = {}
for match in rexp.finditer(out):
name = match.group('name')
if match.group('type') != 'service':
name += '.' + match.group('type')
ret[name] = match.group('state')
return ret
def _get_all_legacy_init_scripts():
'''
Get all old-fashioned init-style scripts. State is always inactive, because
systemd would already show them otherwise.
'''
ret = {}
if not os.path.isdir(LEGACY_INIT_SCRIPT_PATH):
return ret
for initscript_name in os.listdir(LEGACY_INIT_SCRIPT_PATH):
if initscript_name.startswith('rc'):
continue
full_path = os.path.join(LEGACY_INIT_SCRIPT_PATH, initscript_name)
if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
log.info('Legacy init script: \'%s\'.', initscript_name)
ret[initscript_name] = 'inactive'
return ret
def _untracked_custom_unit_found(name):
'''
    If the passed service name is not available, but a unit file exists in
    /etc/systemd/system, return True. Otherwise, return False.
'''
unit_path = os.path.join('/etc/systemd/system',
_canonical_unit_name(name))
return os.access(unit_path, os.R_OK) and not available(name)
def _unit_file_changed(name):
'''
Returns True if systemctl reports that the unit file has changed, otherwise
returns False.
'''
out = __salt__['cmd.run'](_systemctl_cmd('status', name),
python_shell=False,
ignore_retcode=True).lower()
return "'systemctl daemon-reload'" in out
def systemctl_reload():
'''
    Reloads the systemd manager configuration (``systemctl daemon-reload``), an action needed whenever unit files are added or changed.
CLI Example:
.. code-block:: bash
salt '*' service.systemctl_reload
'''
out = __salt__['cmd.run_all'](
_systemctl_cmd('--system daemon-reload'),
python_shell=False,
redirect_stderr=True
)
if out['retcode'] != 0:
raise CommandExecutionError(
'Problem performing systemctl daemon-reload',
info=out['stdout']
)
return True
def _default_runlevel():
'''
Try to figure out the default runlevel. It is kept in
/etc/init/rc-sysinit.conf, but can be overridden with entries
in /etc/inittab, or via the kernel command-line at boot
'''
# Try to get the "main" default. If this fails, throw up our
# hands and just guess "2", because things are horribly broken
try:
with salt.utils.fopen('/etc/init/rc-sysinit.conf') as fp_:
for line in fp_:
if line.startswith('env DEFAULT_RUNLEVEL'):
runlevel = line.split('=')[-1].strip()
except Exception:
return '2'
# Look for an optional "legacy" override in /etc/inittab
try:
with salt.utils.fopen('/etc/inittab') as fp_:
for line in fp_:
if not line.startswith('#') and 'initdefault' in line:
runlevel = line.split(':')[1]
except Exception:
pass
# The default runlevel can also be set via the kernel command-line.
# Kinky.
try:
valid_strings = set(
('0', '1', '2', '3', '4', '5', '6', 's', 'S', '-s', 'single'))
with salt.utils.fopen('/proc/cmdline') as fp_:
for line in fp_:
for arg in line.strip().split():
if arg in valid_strings:
runlevel = arg
break
except Exception:
pass
return runlevel
def _runlevel():
'''
Return the current runlevel
'''
if 'systemd._runlevel' in __context__:
return __context__['systemd._runlevel']
out = __salt__['cmd.run']('runlevel', python_shell=False)
try:
ret = out.split()[1]
except IndexError:
# The runlevel is unknown, return the default
ret = _default_runlevel()
__context__['systemd._runlevel'] = ret
return ret
def _get_service_exec():
'''
Debian uses update-rc.d to manage System-V style services.
http://www.debian.org/doc/debian-policy/ch-opersys.html#s9.3.3
'''
executable = 'update-rc.d'
salt.utils.check_or_die(executable)
return executable
def _check_for_unit_changes(name):
'''
Check for modified/updated unit files, and run a daemon-reload if any are
found.
'''
contextkey = 'systemd._check_for_unit_changes'
if contextkey not in __context__:
if _untracked_custom_unit_found(name) or _unit_file_changed(name):
systemctl_reload()
# Set context key to avoid repeating this check
__context__[contextkey] = True
def _has_sysv_exec():
'''
    Return True if the legacy sysv service manager (update-rc.d) is available
'''
if 'systemd._has_sysv_exec' not in __context__:
try:
__context__['systemd._has_sysv_exec'] = bool(_get_service_exec())
except (CommandExecutionError, CommandNotFoundError):
__context__['systemd._has_sysv_exec'] = False
return __context__['systemd._has_sysv_exec']
def _sysv_exists(name):
script = '/etc/init.d/{0}'.format(name)
return os.access(script, os.X_OK)
def _service_is_sysv(name):
'''
A System-V style service will have a control script in
/etc/init.d.
    Return True only if the service doesn't also provide a systemd unit file.
'''
return (_has_sysv_exec() and
name in _get_all_units() and
name not in _get_all_unit_files() and
_sysv_exists(name))
def _sysv_is_enabled(name):
'''
    A System-V style service is assumed enabled if a "startup" symlink
    (starting with "S") to its script is found in /etc/rc<runlevel>.d for the
    current runlevel.
'''
return bool(glob.glob('/etc/rc{0}.d/S*{1}'.format(_runlevel(), name)))
def get_enabled():
'''
Return a list of all enabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = []
units = _get_all_unit_files()
services = _get_all_units()
for name, state in six.iteritems(units):
if state.strip() == 'enabled':
ret.append(name)
for name, state in six.iteritems(services):
if name in units:
continue
        # Performance: if the legacy init script doesn't exist,
        # don't continue with the systemd query
if not _service_is_sysv(name):
continue
if _sysv_is_enabled(name):
ret.append(name)
return sorted(ret)
def get_disabled():
'''
Return a list of all disabled services
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
ret = []
known_services = _get_all_unit_files()
known_services.update(_get_all_legacy_init_scripts())
for name, state in six.iteritems(known_services):
if state == 'disabled':
ret.append(name)
return sorted(ret)
def get_all():
'''
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
ret = set(_get_all_units())
ret.update(_get_all_unit_files())
ret.update(_get_all_legacy_init_scripts())
return sorted(ret)
def available(name):
'''
Check that the given service is available taking into account
template units.
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
out = __salt__['cmd.run'](_systemctl_cmd('status', name),
python_shell=False,
ignore_retcode=True)
for line in salt.utils.itertools.split(out.lower(), '\n'):
match = re.match(r'\s+loaded:\s+(\S+)', line)
if match:
ret = match.group(1) != 'not-found'
break
else:
raise CommandExecutionError(
'Failed to get information on service unit \'{0}\''.format(name),
info=out
)
return ret
def missing(name):
'''
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return not available(name)
def unmask(name):
'''
Unmask the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.unmask <service name>
'''
_check_for_unit_changes(name)
mask_status = masked(name)
if not mask_status:
log.debug('Service \'{0}\' is not masked'.format(name))
return True
cmd = 'unmask --runtime' if 'runtime' in mask_status else 'unmask'
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name),
python_shell=False,
redirect_stderr=True)
if out['retcode'] != 0:
raise CommandExecutionError(
'Failed to unmask service \'{0}\''.format(name),
info=out['stdout']
)
return True
def mask(name, runtime=False):
'''
Mask the specified service with systemd
runtime : False
Set to ``True`` to mask this service only until the next reboot
.. versionadded:: Boron
CLI Example:
.. code-block:: bash
salt '*' service.mask <service name>
'''
_check_for_unit_changes(name)
cmd = 'mask --runtime' if runtime else 'mask'
out = __salt__['cmd.run_all'](_systemctl_cmd(cmd, name),
python_shell=False,
redirect_stderr=True)
if out['retcode'] != 0:
raise CommandExecutionError(
'Failed to mask service \'{0}\''.format(name),
info=out['stdout']
)
return True
def masked(name):
'''
.. versionadded:: 2015.8.0
.. versionchanged:: Boron
The return data for this function has changed. If the service is
masked, the return value will now be the output of the ``systemctl
is-enabled`` command (so that a persistent mask can be distinguished
from a runtime mask). If the service is not masked, then ``False`` will
be returned.
Check whether or not a service is masked
CLI Example:
.. code-block:: bash
salt '*' service.masked <service name>
'''
_check_for_unit_changes(name)
out = __salt__['cmd.run'](
_systemctl_cmd('is-enabled', name),
python_shell=False,
ignore_retcode=True,
)
return out if 'masked' in out else False
def start(name):
'''
Start the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('start', name),
python_shell=False) == 0
def stop(name):
'''
Stop the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
_check_for_unit_changes(name)
return __salt__['cmd.retcode'](_systemctl_cmd('stop', name),
python_shell=False) == 0
def restart(name):
'''
Restart the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('restart', name),
python_shell=False) == 0
def reload_(name):
'''
Reload the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('reload', name),
python_shell=False) == 0
def force_reload(name):
'''
Force-reload the specified service with systemd
CLI Example:
.. code-block:: bash
salt '*' service.force_reload <service name>
'''
_check_for_unit_changes(name)
unmask(name)
return __salt__['cmd.retcode'](_systemctl_cmd('force-reload', name),
python_shell=False) == 0
# The sig argument is required to maintain consistency with service states. It
# is unused in this function.
def status(name, sig=None):
'''
Return the status for a service via systemd, returns ``True`` if the
service is running and ``False`` if it is not.
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
'''
_check_for_unit_changes(name)
return __salt__['cmd.retcode'](_systemctl_cmd('is-active', name),
python_shell=False,
ignore_retcode=True) == 0
def enable(name, **kwargs):
'''
Enable the named service to start when the system boots
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
_check_for_unit_changes(name)
unmask(name)
if _service_is_sysv(name):
cmd = [_get_service_exec(), '-f', name, 'defaults', '99']
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](_systemctl_cmd('enable', name),
python_shell=False,
ignore_retcode=True) == 0
def disable(name, **kwargs):
'''
    Disable the named service from starting when the system boots
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
_check_for_unit_changes(name)
if _service_is_sysv(name):
cmd = [_get_service_exec(), '-f', name, 'remove']
return __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True) == 0
return __salt__['cmd.retcode'](_systemctl_cmd('disable', name),
python_shell=False,
ignore_retcode=True) == 0
def _enabled(name):
'''
Try ``systemctl is-enabled`` first, then look for a symlink created by
systemctl (older systemd releases did not support using is-enabled to check
templated services), and lastly check for a sysvinit service.
'''
if __salt__['cmd.retcode'](_systemctl_cmd('is-enabled', name),
python_shell=False,
ignore_retcode=True) == 0:
return True
elif '@' in name:
# On older systemd releases, templated services could not be checked
# with ``systemctl is-enabled``. As a fallback, look for the symlinks
# created by systemctl when enabling templated services.
cmd = ['find', LOCAL_CONFIG_PATH, '-name', name,
'-type', 'l', '-print', '-quit']
# If the find command returns any matches, there will be output and the
# string will be non-empty.
if bool(__salt__['cmd.run'](cmd, python_shell=False)):
return True
else:
return _sysv_is_enabled(name)
def enabled(name, **kwargs):
'''
Return if the named service is enabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
return _enabled(name)
def disabled(name):
'''
Return if the named service is disabled to start on boot
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return not _enabled(name)
def show(name):
'''
Show properties of one or more units/jobs or the manager
CLI Example:
salt '*' service.show <service name>
'''
ret = {}
out = __salt__['cmd.run'](_systemctl_cmd('show', name),
python_shell=False)
for line in salt.utils.itertools.split(out, '\n'):
comps = line.split('=')
name = comps[0]
value = '='.join(comps[1:])
if value.startswith('{'):
value = value.replace('{', '').replace('}', '')
ret[name] = {}
for item in value.split(' ; '):
comps = item.split('=')
ret[name][comps[0].strip()] = comps[1].strip()
elif name in ('Before', 'After', 'Wants'):
ret[name] = value.split()
else:
ret[name] = value
return ret
def execs():
'''
Return a list of all files specified as ``ExecStart`` for all services.
CLI Example:
salt '*' service.execs
'''
ret = {}
for service in get_all():
data = show(service)
if 'ExecStart' not in data:
continue
ret[service] = data['ExecStart']['path']
return ret
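# --- Illustrative sketch, not part of the upstream Salt module --------------
# A minimal check of how the helpers above compose systemctl command lines;
# it only exercises _canonical_unit_name/_systemctl_cmd, which depend solely
# on the standard library.
def _sketch_command_composition():
    assert _canonical_unit_name('sshd') == 'sshd.service'
    assert _canonical_unit_name('tmp.mount') == 'tmp.mount'
    assert _systemctl_cmd('is-active', 'sshd') == \
        ['systemctl', 'is-active', 'sshd.service']
    # 'status' additionally appends '-n 0' to suppress journal lines
    assert _systemctl_cmd('status', 'sshd') == \
        ['systemctl', 'status', 'sshd.service', '-n', '0']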
| 27.484268
| 81
| 0.589617
|