Schema:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
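A minimal sketch of how rows with this schema might be filtered on the quality signals, assuming the dump is stored as JSON Lines (the file name `rows.jsonl` is hypothetical; the dump does not name one):

```python
import json

with open("rows.jsonl") as f:  # hypothetical file name
    for line in f:
        row = json.loads(line)
        # Keep Python files that parse cleanly (cate_ast == 1) and that are
        # not dominated by duplicated lines, per the qsc_* signals above.
        if (
            row["lang"] == "Python"
            and row["qsc_codepython_cate_ast_quality_signal"] == 1
            and row["qsc_code_frac_lines_dupe_lines_quality_signal"] < 0.5
        ):
            print(row["max_stars_repo_name"], row["size"])
```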
Row 1

| Field | Value |
|---|---|
| hexsha | 1c15b634ddb263d20add136ebc95c1405585f3a8 |
| size | 14,133 |
| ext | py |
| lang | Python |
| max_stars_repo_path | stumpy/mpdist.py |
| max_stars_repo_name | mexxexx/stumpy |
| max_stars_repo_head_hexsha | dcfa14b98aee375da4239363c1d2a6520fb54e80 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | stumpy/mpdist.py |
| max_issues_repo_name | mexxexx/stumpy |
| max_issues_repo_head_hexsha | dcfa14b98aee375da4239363c1d2a6520fb54e80 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | stumpy/mpdist.py |
| max_forks_repo_name | mexxexx/stumpy |
| max_forks_repo_head_hexsha | dcfa14b98aee375da4239363c1d2a6520fb54e80 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import numpy as np
import math
from . import stump, stumped, core
from .core import _mass_distance_matrix
from .aampdist import aampdist, aampdisted


def _compute_P_ABBA(
T_A, T_B, m, P_ABBA, dask_client=None, device_id=None, mp_func=stump
):
"""
A convenience function for computing the (unsorted) concatenated matrix profiles
from an AB-join and BA-join for the two time series, `T_A` and `T_B`. This result
can then be used to compute the matrix profile distance (MPdist) measure.
The MPdist distance measure considers two time series to be similar if they share
many subsequences, regardless of the order of matching subsequences. MPdist
concatenates and sorts the output of an AB-join and a BA-join and returns the value
of the `k`th smallest number as the reported distance. Note that MPdist is a
measure and not a metric. Therefore, it does not obey the triangular inequality but
the method is highly scalable.
Parameters
----------
T_A : ndarray
The first time series or sequence for which to compute the matrix profile
T_B : ndarray
The second time series or sequence for which to compute the matrix profile
m : int
Window size
P_ABBA : ndarray
The output array to write the concatenated AB-join and BA-join results to
dask_client : client, default None
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
device_id : int or list, default None
The (GPU) device number to use. The default value is `0`. A list of
valid device ids (int) may also be provided for parallel GPU-STUMP
computation. A list of all valid device ids can be obtained by
executing `[device.id for device in numba.cuda.list_devices()]`.
mp_func : object, default stump
Specify a custom matrix profile function to use for computing matrix profiles
Returns
-------
None
Notes
-----
`DOI: 10.1109/ICDM.2018.00119 \
<https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__
See Section III
"""
n_A = T_A.shape[0]
partial_mp_func = core._get_partial_mp_func(
mp_func, dask_client=dask_client, device_id=device_id
)
P_ABBA[: n_A - m + 1] = partial_mp_func(T_A, m, T_B, ignore_trivial=False)[:, 0]
P_ABBA[n_A - m + 1 :] = partial_mp_func(T_B, m, T_A, ignore_trivial=False)[:, 0]


def _select_P_ABBA_value(P_ABBA, k, custom_func=None):
"""
A convenience function for returning the `k`th smallest value from the `P_ABBA`
array or use a custom function to specify what `P_ABBA` value to return.
The MPdist distance measure considers two time series to be similar if they share
many subsequences, regardless of the order of matching subsequences. MPdist
concatenates and sorts the output of an AB-join and a BA-join and returns the value
of the `k`th smallest number as the reported distance. Note that MPdist is a
measure and not a metric. Therefore, it does not obey the triangular inequality but
the method is highly scalable.
Parameters
----------
P_ABBA : ndarray
A pre-sorted array resulting from the concatenation of the outputs from an
AB-join and BA-join for two time series, `T_A` and `T_B`
k : int
Specify the `k`th value in the concatenated matrix profiles to return. This
parameter is ignored when `custom_func` is not None.
custom_func : object, default None
A custom user defined function for selecting the desired value from the
sorted `P_ABBA` array. This function may need to leverage `functools.partial`
and should take `P_ABBA` as its only input parameter and return a single
`MPdist` value. The `percentage` and `k` parameters are ignored when
`custom_func` is not None.
Returns
-------
MPdist : float
The matrix profile distance
"""
k = min(int(k), P_ABBA.shape[0] - 1)
if custom_func is not None:
MPdist = custom_func(P_ABBA)
else:
MPdist = P_ABBA[k]
if ~np.isfinite(MPdist):
k = max(0, np.count_nonzero(np.isfinite(P_ABBA[:k])) - 1)
MPdist = P_ABBA[k]
return MPdist
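# Worked example of the fallback above (illustrative values, not from the
# original module):
#
#     >>> P_ABBA = np.array([0.1, 0.5, np.inf])
#     >>> _select_P_ABBA_value(P_ABBA, k=2)
#     0.5
#
# `P_ABBA[2]` is infinite, so `k` falls back to the index of the largest
# finite value (here 1) and `0.5` is returned.
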
def _mpdist(
T_A,
T_B,
m,
percentage=0.05,
k=None,
dask_client=None,
device_id=None,
mp_func=stump,
custom_func=None,
):
"""
A convenience function for computing the matrix profile distance (MPdist) measure
between any two time series.
The MPdist distance measure considers two time series to be similar if they share
many subsequences, regardless of the order of matching subsequences. MPdist
concatenates and sorts the output of an AB-join and a BA-join and returns the value
of the `k`th smallest number as the reported distance. Note that MPdist is a
measure and not a metric. Therefore, it does not obey the triangular inequality but
the method is highly scalable.
Parameters
----------
T_A : ndarray
The first time series or sequence for which to compute the matrix profile
T_B : ndarray
The second time series or sequence for which to compute the matrix profile
m : int
Window size
percentage : float, default 0.05
The percentage of distances that will be used to report `mpdist`. The value
is between 0.0 and 1.0. This parameter is ignored when `k` is not `None` or when
`custom_func` is not None.
k : int, default None
Specify the `k`th value in the concatenated matrix profiles to return. When `k`
is not `None`, then the `percentage` parameter is ignored. This parameter is
ignored when `custom_func` is not None.
dask_client : client, default None
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
device_id : int or list, default None
The (GPU) device number to use. The default value is `0`. A list of
valid device ids (int) may also be provided for parallel GPU-STUMP
computation. A list of all valid device ids can be obtained by
executing `[device.id for device in numba.cuda.list_devices()]`.
mp_func : object, default stump
Specify a custom matrix profile function to use for computing matrix profiles
custom_func : object, default None
A custom user defined function for selecting the desired value from the
sorted `P_ABBA` array. This function may need to leverage `functools.partial`
and should take `P_ABBA` as its only input parameter and return a single
`MPdist` value. The `percentage` and `k` parameters are ignored when
`custom_func` is not None.
Returns
-------
MPdist : float
The matrix profile distance
Notes
-----
`DOI: 10.1109/ICDM.2018.00119 \
<https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__
See Section III
"""
n_A = T_A.shape[0]
n_B = T_B.shape[0]
P_ABBA = np.empty(n_A - m + 1 + n_B - m + 1, dtype=np.float64)
_compute_P_ABBA(T_A, T_B, m, P_ABBA, dask_client, device_id, mp_func)
P_ABBA.sort()
if k is not None:
k = min(int(k), P_ABBA.shape[0] - 1)
else:
percentage = min(percentage, 1.0)
percentage = max(percentage, 0.0)
k = min(math.ceil(percentage * (n_A + n_B)), n_A - m + 1 + n_B - m + 1 - 1)
MPdist = _select_P_ABBA_value(P_ABBA, k, custom_func)
return MPdist
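# For example (illustrative numbers, not from the source): with n_A = n_B = 100,
# m = 10, and the default percentage = 0.05, `P_ABBA` holds 91 + 91 = 182 values
# and k = min(ceil(0.05 * 200), 181) = 10, so the value at index 10 of the
# sorted `P_ABBA` is reported as MPdist.
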
def _mpdist_vect(
Q,
T,
m,
distance_matrix_func=_mass_distance_matrix,
percentage=0.05,
k=None,
custom_func=None,
):
"""
Compute the matrix profile distance measure vector between `Q` and each subsequence,
`T[i : i + len(Q)]`, within `T`.
Parameters
----------
Q : ndarray
Query array
T : ndarray
Time series or sequence
m : int
Window size
distance_matrix_func : object, default _mass_distance_matrix
The function to use to compute the distance matrix between `Q` and `T`
percentage : float, default 0.05
The percentage of distances that will be used to report `mpdist`. The value
is between 0.0 and 1.0. This parameter is ignored when `k` is not `None` or when
`custom_func` is not None.
k : int, default None
Specify the `k`th value in the concatenated matrix profiles to return. When `k`
is not `None`, then the `percentage` parameter is ignored. This parameter is
ignored when `custom_func` is not None.
custom_func : object, default None
A custom user defined function for selecting the desired value from the
sorted `P_ABBA` array. This function may need to leverage `functools.partial`
and should take `P_ABBA` as its only input parameter and return a single
`MPdist` value. The `percentage` and `k` parameters are ignored when
`custom_func` is not None.
"""
j = Q.shape[0] - m + 1 # `k` is reserved for `P_ABBA` selection
l = T.shape[0] - m + 1
MPdist_vect = np.empty(T.shape[0] - Q.shape[0] + 1)
distance_matrix = np.full((j, l), np.inf)
P_ABBA = np.empty(2 * j)
if k is None:
percentage = min(percentage, 1.0)
percentage = max(percentage, 0.0)
k = min(math.ceil(percentage * (2 * Q.shape[0])), 2 * j - 1)
k = min(int(k), P_ABBA.shape[0] - 1)
distance_matrix_func(Q, T, m, distance_matrix)
rolling_row_min = core.rolling_nanmin(distance_matrix, j)
col_min = np.nanmin(distance_matrix, axis=0)
for i in range(MPdist_vect.shape[0]):
P_ABBA[:j] = rolling_row_min[:, i]
P_ABBA[j:] = col_min[i : i + j]
P_ABBA.sort()
MPdist_vect[i] = _select_P_ABBA_value(P_ABBA, k, custom_func)
return MPdist_vect


@core.non_normalized(aampdist)
def mpdist(T_A, T_B, m, percentage=0.05, k=None, normalize=True):
"""
Compute the z-normalized matrix profile distance (MPdist) measure between any two
time series
The MPdist distance measure considers two time series to be similar if they share
many subsequences, regardless of the order of matching subsequences. MPdist
concatenates and sorts the output of an AB-join and a BA-join and returns the value
of the `k`th smallest number as the reported distance. Note that MPdist is a
measure and not a metric. Therefore, it does not obey the triangular inequality but
the method is highly scalable.
Parameters
----------
T_A : ndarray
The first time series or sequence for which to compute the matrix profile
T_B : ndarray
The second time series or sequence for which to compute the matrix profile
m : int
Window size
percentage : float, default 0.05
The percentage of distances that will be used to report `mpdist`. The value
is between 0.0 and 1.0. This parameter is ignored when `k` is not `None`.
k : int, default None
Specify the `k`th value in the concatenated matrix profiles to return. When `k`
is not `None`, then the `percentage` parameter is ignored.
normalize : bool, default True
When set to `True`, this z-normalizes subsequences prior to computing distances.
Otherwise, this function gets re-routed to its complementary non-normalized
equivalent set in the `@core.non_normalized` function decorator.
Returns
-------
MPdist : float
The matrix profile distance
Notes
-----
`DOI: 10.1109/ICDM.2018.00119 \
<https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__
See Section III
"""
return _mpdist(T_A, T_B, m, percentage, k, mp_func=stump)


@core.non_normalized(aampdisted)
def mpdisted(dask_client, T_A, T_B, m, percentage=0.05, k=None, normalize=True):
"""
Compute the z-normalized matrix profile distance (MPdist) measure between any two
time series with a distributed dask cluster
The MPdist distance measure considers two time series to be similar if they share
many subsequences, regardless of the order of matching subsequences. MPdist
concatenates and sorts the output of an AB-join and a BA-join and returns the value
of the `k`th smallest number as the reported distance. Note that MPdist is a
measure and not a metric. Therefore, it does not obey the triangular inequality but
the method is highly scalable.
Parameters
----------
dask_client : client
A Dask Distributed client that is connected to a Dask scheduler and
Dask workers. Setting up a Dask distributed cluster is beyond the
scope of this library. Please refer to the Dask Distributed
documentation.
T_A : ndarray
The first time series or sequence for which to compute the matrix profile
T_B : ndarray
The second time series or sequence for which to compute the matrix profile
m : int
Window size
percentage : float, default 0.05
The percentage of distances that will be used to report `mpdist`. The value
is between 0.0 and 1.0. This parameter is ignored when `k` is not `None`.
k : int, default None
Specify the `k`th value in the concatenated matrix profiles to return. When `k`
is not `None`, then the `percentage` parameter is ignored.
normalize : bool, default True
When set to `True`, this z-normalizes subsequences prior to computing distances.
Otherwise, this function gets re-routed to its complementary non-normalized
equivalent set in the `@core.non_normalized` function decorator.
Returns
-------
MPdist : float
The matrix profile distance
Notes
-----
`DOI: 10.1109/ICDM.2018.00119 \
<https://www.cs.ucr.edu/~eamonn/MPdist_Expanded.pdf>`__
See Section III
"""
return _mpdist(T_A, T_B, m, percentage, k, dask_client=dask_client, mp_func=stumped)
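A short usage sketch for the public `stumpy.mpdist` function above; the arrays and window size are illustrative:

```python
import numpy as np
import stumpy

rng = np.random.default_rng(0)
T_A = rng.random(64)   # first time series
T_B = rng.random(128)  # second time series

# MPdist between T_A and T_B with subsequence window m; a smaller value
# means the two series share more similar subsequences.
print(stumpy.mpdist(T_A, T_B, m=16))
```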
Quality signals:

| Field | Value |
|---|---|
| avg_line_length | 35.870558 |
| max_line_length | 88 |
| alphanum_fraction | 0.675157 |
| qsc_code_num_words_quality_signal | 2,160 |
| qsc_code_num_chars_quality_signal | 14,133 |
| qsc_code_mean_word_length_quality_signal | 4.323611 |
| qsc_code_frac_words_unique_quality_signal | 0.124537 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.019274 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.015419 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.024628 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.82632 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.806403 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.797409 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.790556 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.784559 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.767855 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.01429 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.252317 |
| qsc_code_size_file_byte_quality_signal | 14,133 |
| qsc_code_num_lines_quality_signal | 393 |
| qsc_code_num_chars_line_max_quality_signal | 89 |
| qsc_code_num_chars_line_mean_quality_signal | 35.961832 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.869499 |
| qsc_code_frac_chars_comments_quality_signal | 0.725607 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.341463 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.073171 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.060976 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.195122 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 1 |
| qsc_code_frac_chars_dupe_6grams | 1 |
| qsc_code_frac_chars_dupe_7grams | 1 |
| qsc_code_frac_chars_dupe_8grams | 1 |
| qsc_code_frac_chars_dupe_9grams | 1 |
| qsc_code_frac_chars_dupe_10grams | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 0 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 0 |
| hits | 6 |
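A minimal sketch, assuming the `frac_*` signals are plain ratios over characters or lines (the dump does not define them), of how two of the signals above could be recomputed from `content`:

```python
def frac_chars_whitespace(content: str) -> float:
    # Fraction of characters that are whitespace.
    return sum(c.isspace() for c in content) / max(len(content), 1)

def frac_lines_dupe_lines(content: str) -> float:
    # Fraction of lines that occur more than once in the file.
    lines = content.splitlines()
    counts = {}
    for line in lines:
        counts[line] = counts.get(line, 0) + 1
    return sum(1 for line in lines if counts[line] > 1) / max(len(lines), 1)
```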
Row 2

| Field | Value |
|---|---|
| hexsha | 1c35aa1277ffe802f90bac0cd78c1c4a49041400 |
| size | 69,352 |
| ext | py |
| lang | Python |
| max_stars_repo_path | hack/test_errata.py |
| max_stars_repo_name | Davoska/cincinnati-graph-data |
| max_stars_repo_head_hexsha | 3bc79fdcefa72f570e0757c0bfd46d4302543264 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | hack/test_errata.py |
| max_issues_repo_name | Davoska/cincinnati-graph-data |
| max_issues_repo_head_hexsha | 3bc79fdcefa72f570e0757c0bfd46d4302543264 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | hack/test_errata.py |
| max_forks_repo_name | Davoska/cincinnati-graph-data |
| max_forks_repo_head_hexsha | 3bc79fdcefa72f570e0757c0bfd46d4302543264 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
import copy
import datetime
import os
import tempfile
import unittest
import urllib
from unittest.mock import MagicMock
from unittest.mock import patch
import errata


class GithubUserMock:
def __init__(self, login):
self.login = login


class GithubLabelMock:
def __init__(self, name):
self.name = name


class GithubPRMock:
def __init__(self, user, title, labels=None, number=0, body="", url="", html_url=""):
self.user = user
self.title = title
self.labels = labels if labels is not None else []  # avoid a mutable default argument
self.number = number
self.body = body
self.url = url
self.html_url = html_url
self.create_issue_comment = MagicMock()
def __eq__(self, other):
if not isinstance(other, GithubPRMock):
return False
return self.user == other.user \
and self.title == other.title \
and self.labels == other.labels \
and self.number == other.number \
and self.body == other.body \
and self.url == other.url \
and self.html_url == other.html_url


class ExtractErrataNumberFromBodyTest(unittest.TestCase):
def test_url_starting_with_valid_errata_marker(self):
"""
Test errata number extraction from valid URLs, i.e. URLs starting with the
corresponding ERRATA_MARKER in errata.py.
"""
param_list = [
('https://errata.devel.redhat.com/advisory/12345', 12345),
('https://errata.devel.redhat.com/advisory/67890', 67890),
('https://errata.devel.redhat.com/advisory/13579', 13579),
('https://errata.devel.redhat.com/advisory/24680', 24680),
('https://errata.devel.redhat.com/advisory/', None),
('https://errata.devel.redhat.com/advisory/invalid', None)
]
for (url, expected) in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), expected)
def test_invalid_url(self):
"""
Test errata number extraction from invalid URLs.
"""
param_list = [
'http://errata.devel.redhat.com/advisory/12345',
'https://errrata.devel.redhat.com/advisory/12345',
'https://errata.dvel.reddhat.com/advisori/12345',
'https://errata.devel.redhat.com/12345',
'https://errata.devel.com/advisory/12345',
'https://errata.redhat.com/advisory/12345',
'https://devel.redhat.com/advisory/12345',
'https://redhat.com/advisory/12345',
'https://errata.com/advisory/12345'
]
for url in param_list:
with self.subTest(url=url):
self.assertEqual(errata.extract_errata_number_from_body(url), None)
def test_missing_url(self):
"""
Test errata number extraction from missing URLs.
"""
param_list = [
'errata',
'12345',
'errata is 12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
def test_url_is_not_on_the_first_line(self):
"""
Test errata number extraction from valid URLs which are not located on the first line.
"""
param_list = [
'\nhttps://errata.devel.redhat.com/advisory/12345',
'\n\nhttps://errata.devel.redhat.com/advisory/12345'
]
for body in param_list:
with self.subTest(body=body):
self.assertEqual(errata.extract_errata_number_from_body(body), None)
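
# A minimal sketch of an extractor consistent with the cases above. This is
# an assumption inferred from the tests, not the actual errata.py code.
import re

_ERRATA_MARKER_SKETCH = 'https://errata.devel.redhat.com/advisory/'  # hypothetical name

def _extract_errata_number_sketch(body):
    # Only the first line is considered; the number must directly follow the marker.
    first_line = body.split('\n', 1)[0]
    match = re.match(re.escape(_ERRATA_MARKER_SKETCH) + r'(\d+)$', first_line)
    return int(match.group(1)) if match else None
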
class SaveAndLoadTest(unittest.TestCase):
def test_load_nonexisting_file(self):
"""
Test loading a nonexisting file.
"""
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
self.assertCountEqual(errata.load(cachepath), {})
def test_save_and_load_as_a_pair(self):
"""
Test using errata.save and errata.load as a pair to confirm their functionality.
"""
param_list = [
(),
({"foo": "bar"}),
({"value": "1234"}),
({"company": "Red Hat"}),
({"foo": "bar"}, {"value": "1234"}, {"errata": "1234"}),
({"value": "1234"}, {"foo": "bar"}, {"errata": "1234"})
]
for cache in param_list:
with self.subTest():
with tempfile.TemporaryDirectory() as tempdir:
cachepath = os.path.join(tempdir, "cache.json")
errata.save(cachepath, cache)
self.assertCountEqual(errata.load(cachepath), cache)


class PollTest(unittest.TestCase):
def setUp(self):
self.raw_messages = [
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 11,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
True,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 12,
"product": "RHOSE",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 21,
"product": "RHOSE",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 22,
"product": "RHEL",
"to": "SHIPPED_LIVE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 23,
"product": "RHEL",
"to": "QE",
}
}
),
(
False,
{
"additional_unnecessary_info": "shouldn't be processed",
"msg": {
"errata_id": 24,
"product": "SHIPPED_LIVE",
"to": "RHOSE",
}
}
)
]
self.valid_messages = [x[1] for x in self.raw_messages if x[0]]
self.invalid_messages = [x[1] for x in self.raw_messages if not x[0]]
@patch("json.load")
@patch("urllib.request.urlopen")
def test_params_of_urlopen_call(self, urlopen_mock, json_load_mock):
"""
Test parameters used in the data_grepper's url which is used for getting raw messages.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
# Get params of the url used in urlopen in errata.poll
parsed_url = urllib.parse.urlparse(urlopen_mock.call_args[0][0])
params = urllib.parse.parse_qs(parsed_url.query)
# Assert that the parameters comply with the datagrepper reference
self.assertGreater(int(params["page"][0]), 0) # Page must be greater than 0
self.assertLessEqual(int(params["rows_per_page"][0]), 100) # Must be less than or equal to 100
self.assertEqual(params["category"][0], "errata") # Should only look for errata category
self.assertEqual(params["contains"][0], "RHOSE") # Only messages containing RHOSE
@patch("json.load")
@patch("urllib.request.urlopen")
def test_number_of_returned_pages_is_zero(self, urlopen_mock, json_load_mock):
"""
Test poll's functionality when the returned data reports a page count of zero.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 0
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("urllib.request.urlopen")
def test_no_raw_messages(self, urlopen_mock, json_load_mock):
"""
Test polling messages if data doesn't contain any raw messages.
"""
urlopen_mock.return_value = MagicMock()
json_load_mock.return_value = {
"raw_messages": [],
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
self.assertEqual(polled_messages, [])
@patch("json.load")
@patch("time.sleep")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, sleep_mock, json_load_mock):
"""
Test polling messages if request.urlopen throws an exception on the first try.
"""
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
json_load_mock.return_value = {
"raw_messages": self.valid_messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
sleep_mock.assert_called_once() # The URL was unresponsive exactly once, so time.sleep should have been called exactly once
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_multiple_messages(self, urlopen_mock, json_load_mock):
"""
Test polling messages from raw messages that include wanted and unwanted messages.
"""
urlopen_mock.return_value = MagicMock()
messages = self.valid_messages + self.invalid_messages
json_load_mock.return_value = {
"raw_messages": messages,
"pages": 1
}
polled_messages = []
for message in errata.poll(period=datetime.timedelta(seconds=3600)):
polled_messages.append(message)
expected_msgs = [x['msg'] for x in self.valid_messages]
self.assertEqual(polled_messages, expected_msgs)


class SynopsisMatchTest(unittest.TestCase):
def test_match(self):
"""
Ensure we match only the synopses that we want to match.
"""
for synopsis, expected in [
(
'Moderate: OpenShift Container Platform 4.7.13 bug fix and security update',
{
'impact': 'Moderate',
'version': '4.7.13',
'major': '4',
'minor': '7',
'patch': '13',
'prerelease': None,
'build': None,
'type': 'bug fix and security update',
},
),
(
'Moderate: OpenShift Container Platform 4.7.5 security and bug fix update',
{
'impact': 'Moderate',
'version': '4.7.5',
'major': '4',
'minor': '7',
'patch': '5',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
(
'OpenShift Container Platform 4.6 GA Images',
{
'impact': None,
'version': '4.6',
'major': '4',
'minor': '6',
'patch': None,
'prerelease': None,
'build': None,
'type': 'GA Images',
},
),
(
'OpenShift Container Platform 4.5.11 optional CSI driver Operators bug fix update',
None,
),
(
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update',
{
'impact': 'Moderate',
'version': '4.5.20',
'major': '4',
'minor': '5',
'patch': '20',
'prerelease': None,
'build': None,
'type': 'bug fix and golang security update',
},
),
(
'Low: OpenShift Container Platform 4.3.40 security and bug fix update',
{
'impact': 'Low',
'version': '4.3.40',
'major': '4',
'minor': '3',
'patch': '40',
'prerelease': None,
'build': None,
'type': 'security and bug fix update',
},
),
]:
with self.subTest(synopsis=synopsis):
actual = errata._SYNOPSIS_REGEXP.match(synopsis)
if actual:
self.assertEqual(actual.groupdict(), expected)
else:
self.assertEqual(actual, expected)


class AdvisoryPhrasingsTest(unittest.TestCase):
def test_phrasings(self):
"""
Ensure we can construct synonym phrasings.
"""
for advisory, expected in [
(
'RHBA-123',
['RHBA-123', 'RHSA-123'],
),
(
'RHSA-123',
['RHBA-123', 'RHSA-123'],
),
(
'https://example.com/RHBA-123',
['https://example.com/RHBA-123', 'https://example.com/RHSA-123'],
),
(
'https://example.com/RHBA-123/abc',
['https://example.com/RHBA-123/abc', 'https://example.com/RHSA-123/abc'],
),
]:
with self.subTest(advisory=advisory):
actual = list(errata.advisory_phrasings(advisory=advisory))
self.assertEqual(actual, expected)
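
# A sketch consistent with the phrasings above; an assumption inferred from
# the tests, not the actual errata.py implementation.
def _advisory_phrasings_sketch(advisory):
    # Yield the RHBA- and RHSA- spellings of the same advisory, in that order.
    for prefix in ('RHBA', 'RHSA'):
        yield advisory.replace('RHBA', prefix).replace('RHSA', prefix)
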
class NotifyTest(unittest.TestCase):
def setUp(self):
self.messages_including_approved_pr = [
(
{
"errata_id": 11,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_11",
"approved_pr": "PR_HTML_URL_11"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_11'
'\nPR PR_HTML_URL_11 has been approved'
),
(
{
"errata_id": 12,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_12",
"approved_pr": "PR_HTML_URL_12"
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_12'
'\nPR PR_HTML_URL_12 has been approved'
)
]
self.messages_not_including_approved_pr = [
(
{
"errata_id": 21,
"fulladvisory": "RHSA-2020:0000-00",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "OpenShift Container Platform 4.6 GA Images",
"when": "2021-01-01 12:00:00 UTC",
"uri": "Public_Errata_URI_21",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:0000-00 shipped '
'2021-01-01 12:00:00 UTC: '
'OpenShift Container Platform 4.6 GA Images '
'Public_Errata_URI_21'
),
(
{
"errata_id": 22,
"fulladvisory": "RHSA-2020:2000-20",
"product": "RHOSE",
"to": "SHIPPED_LIVE",
"synopsis": "Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update",
"when": "2021-01-02 13:00:00 UTC",
"uri": "Public_Errata_URI_22",
},
'<!subteam^STE7S7ZU2>: '
'RHSA-2020:2000-20 shipped '
'2021-01-02 13:00:00 UTC: '
'Moderate: OpenShift Container Platform 4.5.20 bug fix and golang security update '
'Public_Errata_URI_22'
)
]
self.messages = \
self.messages_including_approved_pr + \
self.messages_not_including_approved_pr
@patch("builtins.print")
@patch("urllib.request.urlopen")
def test_no_webhook(self, urlopen_mock, print_mock):
"""
Test the functionality of notify when the webhook parameter is set to its default value.
"""
for message in self.messages:
with self.subTest(message=message):
errata.notify(message[0])
expected_message = message[0]
self.assertEqual(print_mock.call_args, unittest.mock.call(expected_message))
@patch("urllib.request.urlopen")
def test_format_of_message_not_including_approved_pr(self, urlopen_mock):
"""
Test the format of the data passed as argument to request.urlopen in errata.notify.
This tests the encoded format of the message in the data as well.
Only testing messages that do not include the approved_pr key.
"""
for (message, expected_message_in_data_to_be_uploaded) in self.messages_not_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)
@patch("urllib.request.urlopen")
def test_format_of_message_including_approved_pr(self, urlopen_mock):
"""
Test the format of the data passed as argument to request.urlopen in errata.notify.
This tests the encoded format of the message in the data as well.
Only testing messages that include the approved_pr key.
"""
for (message, expected_message_in_data_to_be_uploaded) in self.messages_including_approved_pr:
with self.subTest(message=message):
expected_data_to_be_uploaded = urllib.parse.urlencode({
'payload': {
'text': expected_message_in_data_to_be_uploaded
}
}).encode('utf-8')
errata.notify(message, MagicMock())
uploaded_data = urlopen_mock.call_args[1]['data']
self.assertEqual(uploaded_data, expected_data_to_be_uploaded)


class GetOpenPRsToFastTest(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.labels_multiple_including_lgtm = [
[
GithubLabelMock('lgtm')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('lgtm'),
GithubLabelMock('documentation'), GithubLabelMock('invalid')
],
[
GithubLabelMock('wontfix'), GithubLabelMock('lgtm'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('lgtm'),
GithubLabelMock('good first issue'), GithubLabelMock('bug')
]
]
self.labels_multiple_not_including_lgtm = [
[
],
[
GithubLabelMock('wontfix'), GithubLabelMock('bug'),
GithubLabelMock('question'), GithubLabelMock('invalid')
],
[
GithubLabelMock('help wanted'), GithubLabelMock('invalid'),
GithubLabelMock('good first issue'), GithubLabelMock('duplicate')
],
[
GithubLabelMock('bug'), GithubLabelMock('duplicate'), GithubLabelMock('invalid'),
GithubLabelMock('documentation'), GithubLabelMock('enhancement')
]
]
self.prs_correct_and_expected_to_be_yielded = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.6.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_not_including_lgtm[3]),
]
self.prs_including_the_lgtm_label = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[0]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[1]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[2]),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", self.labels_multiple_including_lgtm[3])
]
self.prs_author_is_not_openshift_bot = [
GithubPRMock(GithubUserMock("user1234"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("bot-openshift"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("Openshift-Bot"), "Enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("GitHubUser1234"), "Enable 4.0.0 in fast channel(s)")
]
self.prs_title_not_starting_with_Enable = [
GithubPRMock(GithubUserMock("openshift-bot"), ""),
GithubPRMock(GithubUserMock("openshift-bot"), "Fix component"),
GithubPRMock(GithubUserMock("openshift-bot"), "Add features in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "enable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Disable 4.0.0 in fast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enablee 4.0.0 in fast channel(s)")
]
self.prs_do_not_target_fast = [
GithubPRMock(GithubUserMock("openshift-bot"), "Enable "),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in FAST channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in faast channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in stable channel(s)"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in candidate channel(s)")
]
def test_prs_including_the_lgtm_label(self):
"""
Test retrieving PRs which include the LGTM label. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_including_the_lgtm_label)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_prs_author_is_not_openshift_bot(self):
"""
Test getting PRs whose author is not openshift-bot. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_author_is_not_openshift_bot)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_unknown_prs_should_be_skipped(self):
"""
Test getting unknown PRs. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_title_not_starting_with_Enable)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_ignore_prs_which_dont_target_fast(self):
"""
Test getting PRs which don't target fast. These PRs should be skipped.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_do_not_target_fast)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = []
self.assertEqual(open_prs_to_fast, expected_prs)
def test_correct_prs_should_be_yielded(self):
"""
Test getting PRs which are correct and should be yielded back.
"""
self.repo.get_pulls = MagicMock(return_value=self.prs_correct_and_expected_to_be_yielded)
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_prs = self.prs_correct_and_expected_to_be_yielded
self.assertEqual(open_prs_to_fast, expected_prs)
def test_get_pulls_query_params(self):
"""
Test query params used for getting the initial PRs from the repository.
"""
self.repo.get_pulls = MagicMock(return_value=[])
open_prs_to_fast = []
for pr in errata.get_open_prs_to_fast(self.repo):
open_prs_to_fast.append(pr)
expected_params = {
'state': 'open',
'base': 'master',
'sort': 'created',
}
self.assertEqual(self.repo.get_pulls.call_args, (unittest.mock.call(**expected_params)))


class LgtmFastPrForErrata(unittest.TestCase):
def setUp(self):
self.repo = MagicMock()
self.github_object_mock = MagicMock()
self.github_object_mock.get_repo.return_value = self.repo
self.prs_with_html_url_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
"PR_HTML_URL4" # HTML url of a PR which body has the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
"PR_HTML_URL12345"
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
"PR_HTML_URL3333"
)
]
self.prs_with_index_of_expected_pr = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "https://errata.devel.redhat.com/advisory/1111", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata.devel.redhat.com/advisory/1234", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://errata.devel.redhat.com/advisory/5678", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com/advisory/1357", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 1357
},
3 # Index of the PR which has the wanted errata id.
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 12345, "https://errata.devel.redhat.com/advisory/41", "PR_URL12345", "PR_HTML_URL12345"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 12354, "https://errata.devel.redhat.com/advisory/42", "PR_URL12354", "PR_HTML_URL12354"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 12340, "https://errata.devel.redhat.com/advisory/43", "PR_URL12340", "PR_HTML_URL12340"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 43215, "https://errata.devel.redhat.com/advisory/44", "PR_URL43215", "PR_HTML_URL43215")
],
{
"errata_id": 41
},
0
),
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1111, "https://errata.devel.redhat.com/advisory/51", "PR_URL1111", "PR_HTML_URL1111"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2222, "https://errata.devel.redhat.com/advisory/62", "PR_URL2222", "PR_HTML_URL2222"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3333, "https://errata.devel.redhat.com/advisory/73", "PR_URL3333", "PR_HTML_URL3333"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4444, "https://errata.devel.redhat.com/advisory/84", "PR_URL4444", "PR_HTML_URL4444")
],
{
"errata_id": 73
},
2
)
]
self.prs_with_invalid_errata_url = [
(
[
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 3.0.0 in fast channel(s)", [], 1, "", "PR_URL1", "PR_HTML_URL1"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.0.0 in fast channel(s)", [], 2, "https://errata", "PR_URL2", "PR_HTML_URL2"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.1.2 in fast channel(s)", [], 3, "https://redhat.com/advisory/84", "PR_URL3", "PR_HTML_URL3"),
GithubPRMock(GithubUserMock("openshift-bot"), "Enable 4.2.3 in fast channel(s)", [], 4, "https://errata.devel.redhat.com", "PR_URL4", "PR_HTML_URL4")
],
{
"errata_id": 21
}
)
]
@patch("github.Github")
def test_return_value_is_correct_for_specific_pr(self, Github_mock):
"""
Test retrieving the HTML URL of the PR which is related to a specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_html_url_of_expected_pr
for (prs, message, expected_pr_html_url) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, expected_pr_html_url)
@patch("github.Github")
def test_only_create_issue_on_the_expected_pr(self, Github_mock):
"""
Test creating an issue comment only on the PR which is related to the specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
for index, pr in enumerate(prs):
with self.subTest(prs_body=[x.body for x in prs], message=message):
if index == expected_index_of_pr_to_create_issue:
pr.create_issue_comment.assert_called_once()
else:
pr.create_issue_comment.assert_not_called()
@patch("github.Github")
def test_issue_comment_format(self, Github_mock):
"""
Test the format of the created issue comment on the PR which is related to the specific errata id.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_index_of_expected_pr
for (prs, message, expected_index_of_pr_to_create_issue) in param_list:
with self.subTest(prs_body=[x.body for x in prs], message=message):
self.repo.get_pulls = MagicMock(return_value=prs)
errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
issue_comment = prs[expected_index_of_pr_to_create_issue].create_issue_comment.call_args
expected_issue_comment = "Autoapproving PR to fast after the errata has shipped\n/lgtm"
self.assertEqual(issue_comment, (unittest.mock.call(expected_issue_comment)))
@patch("github.Github")
def test_prs_include_invalid_errata_url(self, Github_mock):
"""
Test PRs whose body includes an invalid errata URL.
These PRs should be skipped.
"""
githubrepo = MagicMock()
githubtoken = MagicMock()
Github_mock.return_value = self.github_object_mock
param_list = self.prs_with_invalid_errata_url
for (prs, message) in param_list:
with self.subTest(body=[x.body for x in prs]):
self.repo.get_pulls = MagicMock(return_value=prs)
pr_html_url = errata.lgtm_fast_pr_for_errata(githubrepo, githubtoken, message)
self.assertEqual(pr_html_url, None)


class PublicErrataUriTest(unittest.TestCase):
def setUp(self):
self.nodes_valid = [
(
{ # nodes received via urlopen
"nodes": [
{
"version": "4.0.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:0000"
}
}
]
},
( # Parameters for calling errata.public_errata_uri
"4.0.0",
"RHBA-2020:0000",
"candidate-4.0.0",
),
# Expected uri of the wanted node
"https://access.redhat.com/errata/RHBA-2020:0000",
),
(
{
"nodes": [
{
"version": "4.1.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:1000"
}
}
]
},
(
"4.1.0",
"RHBA-2020:1000",
"candidate-4.1.0",
),
"https://access.redhat.com/errata/RHBA-2020:1000",
),
(
{
"nodes": [
{
"version": "4.2.0",
"metadata": {
"url": "https://access.redhat.com/errata/RHBA-2020:2000"
}
}
]
},
(
"4.2.0",
"RHBA-2020:2000",
"candidate-4.2.0",
),
"https://access.redhat.com/errata/RHBA-2020:2000",
),
]
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_same_version(self, urlopen_mock, json_load_mock):
"""
Test that the URL of the node with the same version as the parameter is returned.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version):
errata_uri = errata.public_errata_uri(version=version, advisory="", channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_should_return_uri_of_the_same_advisory(self, urlopen_mock, json_load_mock):
"""
Test that the URL of the node with the same advisory as the parameter is returned.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory=advisory, channel=channel)
self.assertEqual(errata_uri, expected_errata_uri)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_received(self, urlopen_mock, json_load_mock):
"""
Test that None is returned when zero nodes are received.
"""
json_load_mock.return_value = {
"nodes": []
}
for (_, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
self.assertEqual(errata_uri, None)
@patch("json.load")
@patch("urllib.request.urlopen")
def test_zero_nodes_match(self, urlopen_mock, json_load_mock):
"""
Test that None is returned when zero nodes match the wanted version or advisory.
"""
for (data, params, _) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
with self.subTest(version=version, advisory=advisory):
errata_uri = errata.public_errata_uri(version="", advisory="", channel=channel)
self.assertEqual(errata_uri, None)
@patch("time.sleep")
@patch("json.load")
@patch("urllib.request.urlopen")
def test_unresponsive_url_becomes_responsive(self, urlopen_mock, json_load_mock, sleep_mock):
"""
Test requesting messages if request.urlopen throws an exception on the first try.
"""
for (data, params, expected_errata_uri) in self.nodes_valid:
version = params[0]
advisory = params[1]
channel = params[2]
json_load_mock.return_value = data
urlopen_mock.side_effect = [
Exception("Unresponsive, request.urlopen has failed"),
MagicMock()
]
sleep_mock.reset_mock()
with self.subTest():
errata_uri = errata.public_errata_uri(version=version, advisory=advisory, channel=channel)
sleep_mock.assert_called_once()
self.assertEqual(errata_uri, expected_errata_uri)


class ProcessMessageTest(unittest.TestCase):
def setUp(self):
self.valid_params = [
(
"https://access.redhat.com/errata/RHBA-2020:0000",
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
),
(
"https://access.redhat.com/errata/RHBA-2021:0749",
{
"synopsis": "OpenShift Container Platform 4.7.2 bug fix update",
"fulladvisory": "RHBA-2021:0749-06",
"when": "2021-03-16 08:42:16 UTC",
}
)
]
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_raise_exception_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing an invalid synopsis which is not in the excluded cache.
Should raise a ValueError.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test that the content of the cache remains unchanged when an invalid synopsis is received.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
cache = {
"RHBA-2020:0000-01":
{
"synopsis": "Moderate: OpenShift Container Platform 4.0.0 bug fix and golang security update",
"uri": "https://access.redhat.com/errata/RHBA-2020:0000",
"when": "2021-01-01 00:00:00 UTC",
}
}
cache_copy = copy.deepcopy(cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_invalid_synopsis_to_the_excluded_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing an invalid synopsis which is not in the excluded cache.
Should add the synopsis and the fulladvisory to the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
excluded_cache,
{
invalid_synopsis: "RHBA-2020:0000-01",
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test that there is no attempt to lgtm a fast PR when a new invalid synopsis is received.
The new invalid synopsis wasn't saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_new_invalid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test that there is no attempt to notify when a new invalid synopsis is received.
The new invalid synopsis wasn't saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {}
with self.assertRaises(ValueError):
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_excluded_cache_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing an invalid synopsis which is already in the excluded cache.
Should not change the content of the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
invalid_synopsis_2 = "Invalid 1.0.0"
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
invalid_synopsis_2: "RHBA-2020:1111-01"
}
excluded_cache_copy = copy.deepcopy(excluded_cache)
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(excluded_cache, excluded_cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there isn't an attempt to lgtm fast pr
when an already processed invalid synopsis is received.
Invalid synopsis is saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01"
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_invalid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there isn't an attempt to notify
when an already processed invalid synopsis is received.
Invalid synopsis is saved in the excluded cache.
"""
public_errata_uri_mock.return_value = "https://access.redhat.com/errata/RHBA-2020:0000"
invalid_synopsis = "Invalid Synopsis 0.0.0"
message = {
"synopsis": invalid_synopsis,
"fulladvisory": "RHBA-2020:0000-01",
"when": "2021-01-01 00:00:00 UTC",
}
cache = {}
excluded_cache = {
invalid_synopsis: "RHBA-2020:0000-01",
}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_add_new_valid_synopsis_to_the_cache(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing valid synopsis which is not in the cache.
Should add the synopsis's data to the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(
cache,
{
message_copy['fulladvisory']:
{
"when": message_copy['when'],
"synopsis": message_copy['synopsis'],
"uri": public_errata_uri,
}
}
)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_notify_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there is an attempt to notify when a new valid synopsis is received.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_lgtm_fast_pr_when_new_valid_synopsis_is_received(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there is an attempt to lgtm fast pr when a new valid synopsis is received.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_called_once()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_content_of_cache_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing valid synopsis which is already in the cache.
Should not change the content of the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
cache_copy = copy.deepcopy(cache)
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
self.assertDictEqual(cache, cache_copy)
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there isn't an attempt to notify when
reprocessing a valid synopsis.
The valid synopsis is already saved in the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
notify_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_reprocessing_valid_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test if there isn't an attempt to lgtm fast PR when
reprocessing a valid synopsis.
The valid synopsis is already saved in the cache.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
cache[message['fulladvisory']] = {
'when': message['when'],
'synopsis': message['synopsis'],
'uri': public_errata_uri,
}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing a new valid synopsis which does not have a public errata URI.
Test that there is no attempt to notify.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_for_valid_synopsis_does_not_have_public_errata(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing a new valid synopsis which does not have a public errata URI.
Test that there is no attempt to lgtm the fast PR for the message's synopsis.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = None
lgtm_fast_pr_for_errata_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_notify_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing a new valid synopsis which does not have a matching public errata URI.
Test that there is no attempt to notify
when the public errata URI does not match the message's advisory.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
notify_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_should_not_lgtm_fast_pr_when_public_errata_does_not_match_synopsis(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Test processing a new valid synopsis which does not have a matching public errata URI.
Test that there is no attempt to lgtm the fast PR for the message's synopsis
when the public errata URI does not match the message's advisory.
"""
for (public_errata_uri, message) in self.valid_params:
with self.subTest(message=message, errata_uri=public_errata_uri):
public_errata_uri_mock.return_value = 'non_matching_errata_uri'
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
cache = {}
excluded_cache = {}
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
lgtm_fast_pr_for_errata_mock.assert_not_called()
@patch("errata.lgtm_fast_pr_for_errata")
@patch("errata.public_errata_uri")
@patch("errata.notify")
def test_processing_valid_message_multiple_times(
self,
notify_mock,
public_errata_uri_mock,
lgtm_fast_pr_for_errata_mock
):
"""
Process the same valid message multiple times.
Should attempt to notify and to lgtm the fast PR only once for the same message.
"""
for (public_errata_uri, message) in self.valid_params:
public_errata_uri_mock.return_value = public_errata_uri
lgtm_fast_pr_for_errata_mock.reset_mock()
notify_mock.reset_mock()
message_copy = copy.deepcopy(message)
cache = {}
excluded_cache = {}
for _ in range(10):
message = copy.deepcopy(message_copy)
errata.process_message(
message=message,
cache=cache,
excluded_cache=excluded_cache,
webhook=None,
githubrepo=None,
githubtoken=None,
)
with self.subTest(message=message, errata_uri=public_errata_uri):
lgtm_fast_pr_for_errata_mock.assert_called_once()
with self.subTest(message=message, errata_uri=public_errata_uri):
notify_mock.assert_called_once()
if __name__ == '__main__':
unittest.main()
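# The tests above pin down the observable contract of errata.process_message
# without ever showing its body. What follows is a hypothetical sketch that is
# merely consistent with those assertions, not the real implementation: the
# helper argument lists (public_errata_uri, notify, lgtm_fast_pr_for_errata)
# and the synopsis validity rule are assumptions, since the excerpt omits them.
def is_valid_synopsis_sketch(synopsis):
    # Stand-in predicate; the real validity rule is not visible in this excerpt.
    return not synopsis.startswith("Invalid")
def process_message_sketch(message, cache, excluded_cache,
                           webhook, githubrepo, githubtoken):
    synopsis = message["synopsis"]
    advisory = message["fulladvisory"]
    if advisory in cache:
        return  # reprocessing a cached advisory: no notify, no lgtm
    if synopsis in excluded_cache:
        return  # known-invalid synopsis: silently skipped
    if not is_valid_synopsis_sketch(synopsis):
        excluded_cache[synopsis] = advisory
        raise ValueError("invalid synopsis: %s" % synopsis)
    uri = errata.public_errata_uri(synopsis)  # assumed argument
    # Act only when a public errata URI exists and matches the advisory.
    if uri is None or advisory.rsplit("-", 1)[0] not in uri:
        return
    cache[advisory] = {
        "when": message["when"],
        "synopsis": synopsis,
        "uri": uri,
    }
    errata.notify(message, webhook)  # assumed argument list
    errata.lgtm_fast_pr_for_errata(message, githubrepo, githubtoken)  # assumed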
| 40.509346 | 194 | 0.552818 | [further quality-signal values elided] |
1c46e8ebc705732b535b16f3a42154c4df52a3d9 | 82 | py | Python | tests/conftest.py | mishc9/flake_rba | eda1e80436f401871dba61a4c769204c2cbcfc65 | ["MIT"] | null | null | null | tests/conftest.py | mishc9/flake_rba | eda1e80436f401871dba61a4c769204c2cbcfc65 | ["MIT"] | null | null | null | tests/conftest.py | mishc9/flake_rba | eda1e80436f401871dba61a4c769204c2cbcfc65 | ["MIT"] | null | null | null |
import pytest
@pytest.fixture
def fixture_template():
return "Hello World!"
| 11.714286 | 25 | 0.731707 | [further quality-signal values elided] |
1c4c3e85639c74de8f576b822fda5f08b758c4fe | 31,832 | py | Python | Chatbot_DockerVersion/webapp/requirements/mindmeld/tests/test_markup.py | ptrckhmmr/ChatBotforCulturalInstitutions | c3da1a6d142e306c2e3183ba5609553e15a0e124 | ["Apache-2.0"] | 1 | 2020-12-24T13:28:35.000Z | 2020-12-24T13:28:35.000Z | Chatbot_DockerVersion/webapp/requirements/mindmeld/tests/test_markup.py | ptrckhmmr/ChatBotforCulturalInstitutions | c3da1a6d142e306c2e3183ba5609553e15a0e124 | ["Apache-2.0"] | null | null | null | Chatbot_DockerVersion/webapp/requirements/mindmeld/tests/test_markup.py | ptrckhmmr/ChatBotforCulturalInstitutions | c3da1a6d142e306c2e3183ba5609553e15a0e124 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_markup
----------------------------------
Tests for `markup` module.
"""
import pytest
from mindmeld import markup
from mindmeld.core import Entity, NestedEntity, ProcessedQuery, QueryEntity, Span
MARKED_UP_STRS = [
'show me houses under {[600,000|sys_number] dollars|price}',
'show me houses under {[$600,000|sys_number]|price}',
'show me houses under {[1.5|sys_number] million dollars|price}',
'play {s.o.b.|track}',
"what's on at {[8 p.m.|sys_time]|range}?",
'is {s.o.b.|show} gonna be on at {[8 p.m.|sys_time]|range}?',
'this is a {role model|type|role}',
'this query has no entities'
]
MARKED_DOWN_STRS = [
'show me houses under 600,000 dollars',
'show me houses under $600,000',
'show me houses under 1.5 million dollars',
'play s.o.b.',
"what's on at 8 p.m.?",
'is s.o.b. gonna be on at 8 p.m.?',
'this is a role model',
'this query has no entities'
]
@pytest.mark.mark_down
def test_mark_down():
"""Tests the mark down function"""
text = 'is {s.o.b.|show} gonna be {{on at 8 p.m.|sys_time}|range}?'
marked_down = markup.mark_down(text)
assert marked_down == 'is s.o.b. gonna be on at 8 p.m.?'
@pytest.mark.load
def test_load_basic_query(query_factory):
"""Tests loading a basic query with no entities"""
markup_text = 'This is a test query string'
processed_query = markup.load_query(markup_text, query_factory)
assert processed_query
assert processed_query.query
@pytest.mark.load
def test_load_entity(query_factory):
"""Tests loading a basic query with an entity"""
markup_text = 'When does the {Elm Street|store_name} store close?'
processed_query = markup.load_query(markup_text, query_factory)
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.span.start == 14
assert entity.span.end == 23
assert entity.normalized_text == 'elm street'
assert entity.entity.type == 'store_name'
assert entity.entity.text == 'Elm Street'
@pytest.mark.load
@pytest.mark.system
def test_load_system(query_factory):
"""Tests loading a query with a system entity"""
text = 'show me houses under {600,000 dollars|sys_amount-of-money}'
processed_query = markup.load_query(text, query_factory)
assert processed_query
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.text == '600,000 dollars'
assert entity.entity.type == 'sys_amount-of-money'
assert entity.span.start == 21
assert not isinstance(entity.entity.value, str)
assert entity.entity.value == {'unit': '$', 'value': 600000}
@pytest.mark.dump
@pytest.mark.system
@pytest.mark.role
def test_load_system_role(query_factory):
"""Tests loading a basic query with an entity with a role"""
text = ('What stores are open between {3|sys_time|open_hours} and '
'{5|sys_time|close_hours}')
processed_query = markup.load_query(text, query_factory)
assert len(processed_query.entities) == 2
entity = processed_query.entities[0]
assert entity.span.start == 29
assert entity.span.end == 29
assert entity.normalized_text == '3'
assert entity.entity.type == 'sys_time'
assert entity.entity.text == '3'
assert entity.entity.role == 'open_hours'
entity = processed_query.entities[1]
assert entity.span.start == 35
assert entity.span.end == 35
assert entity.normalized_text == '5'
assert entity.entity.type == 'sys_time'
assert entity.entity.text == '5'
assert entity.entity.role == 'close_hours'
@pytest.mark.load
@pytest.mark.system
@pytest.mark.nested
def test_load_nested(query_factory):
"""Tests loading a query with a nested system entity"""
text = 'show me houses under {{600,000|sys_number} dollars|price}'
processed_query = markup.load_query(text, query_factory)
assert processed_query
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.text == '600,000 dollars'
assert entity.entity.type == 'price'
assert entity.span == Span(21, 35)
assert not isinstance(entity.entity.value, str)
assert 'children' in entity.entity.value
assert len(entity.entity.value['children']) == 1
nested = entity.entity.value['children'][0]
assert nested.text == '600,000'
assert nested.span == Span(0, 6)
assert nested.entity.type == 'sys_number'
assert nested.entity.value == {'value': 600000}
@pytest.mark.load
@pytest.mark.system
@pytest.mark.nested
def test_load_nested_2(query_factory):
"""Tests loading a query with a nested system entity"""
text = 'show me houses under {${600,000|sys_number}|price}'
processed_query = markup.load_query(text, query_factory)
assert processed_query
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.text == '$600,000'
assert entity.entity.type == 'price'
assert entity.span == Span(21, 28)
assert not isinstance(entity.entity.value, str)
assert 'children' in entity.entity.value
assert len(entity.entity.value['children']) == 1
nested = entity.entity.value['children'][0]
assert nested.text == '600,000'
assert nested.entity.value == {'value': 600000}
assert nested.span == Span(1, 7)
@pytest.mark.load
@pytest.mark.system
@pytest.mark.nested
def test_load_nested_3(query_factory):
"""Tests loading a query with a nested system entity"""
text = 'show me houses under {{1.5 million|sys_number} dollars|price}'
processed_query = markup.load_query(text, query_factory)
assert processed_query
@pytest.mark.load
@pytest.mark.system
@pytest.mark.nested
def test_load_nested_4(query_factory):
"""Tests dumping a query with multiple nested system entities"""
text = 'show me houses {between {600,000|sys_number} and {1,000,000|sys_number} dollars|price}'
processed_query = markup.load_query(text, query_factory)
assert processed_query
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.text == 'between 600,000 and 1,000,000 dollars'
assert entity.entity.type == 'price'
assert entity.span == Span(15, 51)
assert not isinstance(entity.entity.value, str)
assert 'children' in entity.entity.value
assert len(entity.entity.value['children']) == 2
lower, upper = entity.entity.value['children']
assert lower.text == '600,000'
assert lower.entity.value == {'value': 600000}
assert lower.span == Span(8, 14)
assert upper.text == '1,000,000'
assert upper.entity.value == {'value': 1000000}
assert upper.span == Span(20, 28)
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars(query_factory):
"""Tests loading a query with special characters"""
text = 'play {s.o.b.|track}'
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities)
entity = entities[0]
assert entity.text == 's.o.b.'
assert entity.normalized_text == 's o b'
assert entity.span.start == 5
assert entity.span.end == 10
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars_2(query_factory):
"""Tests loading a query with special characters"""
text = "what's on at {{8 p.m.|sys_time}|range}?"
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities) == 1
entity = entities[0]
assert entity.text == '8 p.m.'
assert entity.normalized_text == '8 p m'
assert entity.span == Span(13, 18)
assert entity.entity.type == 'range'
nested = entity.entity.value['children'][0]
assert nested.text == '8 p.m.'
assert nested.span == Span(0, 5)
assert nested.entity.type == 'sys_time'
assert nested.entity.value['value']
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars_3(query_factory):
"""Tests loading a query with special characters"""
text = 'is {s.o.b.|show} gonna be {{on at 8 p.m.|sys_time}|range}?'
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
expected_entity = QueryEntity.from_query(processed_query.query, Span(3, 8), entity_type='show')
assert entities[0] == expected_entity
assert entities[1].entity.type == 'range'
assert entities[1].span == Span(19, 30)
assert 'children' in entities[1].entity.value
assert entities[1].entity.value['children'][0].entity.type == 'sys_time'
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars_4(query_factory):
"""Tests loading a query with special characters"""
text = 'is {s.o.b.|show} ,, gonna be on at {{8 p.m.|sys_time}|range}?'
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
expected_entity = QueryEntity.from_query(processed_query.query, Span(3, 8), entity_type='show')
assert entities[0] == expected_entity
assert entities[1].entity.type == 'range'
assert entities[1].span == Span(28, 33)
assert 'children' in entities[1].entity.value
assert entities[1].entity.value['children'][0].entity.type == 'sys_time'
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars_5(query_factory):
"""Tests loading a query with special characters"""
text = 'what christmas movies are , showing at {{8pm|sys_time}|range}'
processed_query = markup.load_query(text, query_factory)
assert len(processed_query.entities) == 1
entity = processed_query.entities[0]
assert entity.span == Span(42, 44)
assert entity.normalized_text == '8pm'
@pytest.mark.load
@pytest.mark.special
def test_load_special_chars_6(query_factory):
"""Tests loading a query with special characters"""
text = "what's on {after {8 p.m.|sys_time}|range}?"
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities) == 1
assert entities[0].text == 'after 8 p.m.'
assert entities[0].normalized_text == 'after 8 p m'
assert entities[0].span == Span(10, 21)
@pytest.mark.load
@pytest.mark.group
def test_load_group(query_factory):
"""Tests loading a query with an entity group"""
text = "a [{large|size} {latte|product} with {nonfat milk|option}|product] please"
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities) == 3
assert entities[0].text == 'large'
assert entities[0].entity.type == 'size'
assert entities[0].span == Span(2, 6)
assert entities[0].parent == entities[1]
assert entities[1].text == 'latte'
assert entities[1].entity.type == 'product'
assert entities[1].span == Span(8, 12)
assert entities[1].children == (entities[0], entities[2])
assert entities[2].text == 'nonfat milk'
assert entities[2].entity.type == 'option'
assert entities[2].span == Span(19, 29)
assert entities[2].parent == entities[1]
@pytest.mark.load
@pytest.mark.group
def test_load_group_nested(query_factory):
"""Tests loading a query with a nested entity group"""
text = ('Order [{one|quantity} {large|size} {Tesora|product} with [{medium|size} '
'{cream|option}|option] and [{medium|size} {sugar|option}|option]|product]')
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities) == 7
assert entities[0].text == 'one'
assert entities[0].entity.type == 'quantity'
assert entities[0].span == Span(6, 8)
assert entities[0].parent == entities[2]
assert entities[1].text == 'large'
assert entities[1].entity.type == 'size'
assert entities[1].span == Span(10, 14)
assert entities[1].parent == entities[2]
assert entities[2].text == 'Tesora'
assert entities[2].entity.type == 'product'
assert entities[2].span == Span(16, 21)
assert entities[2].children == (entities[0], entities[1], entities[4], entities[6])
assert entities[3].text == 'medium'
assert entities[3].entity.type == 'size'
assert entities[3].span == Span(28, 33)
assert entities[3].parent == entities[4]
assert entities[4].text == 'cream'
assert entities[4].entity.type == 'option'
assert entities[4].span == Span(35, 39)
assert entities[4].parent == entities[2]
assert entities[4].children == (entities[3],)
assert entities[5].text == 'medium'
assert entities[5].entity.type == 'size'
assert entities[5].span == Span(45, 50)
assert entities[5].parent == entities[6]
assert entities[6].text == 'sugar'
assert entities[6].entity.type == 'option'
assert entities[6].span == Span(52, 56)
assert entities[6].parent == entities[2]
assert entities[6].children == (entities[5],)
@pytest.mark.load
@pytest.mark.group
def test_load_groups(query_factory):
"""Tests loading a query with multiple top level entity groups"""
text = ('Order [{one|quantity} {large|size} {Tesora|product} with '
'[{medium|size} {cream|option}|option]|product] from '
'[{Philz|store} in {Downtown Sunnyvale|location}|store]')
processed_query = markup.load_query(text, query_factory)
entities = processed_query.entities
assert len(entities) == 7
assert entities[0].text == 'one'
assert entities[0].entity.type == 'quantity'
assert entities[0].span == Span(6, 8)
assert entities[0].parent == entities[2]
assert entities[1].text == 'large'
assert entities[1].entity.type == 'size'
assert entities[1].span == Span(10, 14)
assert entities[1].parent == entities[2]
assert entities[2].text == 'Tesora'
assert entities[2].entity.type == 'product'
assert entities[2].span == Span(16, 21)
assert entities[2].children == (entities[0], entities[1], entities[4])
assert entities[3].text == 'medium'
assert entities[3].entity.type == 'size'
assert entities[3].span == Span(28, 33)
assert entities[3].parent == entities[4]
assert entities[4].text == 'cream'
assert entities[4].entity.type == 'option'
assert entities[4].span == Span(35, 39)
assert entities[4].parent == entities[2]
assert entities[4].children == (entities[3],)
assert entities[5].text == 'Philz'
assert entities[5].entity.type == 'store'
assert entities[5].span == Span(46, 50)
assert entities[5].children == (entities[6],)
assert entities[6].text == 'Downtown Sunnyvale'
assert entities[6].entity.type == 'location'
assert entities[6].span == Span(55, 72)
assert entities[6].parent == entities[5]
@pytest.mark.dump
def test_dump_basic(query_factory):
"""Tests dumping a basic query"""
query_text = 'A basic query'
query = query_factory.create_query(query_text)
processed_query = ProcessedQuery(query)
assert markup.dump_query(processed_query) == query_text
@pytest.mark.dump
def test_dump_entity(query_factory):
"""Tests dumping a basic query with an entity"""
query_text = 'When does the Elm Street store close?'
query = query_factory.create_query(query_text)
entities = [QueryEntity.from_query(query, Span(14, 23), entity_type='store_name')]
processed_query = ProcessedQuery(query, entities=entities)
markup_text = 'When does the {Elm Street|store_name} store close?'
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_entity=True) == query_text
@pytest.mark.dump
def test_dump_role(query_factory):
"""Tests dumping a basic query with an entity with a role"""
query_text = 'What stores are open between 3 and 5'
query = query_factory.create_query(query_text)
entities = [
QueryEntity.from_query(query, Span(29, 29), entity_type='sys_time', role='open_hours'),
QueryEntity.from_query(query, Span(35, 35), entity_type='sys_time', role='close_hours')
]
processed_query = ProcessedQuery(query, entities=entities)
markup_text = ('What stores are open between {3|sys_time|open_hours} and '
'{5|sys_time|close_hours}')
entity_text = 'What stores are open between {3|sys_time} and {5|sys_time}'
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_role=True) == entity_text
assert markup.dump_query(processed_query, no_role=True, no_entity=True) == query_text
@pytest.mark.dump
def test_dump_entities(query_factory):
"""Tests dumping a basic query with two entities"""
query_text = 'When does the Elm Street store close on Monday?'
query = query_factory.create_query(query_text)
entities = [QueryEntity.from_query(query, Span(14, 23), entity_type='store_name'),
QueryEntity.from_query(query, Span(40, 45), entity_type='sys_time')]
processed_query = ProcessedQuery(query, entities=entities)
markup_text = 'When does the {Elm Street|store_name} store close on {Monday|sys_time}?'
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_entity=True) == query_text
@pytest.mark.dump
@pytest.mark.nested
def test_dump_nested(query_factory):
"""Tests dumping a query with a nested system entity"""
query_text = 'show me houses under 600,000 dollars'
query = query_factory.create_query(query_text)
nested = NestedEntity.from_query(query, Span(0, 6), parent_offset=21, entity_type='sys_number')
raw_entity = Entity('600,000 dollars', 'price', value={'children': [nested]})
entities = [QueryEntity.from_query(query, Span(21, 35), entity=raw_entity)]
processed_query = ProcessedQuery(query, entities=entities)
markup_text = 'show me houses under {{600,000|sys_number} dollars|price}'
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == markup_text
assert markup.dump_query(processed_query, no_entity=True) == query_text
@pytest.mark.dump
@pytest.mark.nested
def test_dump_multi_nested(query_factory):
"""Tests dumping a query with multiple nested system entities"""
query_text = 'show me houses between 600,000 and 1,000,000 dollars'
query = query_factory.create_query(query_text)
lower = NestedEntity.from_query(query, Span(8, 14), parent_offset=15, entity_type='sys_number')
upper = NestedEntity.from_query(query, Span(20, 28), parent_offset=15, entity_type='sys_number')
raw_entity = Entity('between 600,000 dollars and 1,000,000', 'price',
value={'children': [lower, upper]})
entities = [QueryEntity.from_query(query, Span(15, 51), entity=raw_entity)]
processed_query = ProcessedQuery(query, entities=entities)
markup_text = ('show me houses {between {600,000|sys_number} and '
'{1,000,000|sys_number} dollars|price}')
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == markup_text
assert markup.dump_query(processed_query, no_entity=True) == query_text
@pytest.mark.dump
@pytest.mark.group
def test_dump_group(query_factory):
"""Tests dumping a query with an entity group"""
query_text = 'a large latte with nonfat milk please'
query = query_factory.create_query(query_text)
size = QueryEntity.from_query(query, Span(2, 6), entity_type='size')
option = QueryEntity.from_query(query, Span(19, 29), entity_type='option')
product = QueryEntity.from_query(query, Span(8, 12), entity_type='product',
children=(size, option))
processed_query = ProcessedQuery(query, entities=[size, product, option])
markup_text = "a [{large|size} {latte|product} with {nonfat milk|option}|product] please"
entity_text = "a {large|size} {latte|product} with {nonfat milk|option} please"
group_text = "a [large latte with nonfat milk|product] please"
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == entity_text
assert markup.dump_query(processed_query, no_entity=True) == group_text
assert markup.dump_query(processed_query, no_group=True, no_entity=True) == query_text
@pytest.mark.dump
@pytest.mark.group
def test_dump_group_with_role(query_factory):
"""Tests dumping a query with an entity group with role type"""
query_text = 'a large latte with nonfat milk please'
query = query_factory.create_query(query_text)
size = QueryEntity.from_query(query, Span(2, 6), entity_type='size')
option = QueryEntity.from_query(query, Span(19, 29), entity_type='option', role='beverage')
product = QueryEntity.from_query(query, Span(8, 12), entity_type='dish-type', role='beverage',
children=(size, option))
processed_query = ProcessedQuery(query, entities=[size, product, option])
markup_text = "a [{large|size} {latte|dish-type|beverage} with " \
"{nonfat milk|option|beverage}|dish-type] please"
entity_text = "a {large|size} {latte|dish-type|beverage} with " \
"{nonfat milk|option|beverage} please"
group_text = "a [large latte with nonfat milk|dish-type] please"
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == entity_text
assert markup.dump_query(processed_query, no_entity=True, no_role=True) == group_text
assert markup.dump_query(processed_query,
no_group=True, no_entity=True, no_role=True) == query_text
@pytest.mark.dump
@pytest.mark.group
def test_dump_group_nested(query_factory):
"""Tests dumping a query with nested entity groups"""
query_text = 'Order one large Tesora with medium cream and medium sugar'
query = query_factory.create_query(query_text)
entities = [
QueryEntity.from_query(query, Span(6, 8), entity_type='quantity'),
QueryEntity.from_query(query, Span(10, 14), entity_type='size'),
QueryEntity.from_query(query, Span(16, 21), entity_type='product'),
QueryEntity.from_query(query, Span(28, 33), entity_type='size'),
QueryEntity.from_query(query, Span(35, 39), entity_type='option'),
QueryEntity.from_query(query, Span(45, 50), entity_type='size'),
QueryEntity.from_query(query, Span(52, 56), entity_type='option')
]
entities[4] = entities[4].with_children((entities[3],))
entities[6] = entities[6].with_children((entities[5],))
entities[2] = entities[2].with_children((entities[0], entities[1], entities[4], entities[6]))
processed_query = ProcessedQuery(query, entities=entities)
markup_text = ('Order [{one|quantity} {large|size} {Tesora|product} with [{medium|size} '
'{cream|option}|option] and [{medium|size} {sugar|option}|option]|product]')
entity_text = ('Order {one|quantity} {large|size} {Tesora|product} with {medium|size} '
'{cream|option} and {medium|size} {sugar|option}')
group_text = ('Order [one large Tesora with [medium '
'cream|option] and [medium sugar|option]|product]')
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == entity_text
assert markup.dump_query(processed_query, no_entity=True) == group_text
assert markup.dump_query(processed_query, no_group=True, no_entity=True) == query_text
@pytest.mark.dump
@pytest.mark.group
def test_dump_group_nested_2(query_factory):
"""Tests dumping a query with nested entity groups"""
query_text = 'Can I get one curry sauce with my rice ball with house salad'
query = query_factory.create_query(query_text)
entities = [
QueryEntity.from_query(query, Span(10, 12), entity_type='sys_number', role='quantity'),
QueryEntity.from_query(query, Span(14, 24), entity_type='option'),
QueryEntity.from_query(query, Span(34, 59), entity_type='dish')
]
entities[1] = entities[1].with_children((entities[0],))
entities[2] = entities[2].with_children((entities[1],))
processed_query = ProcessedQuery(query, entities=entities)
markup_text = ('Can I get [[{one|sys_number|quantity} {curry sauce|option}|option] '
'with my {rice ball with house salad|dish}|dish]')
entity_text = ('Can I get {one|sys_number|quantity} {curry sauce|option} '
'with my {rice ball with house salad|dish}')
role_text = ('Can I get {one|quantity} curry sauce '
'with my rice ball with house salad')
group_text = ('Can I get [[one curry sauce|option] '
'with my rice ball with house salad|dish]')
assert markup.dump_query(processed_query) == markup_text
assert markup.dump_query(processed_query, no_group=True) == entity_text
assert markup.dump_query(processed_query, no_group=True, no_entity=True) == role_text
assert markup.dump_query(processed_query, no_entity=True, no_role=True) == group_text
assert markup.dump_query(processed_query,
no_group=True, no_entity=True, no_role=True) == query_text
@pytest.mark.dump
@pytest.mark.group
def test_dump_groups(query_factory):
"""Tests dumping a query with multiple top level entity groups"""
query_text = 'Order one large Tesora with medium cream from Philz in Downtown Sunnyvale'
query = query_factory.create_query(query_text)
entities = [
QueryEntity.from_query(query, Span(6, 8), entity_type='quantity'),
QueryEntity.from_query(query, Span(10, 14), entity_type='size'),
QueryEntity.from_query(query, Span(16, 21), entity_type='product'),
QueryEntity.from_query(query, Span(28, 33), entity_type='size'),
QueryEntity.from_query(query, Span(35, 39), entity_type='option'),
QueryEntity.from_query(query, Span(46, 50), entity_type='store'),
QueryEntity.from_query(query, Span(55, 72), entity_type='location')
]
entities[4] = entities[4].with_children((entities[3],))
entities[2] = entities[2].with_children((entities[0], entities[1], entities[4]))
entities[5] = entities[5].with_children((entities[6],))
processed_query = ProcessedQuery(query, entities=entities)
markup_text = ('Order [{one|quantity} {large|size} {Tesora|product} with '
'[{medium|size} {cream|option}|option]|product] from '
'[{Philz|store} in {Downtown Sunnyvale|location}|store]')
assert markup.dump_query(processed_query) == markup_text
@pytest.mark.load
@pytest.mark.dump
@pytest.mark.group
def test_load_dump_groups(query_factory):
"""Tests that load_query and dump_query are reversible"""
text = ('Order [{one|quantity} {large|size} {Tesora|product} with '
'[{medium|size} {cream|option}|option]|product] from '
'[{Philz|store} in {Downtown Sunnyvale|location}|store]')
processed_query = markup.load_query(text, query_factory)
markup_text = markup.dump_query(processed_query)
assert text == markup_text
@pytest.mark.load
@pytest.mark.dump
@pytest.mark.group
def test_load_dump_groups_roles(query_factory):
"""Tests that load_query and dump_query are reversible"""
text = ('Order [{one|sys_number|quantity} {large|size} {Tesora|product|dish} with '
'[{medium|size} {cream|option|addin}|option]|product]')
processed_query = markup.load_query(text, query_factory)
markup_text = markup.dump_query(processed_query)
assert text == markup_text
@pytest.mark.load
@pytest.mark.dump
def test_load_dump_2(query_factory):
"""Tests that load_query and dump_query are reversible"""
text = ("i'm extra hungry get me a {chicken leg|dish}, [{1|quantity} "
"{kheema nan|dish}|dish] [{2|quantity} regular {nans|dish}|dish] "
"[{one|quantity} {chicken karahi|dish}|dish], [{1|quantity} "
"{saag paneer|dish}|dish] and [{1|quantity} {chicken biryani|dish}|dish]")
processed_query = markup.load_query(text, query_factory)
markup_text = markup.dump_query(processed_query)
assert text == markup_text
def test_bootstrap_query_with_entities(query_factory):
query_text = 'Can I get one curry sauce with my rice ball with house salad'
query = query_factory.create_query(query_text)
entities = [
QueryEntity.from_query(query, Span(10, 12), entity_type='sys_number', role='quantity'),
QueryEntity.from_query(query, Span(14, 24), entity_type='option'),
QueryEntity.from_query(query, Span(34, 59), entity_type='dish')
]
entities[1] = entities[1].with_children((entities[0],))
entities[2] = entities[2].with_children((entities[1],))
confidence = {
'domains': {
'food': 0.95,
'music': 0.05
},
'intents': {
'get_comestibles': 0.99,
'reorder': 0.01
},
'entities': [
{'sys_number': 0.9},
{'option': 0.99},
{'dish': 0.65}
],
'roles': [
{'quantity': 0.8, 'quality': 0.2},
None,
None
]
}
processed_query = ProcessedQuery(
query, domain='food', intent='get_comestibles', entities=entities, confidence=confidence
)
bootstrap_data = markup.bootstrap_query_row(processed_query, show_confidence=True)
expected_data = {
'query': ('Can I get [[{one|sys_number|quantity} {curry sauce|option}|option] '
'with my {rice ball with house salad|dish}|dish]'),
'domain': 'food',
'domain_conf': 0.95,
'intent': 'get_comestibles',
'intent_conf': 0.99,
'entity_conf': 0.65,
'role_conf': 0.8
}
assert bootstrap_data == expected_data
def test_bootstrap_query_no_entity(query_factory):
""""Tests bootstrap output for a query without entities"""
query_text = 'cancel the timer'
query = query_factory.create_query(query_text)
confidence = {
'domains': {
'times_and_dates': 0.95,
'espionage': 0.05
},
'intents': {
'stop_timer': 0.9,
'start_timer': 0.07,
'cut_blue_wire': 0.03
},
'entities': [],
'roles': []
}
processed_query = ProcessedQuery(
query, domain='times_and_dates', intent='stop_timer', entities=[], confidence=confidence
)
bootstrap_data = markup.bootstrap_query_row(processed_query, show_confidence=True)
expected_data = {
'query': 'cancel the timer',
'domain': 'times_and_dates',
'domain_conf': 0.95,
'intent': 'stop_timer',
'intent_conf': 0.9,
'entity_conf': 1.0,
'role_conf': 1.0
}
assert bootstrap_data == expected_data
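# The constants and tests above document MindMeld's annotation grammar:
# {text|type} marks an entity, {text|type|role} adds a role, braces nest for
# system entities, and [ ... |type] groups entities under a head entity.
# A minimal round trip in the suite's own style (a hypothetical extra test;
# query_factory is the fixture the other tests already rely on):
@pytest.mark.load
@pytest.mark.dump
def test_load_dump_minimal_round_trip(query_factory):
    """Tests that a simple annotated query survives a load/dump round trip"""
    text = 'a {large|size} {latte|product} with {nonfat milk|option} please'
    processed_query = markup.load_query(text, query_factory)
    assert markup.dump_query(processed_query) == text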
| 38.819512 | 101 | 0.663986 | [further quality-signal values elided] |
1c5bf5712c64da44df655f05b3579218e605402a | 5,260 | py | Python | project/poisson1d.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | ["BSD-2-Clause"] | 2 | 2016-04-04T15:20:50.000Z | 2020-08-01T19:28:55.000Z | project/poisson1d.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | ["BSD-2-Clause"] | 1 | 2020-10-02T05:44:45.000Z | 2020-10-02T05:44:45.000Z | project/poisson1d.py | amit17133129/pyMG-2016 | b82a60811bb0a8b91d8793c47177a240221f9176 | ["BSD-2-Clause"] | 11 | 2016-03-26T18:37:06.000Z | 2020-10-01T19:44:55.000Z |
# coding=utf-8
import numpy as np
import scipy.sparse as sp
from pymg.problem_base import ProblemBase
class Poisson1D(ProblemBase):
"""Implementation of the 1D Poission problem.
Here we define the 1D Poisson problem :math:`-\Delta u = 0` with
Dirichlet-Zero boundary conditions. This is the homogeneous problem,
derive from this class if you want to play around with different RHS.
Attributes:
dx (float): mesh size
"""
def __init__(self, ndofs, *args, **kwargs):
"""Initialization routine for the Poisson1D problem
Args:
ndofs (int): number of degrees of freedom (see
:attr:`pymg.problem_base.ProblemBase.ndofs`)
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
"""
self.dx = 1.0 / (ndofs + 1)
# compute system matrix A, scale by 1/dx^2
A = 1.0 / (self.dx ** 2) * self.__get_system_matrix(ndofs)
rhs = self.__get_rhs(ndofs)
super(Poisson1D, self).__init__(ndofs, A, rhs, *args, **kwargs)
@staticmethod
def __get_system_matrix(ndofs):
"""Helper routine to get the system matrix discretizing :math:`-Delta` with second order FD
Args:
ndofs (int): number of inner grid points (no boundaries!)
Returns:
scipy.sparse.csc_matrix: sparse system matrix A
of size :attr:`ndofs` x :attr:`ndofs`
"""
data = np.array([[2] * ndofs, [-1] * ndofs, [-1] * ndofs])
diags = np.array([0, -1, 1])
return sp.spdiags(data, diags, ndofs, ndofs, format='csc')
@staticmethod
def __get_rhs(ndofs):
"""Helper routine to set the right-hand side
Args:
ndofs (int): number of inner grid points (no boundaries!)
Returns:
numpy.ndarray: the right-hand side vector of size :attr:`ndofs`
"""
return np.zeros(ndofs)
@property
def u_exact(self):
"""Routine to compute the exact solution
Returns:
numpy.ndarray: exact solution array of size :attr:`ndofs`
"""
return np.zeros(self.ndofs)
@property
def domain(self):
return np.array([(i + 1) * self.dx for i in range(self.ndofs)])
@ProblemBase.ndofs.setter
def ndofs(self, val):
ProblemBase.ndofs.fset(self, val)
self.dx = 1.0 / (val + 1)
# compute system matrix A, scale by 1/dx^2
self.A = 1.0 / (self.dx ** 2) * self.__get_system_matrix(val)
self.rhs = self.__get_rhs(self._ndofs)
class Poisson1DPeriodic(ProblemBase):
"""Implementation of the 1D Poission problem.
Here we define the 1D Poisson problem :math:`-\Delta u = 0` with
Dirichlet-Zero boundary conditions. This is the homogeneous problem,
derive from this class if you want to play around with different RHS.
Attributes:
dx (float): mesh size
"""
def __init__(self, ndofs, sigma, *args, **kwargs):
"""Initialization routine for the Poisson1D problem
Args:
ndofs (int): number of degrees of freedom (see
:attr:`pymg.problem_base.ProblemBase.ndofs`)
*args: Variable length argument list
**kwargs: Arbitrary keyword arguments
"""
self.dx = 1.0 / ndofs
self.sigma = sigma
# compute system matrix A, scale by 1/dx^2
A = self.__get_system_matrix(ndofs)
A[0, -1] = A[0, 1]
A[-1, 0] = A[1, 0]
A = -sigma * 1.0 / (self.dx ** 2) * A
rhs = self.__get_rhs(ndofs)
super(Poisson1DPeriodic, self).__init__(ndofs, A, rhs, *args, **kwargs)
@staticmethod
def __get_system_matrix(ndofs):
"""Helper routine to get the system matrix discretizing :math:`-Delta` with second order FD
Args:
ndofs (int): number of inner grid points (no boundaries!)
Returns:
scipy.sparse.csc_matrix: sparse system matrix A
of size :attr:`ndofs` x :attr:`ndofs`
"""
data = np.array([[2] * ndofs, [-1] * ndofs, [-1] * ndofs])
diags = np.array([0, -1, 1])
return sp.spdiags(data, diags, ndofs, ndofs, format='csc')
@staticmethod
def __get_rhs(ndofs):
"""Helper routine to set the right-hand side
Args:
ndofs (int): number of inner grid points (no boundaries!)
Returns:
numpy.ndarray: the right-hand side vector of size :attr:`ndofs`
"""
return np.zeros(ndofs)
@property
def u_exact(self):
"""Routine to compute the exact solution
Returns:
numpy.ndarray: exact solution array of size :attr:`ndofs`
"""
return np.zeros(self.ndofs)
@property
def domain(self):
return np.array([(i) * self.dx for i in range(self.ndofs)])
@ProblemBase.ndofs.setter
def ndofs(self, val):
ProblemBase.ndofs.fset(self, val)
self.dx = 1.0 / val
# compute system matrix A, scale by 1/dx^2
self.A = -self.sigma * 1.0 / (self.dx ** 2) * self.__get_system_matrix(val)
self.A[0, -1] = self.A[0, 1]
self.A[-1, 0] = self.A[1, 0]
self.rhs = self.__get_rhs(self._ndofs)
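# Self-contained check of the stencil assembled above (illustrative addition,
# no pymg required): second-order central differences turn -u'' into
# (1/dx^2) * tridiag(-1, 2, -1) on the inner grid points, which is exactly
# what __get_system_matrix builds.
if __name__ == '__main__':
    _n = 4
    _dx = 1.0 / (_n + 1)
    _data = np.array([[2] * _n, [-1] * _n, [-1] * _n])
    _A = 1.0 / _dx ** 2 * sp.spdiags(_data, np.array([0, -1, 1]), _n, _n, format='csc')
    print(_A.toarray())
    # prints (up to floating-point rounding):
    # [[ 50. -25.   0.   0.]
    #  [-25.  50. -25.   0.]
    #  [  0. -25.  50. -25.]
    #  [  0.   0. -25.  50.]]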
| 32.469136 | 99 | 0.591825 | [further quality-signal values elided] |
1c64216e4ead4a269fb6538ce07b3a5ba8b8f592 | 33 | py | Python | code/code_annotation/code_retrieval/__init__.py | sunlab-osu/CoaCor | e5df8fd38830590b9f132dd68bc26c630e41e509 | ["Apache-2.0"] | 30 | 2019-03-08T05:11:32.000Z | 2021-12-09T12:11:29.000Z | code/code_annotation/code_retrieval/__init__.py | sunlab-osu/CoaCor | e5df8fd38830590b9f132dd68bc26c630e41e509 | ["Apache-2.0"] | 1 | 2020-04-18T14:46:48.000Z | 2020-06-17T20:08:37.000Z | code/code_annotation/code_retrieval/__init__.py | sunlab-osu/CoaCor | e5df8fd38830590b9f132dd68bc26c630e41e509 | ["Apache-2.0"] | 4 | 2019-07-02T05:25:11.000Z | 2021-05-27T12:52:21.000Z |
from CodeRetrievalCritic import *
| 33 | 33 | 0.878788 | [further quality-signal values elided] |
1c747a39b52b2737e693782c95394fc887e40d1f | 37 | py | Python | src/bookmarks_converter/__init__.py | radam9/bookmarks_parser | fc508908fe4b5551d517e7da7120bcc2480200f7 | ["MIT"] | 8 | 2021-02-19T09:28:31.000Z | 2022-02-16T02:33:26.000Z | src/bookmarks_converter/__init__.py | radam9/bookmarks_parser | fc508908fe4b5551d517e7da7120bcc2480200f7 | ["MIT"] | 50 | 2021-02-06T14:16:38.000Z | 2022-03-01T17:55:05.000Z | src/bookmarks_converter/__init__.py | radam9/bookmarks_parser | fc508908fe4b5551d517e7da7120bcc2480200f7 | ["MIT"] | 1 | 2021-09-15T16:45:08.000Z | 2021-09-15T16:45:08.000Z |
from .core import BookmarksConverter
| 18.5 | 36 | 0.864865 | [further quality-signal values elided] |
98c0697b8141518e4ac7ec1fde16be14dd07dd8a | 163 | py | Python | {{cookiecutter.project_slug}}/tests/test_import.py | tobiasraabe/cookiecutter-pytask | 425bbb8480e5eaae560dbb5e0cb36685e8fb4e30 | ["MIT"] | null | null | null | {{cookiecutter.project_slug}}/tests/test_import.py | tobiasraabe/cookiecutter-pytask | 425bbb8480e5eaae560dbb5e0cb36685e8fb4e30 | ["MIT"] | 9 | 2022-01-24T08:04:54.000Z | 2022-03-21T20:28:30.000Z | {{cookiecutter.project_slug}}/tests/test_import.py | pytask-dev/cookiecutter-pytask | 425bbb8480e5eaae560dbb5e0cb36685e8fb4e30 | ["MIT"] | null | null | null |
from __future__ import annotations
import {{ cookiecutter.project_slug }}
def test_import():
assert hasattr({{ cookiecutter.project_slug }}, "__version__")
| 20.375 | 66 | 0.754601 | [further quality-signal values elided] |
98e79458318bbe0f9ee035eed54230f1e3ce48fc | 40 | py | Python | cpp/__init__.py | hwangyale/AlphaGomoku | 7c85a71c710fa8d114b3591e4fd27c8649caccbb | ["MIT"] | 3 | 2018-10-30T07:07:40.000Z | 2019-11-22T12:32:32.000Z | cpp/__init__.py | hwangyale/AlphaGomoku | 7c85a71c710fa8d114b3591e4fd27c8649caccbb | ["MIT"] | null | null | null | cpp/__init__.py | hwangyale/AlphaGomoku | 7c85a71c710fa8d114b3591e4fd27c8649caccbb | ["MIT"] | null | null | null |
from .cpp_board_wrapper import CPPBoard
| 20 | 39 | 0.875 | [further quality-signal values elided] |
98f31f6e4b926fc88efb628054fde7c5ea8454a5 | 2,343 | py | Python | ad_api/sp_products/negative_product_targeting.py | 854350999/python-amazon-advertising-api | ec52f72dab85ad67e271bfd6248071f00e9c7292 | ["MIT"] | null | null | null | ad_api/sp_products/negative_product_targeting.py | 854350999/python-amazon-advertising-api | ec52f72dab85ad67e271bfd6248071f00e9c7292 | ["MIT"] | null | null | null | ad_api/sp_products/negative_product_targeting.py | 854350999/python-amazon-advertising-api | ec52f72dab85ad67e271bfd6248071f00e9c7292 | ["MIT"] | null | null | null |
from ..client import Client
class NegativeProductTargeting(Client):
def create_negative_targets(self, data):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "post"
self.data = data
return self.execute()
def update_negative_targets(self, data):
self.uri_path = "/v2/sp/negativeTargets"
self.method = "put"
self.data = data
return self.execute()
def get_negative_targets(self, start_index: int = 0, count: int = None, state_filter: str = None,
campaign_id_filter: str = None, ad_group_id_filter: str = None,
target_id_filter: str = None):
self.uri_path = "/v2/sp/negativeTargets"
self.params = {
"startIndex": start_index,
"count": count,
"stateFilter": state_filter,
"campaignIdFilter": campaign_id_filter,
"adGroupIdFilter": ad_group_id_filter,
"targetIdFilter": target_id_filter
}
self.method = "get"
return self.execute()
def get_negative_targets_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
self.method = "get"
return self.execute()
def delete_negative_targets_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/{}".format(target_id)
self.method = "delete"
return self.execute()
def get_negative_targets_extended(self, start_index: int = 0, count: int = None, state_filter: str = None,
campaign_id_filter: str = None, ad_group_id_filter: str = None,
target_id_filter: str = None):
self.uri_path = "/v2/sp/negativeTargets/extended"
self.method = "get"
self.params = {
"startIndex": start_index,
"count": count,
"stateFilter": state_filter,
"campaignIdFilter": campaign_id_filter,
"adGroupIdFilter": ad_group_id_filter,
"targetIdFilter": target_id_filter
}
return self.execute()
def get_negative_targets_extended_by_id(self, target_id):
self.uri_path = "/v2/sp/negativeTargets/extended/{}".format(target_id)
self.method = "get"
return self.execute()
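# Hypothetical usage sketch: the Client base class is not shown in this file,
# so how it is constructed and authenticated is an assumption. Each method
# above simply sets uri_path/method/params and delegates to self.execute().
client = NegativeProductTargeting()  # credentials/config would go here
response = client.get_negative_targets(
    start_index=0,
    count=10,
    state_filter="enabled",  # becomes the stateFilter query parameter
)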
| 37.790323 | 110 | 0.598378 | [further quality-signal values elided] |
c7ab45bfa2bcd34f9e8d66769433a3036296ad1e | 213 | py | Python | common.py | sujeevraja/utility-scripts | 464fedb2b2430567f1d9162834bf64bac14ddc3d | ["MIT"] | null | null | null | common.py | sujeevraja/utility-scripts | 464fedb2b2430567f1d9162834bf64bac14ddc3d | ["MIT"] | null | null | null | common.py | sujeevraja/utility-scripts | 464fedb2b2430567f1d9162834bf64bac14ddc3d | ["MIT"] | null | null | null |
class ScriptException(Exception):
"""Custom exception class with message for this module."""
def __init__(self, value):
self.value = value
def __repr__(self):
return repr(self.value)
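# Illustrative use of the custom exception defined above:
try:
    raise ScriptException("config file not found")
except ScriptException as error:
    print(repr(error))  # -> 'config file not found'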
| 23.666667 | 62 | 0.661972 | [further quality-signal values elided] |
c7dfe2cc00ddc5c870039ca299d4ad3811035262 | 34 | py | Python | CLR-master/__init__.py | dr-yali/Bone-MRI | 54c50b2da26190575ad0913f715bc15a7dbd857f | ["MIT"] | 1,201 | 2017-03-23T07:19:33.000Z | 2022-03-29T08:59:07.000Z | CLR/__init__.py | prateekvyas1996/Facies-Prediction-From-well-log-using-deep-learning | 68645ce1b40f9263f3ac5c7758ba4923377cb3d0 | ["MIT"] | 18 | 2017-03-25T00:08:36.000Z | 2021-05-03T07:12:05.000Z | CLR/__init__.py | prateekvyas1996/Facies-Prediction-From-well-log-using-deep-learning | 68645ce1b40f9263f3ac5c7758ba4923377cb3d0 | ["MIT"] | 293 | 2017-03-24T04:37:06.000Z | 2022-02-16T18:33:54.000Z |
from .clr_callback import CyclicLR
| 34
| 34
| 0.882353
| 5
| 34
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 34
| 1
| 34
| 34
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| c7e50d652b755eb3d65c592e97fcf494e22a2b51
| 15,566
| py
| Python
| tests/processors/graphic_matching_test.py
| elifesciences/sciencebeam-parser
| 66964f283612b8d6fa8a23ad8790292c1ec07651
| ["MIT"] | 13
| 2021-08-04T12:11:17.000Z
| 2022-03-28T20:41:20.000Z
| tests/processors/graphic_matching_test.py
| elifesciences/sciencebeam-parser
| 66964f283612b8d6fa8a23ad8790292c1ec07651
| ["MIT"] | 33
| 2021-08-05T08:37:59.000Z
| 2022-03-29T18:42:09.000Z
| tests/processors/graphic_matching_test.py
| elifesciences/sciencebeam-parser
| 66964f283612b8d6fa8a23ad8790292c1ec07651
| ["MIT"] | 1
| 2022-01-05T14:53:06.000Z
| 2022-01-05T14:53:06.000Z
|
import logging
from pathlib import Path
from typing import Sequence, Tuple
from unittest.mock import MagicMock
import pytest
import PIL.Image
from sciencebeam_parser.document.layout_document import (
LayoutBlock,
LayoutGraphic,
LayoutPageCoordinates,
LayoutToken
)
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticGraphic,
SemanticLabel,
SemanticMixedContentWrapper
)
from sciencebeam_parser.processors.graphic_matching import (
BoundingBoxDistanceGraphicMatcher,
GraphicRelatedBlockTextGraphicMatcher,
OpticalCharacterRecognitionGraphicMatcher,
get_bounding_box_list_distance
)
LOGGER = logging.getLogger(__name__)
COORDINATES_1 = LayoutPageCoordinates(
x=10,
y=100,
width=200,
height=100,
page_number=1
)
GRAPHIC_ABOVE_FIGURE_COORDINATES_1 = LayoutPageCoordinates(
x=10,
y=100,
width=200,
height=100,
page_number=1
)
FIGURE_BELOW_GRAPHIC_COORDINATES_1 = LayoutPageCoordinates(
x=10,
y=GRAPHIC_ABOVE_FIGURE_COORDINATES_1.y + GRAPHIC_ABOVE_FIGURE_COORDINATES_1.height + 10,
width=200,
height=20,
page_number=1
)
FAR_AWAY_COORDINATES_1 = LayoutPageCoordinates(
x=10,
y=10,
width=200,
height=20,
page_number=10
)
FAR_AWAY_COORDINATES_2 = LayoutPageCoordinates(
x=10,
y=10,
width=200,
height=20,
page_number=11
)
@pytest.fixture(name='ocr_model_mock')
def _ocr_model_mock() -> MagicMock:
return MagicMock(name='ocr_model')
def _get_semantic_content_for_page_coordinates(
coordinates: LayoutPageCoordinates
) -> SemanticContentWrapper:
return SemanticFigure(
layout_block=LayoutBlock.for_tokens([
LayoutToken(
text='dummy',
coordinates=coordinates
)
])
)
def _get_bounding_box_list_distance_sort_key(
bounding_box_list_1: Sequence[LayoutPageCoordinates],
bounding_box_list_2: Sequence[LayoutPageCoordinates]
) -> Tuple[float, ...]:
return get_bounding_box_list_distance(
bounding_box_list_1, bounding_box_list_2
)
class TestGetBoundingBoxListDistance:
def test_should_return_distance_between_vertical_adjacent_bounding_boxes(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1],
[COORDINATES_1.move_by(dy=COORDINATES_1.height)]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 0
assert bounding_box_distance.delta_y == 0
assert bounding_box_distance.euclidean_distance == 0
def test_should_return_distance_between_horizontal_adjacent_bounding_boxes(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1],
[COORDINATES_1.move_by(dx=COORDINATES_1.width)]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 0
assert bounding_box_distance.delta_y == 0
assert bounding_box_distance.euclidean_distance == 0
def test_should_return_delta_x_for_bounding_box_left_right(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1],
[COORDINATES_1.move_by(dx=COORDINATES_1.width + 10)]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 10
assert bounding_box_distance.delta_y == 0
assert bounding_box_distance.euclidean_distance == 10
def test_should_return_delta_x_for_bounding_box_right_left(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1.move_by(dx=COORDINATES_1.width + 10)],
[COORDINATES_1]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 10
assert bounding_box_distance.delta_y == 0
assert bounding_box_distance.euclidean_distance == 10
def test_should_return_delta_y_for_bounding_box_above_below(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1],
[COORDINATES_1.move_by(dy=COORDINATES_1.height + 10)]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 0
assert bounding_box_distance.delta_y == 10
assert bounding_box_distance.euclidean_distance == 10
def test_should_return_delta_y_for_bounding_box_below_above(self):
bounding_box_distance = get_bounding_box_list_distance(
[COORDINATES_1.move_by(dy=COORDINATES_1.height + 10)],
[COORDINATES_1]
)
assert bounding_box_distance.page_number_diff == 0
assert bounding_box_distance.delta_x == 0
assert bounding_box_distance.delta_y == 10
assert bounding_box_distance.euclidean_distance == 10
class TestBoundingBoxDistanceGraphicMatcher:
def test_should_return_empty_list_with_empty_list_of_graphics(self):
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[],
candidate_semantic_content_list=[SemanticMixedContentWrapper()]
)
assert not result
def test_should_match_graphic_above_semantic_content(self):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1
))
candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(
coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1
)
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
_get_semantic_content_for_page_coordinates(
coordinates=FAR_AWAY_COORDINATES_1
),
candidate_semantic_content_1,
_get_semantic_content_for_page_coordinates(
coordinates=FAR_AWAY_COORDINATES_2
)
]
)
LOGGER.debug('result: %r', result)
assert len(result) == 1
first_match = result.graphic_matches[0]
assert first_match.semantic_graphic == semantic_graphic_1
assert first_match.candidate_semantic_content == candidate_semantic_content_1
def test_should_not_match_further_away_graphic_to_same_semantic_content(self):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1
))
candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(
coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1
)
further_away_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1.move_by(dy=500)
))
further_away_graphic_2 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1.move_by(dy=1000)
))
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[
further_away_graphic_1,
semantic_graphic_1,
further_away_graphic_2
],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
assert len(result) == 1
first_match = result.graphic_matches[0]
assert first_match.semantic_graphic == semantic_graphic_1
assert first_match.candidate_semantic_content == candidate_semantic_content_1
def test_should_not_match_empty_graphic(self):
empty_semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=COORDINATES_1._replace(
width=0, height=0
)
))
candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(
coordinates=COORDINATES_1
)
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[empty_semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
assert not result
def test_should_not_match_graphic_on_another_page(self):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=COORDINATES_1._replace(
page_number=COORDINATES_1.page_number + 1
)
))
candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(
coordinates=COORDINATES_1
)
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
@pytest.mark.parametrize(
"graphic_type,should_match",
[("svg", False), ("bitmap", True)]
)
    def test_should_match_graphic_of_specific_type(
self,
graphic_type: str,
should_match: bool
):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1,
graphic_type=graphic_type
))
candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(
coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1
)
result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
if should_match:
assert len(result) == 1
first_match = result.graphic_matches[0]
assert first_match.semantic_graphic == semantic_graphic_1
else:
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
class TestGraphicRelatedBlockTextGraphicMatcher:
@pytest.mark.parametrize(
"related_text,figure_label,should_match",
[
("Figure 1", "Figure 1", True),
("Figure 1", "Figure 2", False),
("Fig 1", "Figure 1", True),
("F 1", "Figure 1", False),
("Fug 1", "Figure 1", False),
("Other\nFigure 1\nMore", "Figure 1", True)
]
)
def test_should_match_based_on_figure_label(
self,
related_text: str,
figure_label: str,
should_match: bool
):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FAR_AWAY_COORDINATES_1,
related_block=LayoutBlock.for_text(related_text)
))
candidate_semantic_content_1 = SemanticFigure([
SemanticLabel(layout_block=LayoutBlock.for_text(figure_label))
])
result = GraphicRelatedBlockTextGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
if should_match:
assert len(result) == 1
first_match = result.graphic_matches[0]
assert first_match.semantic_graphic == semantic_graphic_1
else:
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
def test_should_ignore_layout_graphic_without_related_block(
self
):
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FAR_AWAY_COORDINATES_1,
related_block=None
))
candidate_semantic_content_1 = SemanticFigure([
SemanticLabel(layout_block=LayoutBlock.for_text('Figure 1'))
])
result = GraphicRelatedBlockTextGraphicMatcher().get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
class TestOpticalCharacterRecognitionGraphicMatcher:
@pytest.mark.parametrize(
"ocr_text,figure_label,should_match",
[
("Figure 1", "Figure 1", True),
("Figure 1", "Figure 2", False),
("Fig 1", "Figure 1", True),
("F 1", "Figure 1", False),
("Fug 1", "Figure 1", False),
("Other\nFigure 1\nMore", "Figure 1", True)
]
)
def test_should_match_based_on_figure_label(
self,
ocr_model_mock: MagicMock,
ocr_text: str,
figure_label: str,
should_match: bool,
tmp_path: Path
):
local_graphic_path = tmp_path / 'image.png'
PIL.Image.new('RGB', (10, 10), (0, 1, 2)).save(local_graphic_path)
ocr_model_mock.predict_single.return_value.get_text.return_value = ocr_text
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FAR_AWAY_COORDINATES_1,
local_file_path=str(local_graphic_path)
))
candidate_semantic_content_1 = SemanticFigure([
SemanticLabel(layout_block=LayoutBlock.for_text(figure_label))
])
result = OpticalCharacterRecognitionGraphicMatcher(
ocr_model=ocr_model_mock
).get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
if should_match:
assert len(result) == 1
first_match = result.graphic_matches[0]
assert first_match.semantic_graphic == semantic_graphic_1
else:
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
def test_should_ignore_layout_graphic_without_local_path(
self,
ocr_model_mock: MagicMock
):
ocr_model_mock.predict_single.return_value.get_text.side_effect = RuntimeError
semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(
coordinates=FAR_AWAY_COORDINATES_1,
local_file_path=None
))
candidate_semantic_content_1 = SemanticFigure([
SemanticLabel(layout_block=LayoutBlock.for_text('Figure 1'))
])
result = OpticalCharacterRecognitionGraphicMatcher(
ocr_model=ocr_model_mock
).get_graphic_matches(
semantic_graphic_list=[semantic_graphic_1],
candidate_semantic_content_list=[
candidate_semantic_content_1
]
)
LOGGER.debug('result: %r', result)
assert not result.graphic_matches
assert result.unmatched_graphics == [semantic_graphic_1]
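
# Example (editor's sketch): the TestGetBoundingBoxListDistance cases above pin
# down the distance semantics -- touching boxes have zero distance, and only the
# gap beyond adjacency counts. A standalone one-axis illustration of that rule
# (a plain function, not the library's implementation):
def gap_between(a_start, a_length, b_start, b_length):
    # Overlapping or touching intervals have no gap; otherwise the gap is the
    # space between the nearer edges.
    if b_start >= a_start + a_length:
        return b_start - (a_start + a_length)
    if a_start >= b_start + b_length:
        return a_start - (b_start + b_length)
    return 0


assert gap_between(10, 200, 210, 200) == 0    # adjacent, like move_by(dx=width)
assert gap_between(10, 200, 220, 200) == 10   # like move_by(dx=width + 10)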
| 36.454333
| 92
| 0.676539
| 1,642
| 15,566
| 5.950061
| 0.096224
| 0.052917
| 0.078608
| 0.061412
| 0.810031
| 0.793347
| 0.767861
| 0.759365
| 0.738485
| 0.72129
| 0
| 0.021554
| 0.25485
| 15,566
| 426
| 93
| 36.539906
| 0.82076
| 0
| 0
| 0.57732
| 0
| 0
| 0.028781
| 0.006232
| 0
| 0
| 0
| 0
| 0.128866
| 1
| 0.048969
| false
| 0
| 0.023196
| 0.007732
| 0.090206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1bddb289ff53be3d31895fea85fc09af69ce8295
| 6,883
| py
| Python
| pay-api/tests/unit/api/test_account.py
| nitheesh-aot/sbc-pay
| dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7
| ["Apache-2.0"] | null | null | null
| pay-api/tests/unit/api/test_account.py
| nitheesh-aot/sbc-pay
| dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7
| ["Apache-2.0"] | null | null | null
| pay-api/tests/unit/api/test_account.py
| nitheesh-aot/sbc-pay
| dcb9c1bd3d2954f11c8d643aa6618d8470e3b0f7
| ["Apache-2.0"] | null | null | null
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the accounts end-point.
Test-Suite to ensure that the /accounts endpoint is working as expected.
"""
import json
from pay_api.models.credit_payment_account import CreditPaymentAccount
from pay_api.models.payment import Payment
from pay_api.models.payment_account import PaymentAccount
from pay_api.schemas import utils as schema_utils
from tests.utilities.base_test import (
get_claims, get_payment_request, token_header)
def test_account_purchase_history(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/queries', data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
def test_account_purchase_history_pagination(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    for _ in range(10):
        rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()), headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/queries?page=1&limit=5',
data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
assert rv.json.get('total') == 10
assert len(rv.json.get('items')) == 5
def test_account_purchase_history_invalid_request(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
    rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()), headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
search_filter = {
'businessIdentifier': 1111
}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/queries?page=1&limit=5',
data=json.dumps(search_filter),
headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
def test_account_purchase_history_export_as_csv(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json'
}
    rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json',
'Accept': 'text/csv'
}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/reports', data=json.dumps({}),
headers=headers)
assert rv.status_code == 201
def test_account_purchase_history_export_as_pdf(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json'
}
    rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json',
'Accept': 'application/pdf'
}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/reports', data=json.dumps({}),
headers=headers)
assert rv.status_code == 201
def test_account_purchase_history_export_invalid_request(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json'
}
    rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request()),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
credit_account: CreditPaymentAccount = CreditPaymentAccount.find_by_id(payment.invoices[0].credit_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(credit_account.account_id)
headers = {
'Authorization': f'Bearer {token}',
'content-type': 'application/json',
'Accept': 'application/pdf'
}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/payments/reports', data=json.dumps({
'businessIdentifier': 1111
}), headers=headers)
assert rv.status_code == 400
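
# Example (editor's sketch): the export tests above show that the same
# /payments/reports endpoint serves CSV or PDF depending solely on the Accept
# header; a client-side call would look like this (account id is a placeholder):
# headers = {'Authorization': f'Bearer {token}',
#            'content-type': 'application/json',
#            'Accept': 'text/csv'}  # or 'application/pdf'
# rv = client.post('/api/v1/accounts/1234/payments/reports',
#                  data=json.dumps({}), headers=headers)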
| 40.017442
| 113
| 0.703763
| 886
| 6,883
| 5.274266
| 0.176072
| 0.023111
| 0.030815
| 0.033383
| 0.794565
| 0.766103
| 0.766103
| 0.742778
| 0.742778
| 0.742778
| 0
| 0.01407
| 0.173907
| 6,883
| 171
| 114
| 40.251462
| 0.807598
| 0.13221
| 0
| 0.704762
| 0
| 0.019048
| 0.197164
| 0.09318
| 0
| 0
| 0
| 0
| 0.085714
| 1
| 0.057143
| false
| 0
| 0.057143
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 1bdef0e5df0c225eb2775cb6ea55f9021c9fb5fa
| 576
| py
| Python
| sdk/python/pulumi_aws/s3/__init__.py
| texdc/pulumi-aws
| 93a7a28ab7db6b1cd7e6686c0b68aa4c89490d4f
| ["ECL-2.0", "Apache-2.0"] | null | null | null
| sdk/python/pulumi_aws/s3/__init__.py
| texdc/pulumi-aws
| 93a7a28ab7db6b1cd7e6686c0b68aa4c89490d4f
| ["ECL-2.0", "Apache-2.0"] | null | null | null
| sdk/python/pulumi_aws/s3/__init__.py
| texdc/pulumi-aws
| 93a7a28ab7db6b1cd7e6686c0b68aa4c89490d4f
| ["ECL-2.0", "Apache-2.0"] | null | null | null
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .account_public_access_block import *
from .bucket import *
from .inventory import *
from .bucket_metric import *
from .bucket_notification import *
from .bucket_object import *
from .bucket_policy import *
from .bucket_public_access_block import *
from .get_bucket import *
from .get_bucket_object import *
from .get_bucket_objects import *
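
# Example (editor's sketch): with the wildcard re-exports above, resources can
# be referenced directly from the s3 module; the Bucket usage below follows the
# standard pulumi_aws API and is an illustration, not taken from this file.
# import pulumi_aws.s3 as s3
# bucket = s3.Bucket("my-bucket", acl="private")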
| 33.882353
| 87
| 0.767361
| 85
| 576
| 5.023529
| 0.552941
| 0.234192
| 0.224824
| 0.133489
| 0.126464
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002041
| 0.149306
| 576
| 16
| 88
| 36
| 0.869388
| 0.380208
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 402e44e902fa8cead89eae17a87c8aa8f9902299
| 21,328
| py
| Python
| src/genie/libs/parser/iosxe/tests/test_show_mld.py
| nujo/genieparser
| 083b01efc46afc32abe1a1858729578beab50cd3
| ["Apache-2.0"] | 2
| 2021-01-27T03:37:39.000Z
| 2021-01-27T03:40:50.000Z
| src/genie/libs/parser/iosxe/tests/test_show_mld.py
| nujo/genieparser
| 083b01efc46afc32abe1a1858729578beab50cd3
| ["Apache-2.0"] | 1
| 2020-08-01T00:23:31.000Z
| 2020-08-01T00:40:05.000Z
| src/genie/libs/parser/iosxe/tests/test_show_mld.py
| nujo/genieparser
| 083b01efc46afc32abe1a1858729578beab50cd3
| ["Apache-2.0"] | null | null | null
|
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Metaparset
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.iosxe.show_mld import ShowIpv6MldInterface, \
ShowIpv6MldGroupsDetail, \
ShowIpv6MldSsmMap
# ==================================================
# Unit test for 'show ipv6 mld interface'
# Unit test for 'show ipv6 mld vrf <WORD> interface'
# ==================================================
class test_show_ipv6_mld_interface(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"interface": {
"Tunnel0": {
"oper_status": "up",
"interface_adress": "FE80::21E:BDFF:FEBA:D000/10",
"enable": False,
"interface_status": "up"
},
"VoIP-Null0": {
"oper_status": "up",
"interface_adress": "::/0",
"enable": False,
"interface_status": "up"
},
"LIIN0": {
"oper_status": "up",
"interface_adress": "::/0",
"enable": False,
"interface_status": "up"
},
"GigabitEthernet1": {
"oper_status": "up",
"querier_timeout": 740,
"active_groups": 0,
"group_policy": "test",
"query_interval": 366,
"version": 2,
"query_this_system": True,
"querier": "FE80::5054:FF:FE7C:DC70",
"interface_status": "up",
"last_member_query_interval": 1,
"counters": {
"leaves": 2,
"joins": 11
},
"max_groups": 6400,
"query_max_response_time": 16,
"enable": True,
"interface_adress": "FE80::5054:FF:FE7C:DC70/10"
},
"GigabitEthernet3": {
"oper_status": "down",
"interface_adress": "::/0",
"enable": False,
"interface_status": "administratively down"
},
"Null0": {
"oper_status": "up",
"interface_adress": "FE80::1/10",
"enable": False,
"interface_status": "up"
}
},
"max_groups": 64000,
"active_groups": 0
}
}
}
golden_output = {'execute.return_value': '''\
Global State Limit : 0 active out of 64000 max
GigabitEthernet1 is up, line protocol is up
Internet address is FE80::5054:FF:FE7C:DC70/10
MLD is enabled on interface
Current MLD version is 2
MLD query interval is 366 seconds
MLD querier timeout is 740 seconds
MLD max query response time is 16 seconds
Last member query response interval is 1 seconds
Inbound MLD access group is: test
Interface State Limit : 0 active out of 6400 max
MLD activity: 11 joins, 2 leaves
MLD querying router is FE80::5054:FF:FE7C:DC70 (this system)
GigabitEthernet3 is administratively down, line protocol is down
Internet address is ::/0
MLD is disabled on interface
Null0 is up, line protocol is up
Internet address is FE80::1/10
MLD is disabled on interface
VoIP-Null0 is up, line protocol is up
Internet address is ::/0
MLD is disabled on interface
LIIN0 is up, line protocol is up
Internet address is ::/0
MLD is disabled on interface
Tunnel0 is up, line protocol is up
Internet address is FE80::21E:BDFF:FEBA:D000/10
MLD is disabled on interface
'''}
golden_parsed_output_1 = {
"vrf": {
"VRF1": {
"interface": {
"GigabitEthernet2": {
"query_max_response_time": 16,
"enable": True,
"query_interval": 366,
"querier": "FE80::5054:FF:FEDD:BB49",
"interface_status": "up",
"query_this_system": True,
"version": 2,
"interface_adress": "FE80::5054:FF:FEDD:BB49/10",
"active_groups": 0,
"querier_timeout": 740,
"last_member_query_interval": 1,
"counters": {
"joins": 9,
"leaves": 0
},
"oper_status": "up",
"max_groups": 6400
},
"Tunnel1": {
"interface_status": "up",
"interface_adress": "FE80::21E:BDFF:FEBA:D000/10",
"oper_status": "up",
"enable": False
}
},
"max_groups": 64000,
"active_groups": 0
}
}
}
golden_output_1 = {'execute.return_value': '''\
Global State Limit : 0 active out of 64000 max
GigabitEthernet2 is up, line protocol is up
Internet address is FE80::5054:FF:FEDD:BB49/10
MLD is enabled on interface
Current MLD version is 2
MLD query interval is 366 seconds
MLD querier timeout is 740 seconds
MLD max query response time is 16 seconds
Last member query response interval is 1 seconds
Interface State Limit : 0 active out of 6400 max
MLD activity: 9 joins, 0 leaves
MLD querying router is FE80::5054:FF:FEDD:BB49 (this system)
Tunnel1 is up, line protocol is up
Internet address is FE80::21E:BDFF:FEBA:D000/10
MLD is disabled on interface
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpv6MldInterface(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_default_vrf(self):
self.device = Mock(**self.golden_output)
obj = ShowIpv6MldInterface(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_non_default_vrf(self):
self.device = Mock(**self.golden_output_1)
obj = ShowIpv6MldInterface(device=self.device)
parsed_output = obj.parse(vrf='VRF1')
self.assertEqual(parsed_output,self.golden_parsed_output_1)
# =====================================================
# Unit test for 'show ipv6 mld groups detail'
# Unit test for 'show ipv6 mld vrf <WORD> groups detail'
# =====================================================
class test_show_ipv6_mld_groups_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"interface": {
"GigabitEthernet1": {
"group": {
"FF15:1::1": {
"up_time": "08:14:15",
"source": {
"2001:DB8:2:2::2": {
"forward": True,
"up_time": "08:13:22",
"flags": "Remote Local 2D",
"expire": "00:06:42"
}
},
"filter_mode": "include",
"host_mode": "include",
"last_reporter": "FE80::5054:FF:FE7C:DC70"
},
"FF25:2::1": {
"up_time": "08:14:01",
"filter_mode": "exclude",
"last_reporter": "FE80::5054:FF:FE7C:DC70",
"host_mode": "exclude",
"expire": "never"
},
"FF35:1::1": {
"up_time": "00:42:41",
"source": {
"2001:DB8:3:3::3": {
"forward": True,
"up_time": "00:42:41",
"flags": "Remote Local E",
"expire": "00:06:42"
}
},
"filter_mode": "include",
"host_mode": "include",
"last_reporter": "FE80::5054:FF:FE7C:DC70"
},
"FF45:1::1": {
"up_time": "00:42:32",
"filter_mode": "exclude",
"last_reporter": "FE80::5054:FF:FE7C:DC70",
"host_mode": "exclude",
"expire": "never"
}
},
"join_group": {
"FF15:1::1 2001:DB8:2:2::2": {
"group": "FF15:1::1",
"source": "2001:DB8:2:2::2"
},
},
"static_group": {
"FF35:1::1 2001:DB8:3:3::3": {
"group": "FF35:1::1",
"source": "2001:DB8:3:3::3"
}
}
}
}
}
}
}
golden_output = {'execute.return_value': '''\
Interface: GigabitEthernet1
Group: FF15:1::1
Uptime: 08:14:15
Router mode: INCLUDE
Host mode: INCLUDE
Last reporter: FE80::5054:FF:FE7C:DC70
Group source list:
Source Address Uptime Expires Fwd Flags
2001:DB8:2:2::2 08:13:22 00:06:42 Yes Remote Local 2D
Interface: GigabitEthernet1
Group: FF25:2::1
Uptime: 08:14:01
Router mode: EXCLUDE (Expires: never)
Host mode: EXCLUDE
Last reporter: FE80::5054:FF:FE7C:DC70
Source list is empty
Interface: GigabitEthernet1
Group: FF35:1::1
Uptime: 00:42:41
Router mode: INCLUDE
Host mode: INCLUDE
Last reporter: FE80::5054:FF:FE7C:DC70
Group source list:
Source Address Uptime Expires Fwd Flags
2001:DB8:3:3::3 00:42:41 00:06:42 Yes Remote Local E
Interface: GigabitEthernet1
Group: FF45:1::1
Uptime: 00:42:32
Router mode: EXCLUDE (Expires: never)
Host mode: EXCLUDE
Last reporter: FE80::5054:FF:FE7C:DC70
Source list is empty
'''}
golden_parsed_output_1 = {
"vrf": {
"VRF1": {
"interface": {
"GigabitEthernet2": {
"group": {
"FF15:1::1": {
"up_time": "08:14:20",
"source": {
"2001:DB8:2:2::2": {
"forward": True,
"up_time": "08:13:56",
"flags": "Remote Local 2D",
"expire": "00:12:23"
}
},
"filter_mode": "include",
"host_mode": "include",
"last_reporter": "FE80::5054:FF:FEDD:BB49"
},
"FF25:2::1": {
"up_time": "08:14:18",
"filter_mode": "exclude",
"last_reporter": "FE80::5054:FF:FEDD:BB49",
"host_mode": "exclude",
"expire": "never"
},
"FF35:1::1": {
"up_time": "00:42:30",
"source": {
"2001:DB8:3:3::3": {
"forward": True,
"up_time": "00:42:30",
"flags": "Remote Local E",
"expire": "00:12:23"
}
},
"filter_mode": "include",
"host_mode": "include",
"last_reporter": "FE80::5054:FF:FEDD:BB49"
},
"FF45:1::1": {
"up_time": "00:42:30",
"filter_mode": "exclude",
"last_reporter": "FE80::5054:FF:FEDD:BB49",
"host_mode": "exclude",
"expire": "never"
}
},
"join_group": {
"FF15:1::1 2001:DB8:2:2::2": {
"group": "FF15:1::1",
"source": "2001:DB8:2:2::2"
}
},
"static_group": {
"FF35:1::1 2001:DB8:3:3::3": {
"group": "FF35:1::1",
"source": "2001:DB8:3:3::3"
}
}
}
}
}
}
}
golden_output_1 = {'execute.return_value': '''\
Interface: GigabitEthernet2
Group: FF15:1::1
Uptime: 08:14:20
Router mode: INCLUDE
Host mode: INCLUDE
Last reporter: FE80::5054:FF:FEDD:BB49
Group source list:
Source Address Uptime Expires Fwd Flags
2001:DB8:2:2::2 08:13:56 00:12:23 Yes Remote Local 2D
Interface: GigabitEthernet2
Group: FF25:2::1
Uptime: 08:14:18
Router mode: EXCLUDE (Expires: never)
Host mode: EXCLUDE
Last reporter: FE80::5054:FF:FEDD:BB49
Source list is empty
Interface: GigabitEthernet2
Group: FF35:1::1
Uptime: 00:42:30
Router mode: INCLUDE
Host mode: INCLUDE
Last reporter: FE80::5054:FF:FEDD:BB49
Group source list:
Source Address Uptime Expires Fwd Flags
2001:DB8:3:3::3 00:42:30 00:12:23 Yes Remote Local E
Interface: GigabitEthernet2
Group: FF45:1::1
Uptime: 00:42:30
Router mode: EXCLUDE (Expires: never)
Host mode: EXCLUDE
Last reporter: FE80::5054:FF:FEDD:BB49
Source list is empty
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpv6MldGroupsDetail(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_default_vrf(self):
self.device = Mock(**self.golden_output)
obj = ShowIpv6MldGroupsDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_non_default_vrf(self):
self.device = Mock(**self.golden_output_1)
obj = ShowIpv6MldGroupsDetail(device=self.device)
parsed_output = obj.parse(vrf='VRF1')
self.assertEqual(parsed_output,self.golden_parsed_output_1)
# ===========================================================
# Unit test for 'show ipv6 mld ssm-mapping <WORD>'
# Unit test for 'show ipv6 mld vrf <WORD> ssm-mapping <WORD>'
# ============================================================
class test_show_ipv6_mld_ssm_mapping(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"vrf": {
"default": {
"ssm_map": {
"2001:DB8:1:1::1 FF35:1::1": {
"source_addr": "2001:DB8:1:1::1",
"group_address": "FF35:1::1",
"database": "static",
"group_mode_ssm": False
}
}
}
}
}
golden_output = {'execute.return_value': '''\
Group address : FF35:1::1
Group mode ssm : FALSE
Database : STATIC
Source list : 2001:DB8:1:1::1
'''}
golden_parsed_output_1 = {
"vrf": {
"VRF1": {
"ssm_map": {
"2001:DB8:1:1::1 FF35:1::1": {
"source_addr": "2001:DB8:1:1::1",
"group_address": "FF35:1::1",
"database": "static",
"group_mode_ssm": False
},
"2001:DB8::3 FF35:1::1": {
"source_addr": "2001:DB8::3",
"group_address": "FF35:1::1",
"database": "static",
"group_mode_ssm": False
}
}
}
}
}
golden_output_1 = {'execute.return_value': '''\
Group address : FF35:1::1
Group mode ssm : FALSE
Database : STATIC
Source list : 2001:DB8:1:1::1
2001:DB8::3
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowIpv6MldSsmMap(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse(group='ff35:1::1')
def test_golden_default_vrf(self):
self.device = Mock(**self.golden_output)
obj = ShowIpv6MldSsmMap(device=self.device)
parsed_output = obj.parse(group='ff35:1::1')
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_non_default_vrf(self):
self.device = Mock(**self.golden_output_1)
obj = ShowIpv6MldSsmMap(device=self.device)
parsed_output = obj.parse(vrf='VRF1', group='ff35:1::1')
self.assertEqual(parsed_output,self.golden_parsed_output_1)
if __name__ == '__main__':
unittest.main()
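
# Example (editor's sketch): every test class above follows the same
# golden-output pattern -- mock the device's execute() output, run the parser,
# and compare against a hand-built expected dict:
# device = Mock(**{'execute.return_value': '<raw "show ipv6 mld ..." output>'})
# parsed = ShowIpv6MldInterface(device=device).parse()
# assert parsed == expected_dict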
| 42.40159
| 88
| 0.386534
| 1,759
| 21,328
| 4.554861
| 0.105173
| 0.010734
| 0.029955
| 0.03994
| 0.866076
| 0.833375
| 0.757239
| 0.714678
| 0.662506
| 0.632302
| 0
| 0.09011
| 0.504126
| 21,328
| 503
| 89
| 42.40159
| 0.667455
| 0.030851
| 0
| 0.699779
| 0
| 0.00883
| 0.410187
| 0.037281
| 0
| 0
| 0
| 0
| 0.019868
| 1
| 0.019868
| false
| 0
| 0.011038
| 0
| 0.077263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 4034928e25b018ee24acfa6ff967519b3bcf52ce
| 3,045
| py
| Python
| test/test_conditionals.py
| jackdavidweber/cjs_capstone
| 8929e4939f5b7b172c595dbb49b1d2ccc7805b8a
| ["MIT"] | 2
| 2020-07-13T18:58:57.000Z
| 2020-07-20T23:30:21.000Z
| test/test_conditionals.py
| jackdavidweber/cjs_capstone
| 8929e4939f5b7b172c595dbb49b1d2ccc7805b8a
| ["MIT"] | 20
| 2020-06-18T20:49:20.000Z
| 2020-08-04T16:15:46.000Z
| test/test_conditionals.py
| jackdavidweber/cjs_capstone
| 8929e4939f5b7b172c595dbb49b1d2ccc7805b8a
| ["MIT"] | null | null | null
|
import unittest2
import matrix
from Unittest import Unittest
class TestConditionals(unittest2.TestCase):
def test_if(self):
js_code = Unittest('if (1) {\n\tconsole.log("This is true")\n}', 'js')
py_code = Unittest('if (1):\n\tprint("This is true")', 'py')
bash_code = Unittest('if [[ 1 ]]; then\n\techo "This is true"\nfi',
'bash',
is_input=False)
java_code = Unittest(
'if (1) {\n\tSystem.out.println("This is true");\n}', 'java')
matrix.matrix(self, [py_code, js_code, java_code, bash_code])
def test_else(self):
js_code = Unittest(
'if (1) {\n\tconsole.log("1 is true")\n} else {\n\tconsole.log("1 is NOT true")\n}',
'js')
py_code = Unittest(
'if (1):\n\tprint("1 is true")\nelse:\n\tprint("1 is NOT true")',
'py')
bash_code = Unittest(
'if [[ 1 ]]; then\n\techo "1 is true"\nelse\n\techo "1 is NOT true"\nfi',
'bash',
is_input=False)
java_code = Unittest(
'if (1) {\n\tSystem.out.println("1 is true");\n} else {\n\tSystem.out.println("1 is NOT true");\n}',
'java')
matrix.matrix(self, [py_code, js_code, java_code, bash_code])
def test_elif(self):
js_code = Unittest(
'if (1) {\n\tconsole.log("1 is true")\n} else if (2) {\n\tconsole.log("2 is true")\n\tconsole.log("second line")\n}',
'js')
py_code = Unittest(
'if (1):\n\tprint("1 is true")\nelif (2):\n\tprint("2 is true")\n\tprint("second line")',
'py')
bash_code = Unittest(
'if [[ 1 ]]; then\n\techo "1 is true"\nelif [[ 2 ]]; then\n\techo "2 is true"\n\techo "second line"\nfi',
'bash',
is_input=False)
java_code = Unittest(
'if (1) {\n\tSystem.out.println("1 is true");\n} else if (2) {\n\tSystem.out.println("2 is true");\n\tSystem.out.println("second line");\n}',
'java')
matrix.matrix(self, [py_code, js_code, java_code, bash_code])
def test_elif_else(self):
js_code = Unittest(
'if (1) {\n\tconsole.log("1 is true")\n} else if (2) {\n\tconsole.log("2 is true")\n} else {\n\tconsole.log("nothing is true")\n}',
'js')
py_code = Unittest(
'if (1):\n\tprint("1 is true")\nelif (2):\n\tprint("2 is true")\nelse:\n\tprint("nothing is true")',
'py')
bash_code = Unittest(
'if [[ 1 ]]; then\n\techo "1 is true"\nelif [[ 2 ]]; then\n\techo "2 is true"\nelse\n\techo "nothing is true"\nfi',
'bash',
is_input=False)
java_code = Unittest(
'if (1) {\n\tSystem.out.println("1 is true");\n} else if (2) {\n\tSystem.out.println("2 is true");\n} else {\n\tSystem.out.println("nothing is true");\n}',
'java')
matrix.matrix(self, [py_code, js_code, java_code, bash_code])
if __name__ == '__main__':
unittest2.main()
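
# Note (editor's sketch): each test above wraps equivalent snippets, one per
# language, in a Unittest object and hands the list to matrix.matrix, which --
# as used here -- appears to cross-check translations between the languages;
# is_input=False marks the bash snippet as an expected output only.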
| 44.130435
| 167
| 0.533333
| 447
| 3,045
| 3.52349
| 0.098434
| 0.106667
| 0.142222
| 0.152381
| 0.834921
| 0.8
| 0.798095
| 0.780317
| 0.754921
| 0.733968
| 0
| 0.023309
| 0.281445
| 3,045
| 68
| 168
| 44.779412
| 0.696527
| 0
| 0
| 0.566667
| 0
| 0.2
| 0.480131
| 0.134319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.05
| 0
| 0.133333
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 404e5830e4578ca4f6e6632a4880c94c2c42ba62
| 85
| py
| Python
| spikeextractors/extractors/kilosortextractors/__init__.py
| zekearneodo/spikeextractors
| d30aa85e69d0331fffdb58a03a2bb628f93b405e
| ["MIT"] | 145
| 2018-12-06T23:12:54.000Z
| 2022-02-10T22:57:35.000Z
| spikeextractors/extractors/kilosortextractors/__init__.py
| zekearneodo/spikeextractors
| d30aa85e69d0331fffdb58a03a2bb628f93b405e
| ["MIT"] | 396
| 2018-11-26T11:46:30.000Z
| 2022-01-04T07:27:47.000Z
| spikeextractors/extractors/kilosortextractors/__init__.py
| zekearneodo/spikeextractors
| d30aa85e69d0331fffdb58a03a2bb628f93b405e
| ["MIT"] | 67
| 2018-11-19T12:38:01.000Z
| 2021-09-25T03:18:22.000Z
|
from .kilosortextractors import KiloSortSortingExtractor, KiloSortRecordingExtractor
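
# Example (editor's sketch): typical usage of the re-exported extractors; the
# folder_path argument is assumed from the common spikeextractors convention.
# sorting = KiloSortSortingExtractor(folder_path='path/to/kilosort_output')
# unit_ids = sorting.get_unit_ids()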
| 42.5
| 84
| 0.917647
| 5
| 85
| 15.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 85
| 1
| 85
| 85
| 0.975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 4087c7c36a8169662cbe27cfbc842c59b72bc32f
| 50,379
| py
| Python
| spiketoolkit/validation/quality_metrics.py
| Shawn-Guo-CN/spiketoolkit
| 11e60f3cd80c135c62e27538a4e141115a7e27ad
| ["MIT"] | null | null | null
| spiketoolkit/validation/quality_metrics.py
| Shawn-Guo-CN/spiketoolkit
| 11e60f3cd80c135c62e27538a4e141115a7e27ad
| ["MIT"] | null | null | null
| spiketoolkit/validation/quality_metrics.py
| Shawn-Guo-CN/spiketoolkit
| 11e60f3cd80c135c62e27538a4e141115a7e27ad
| ["MIT"] | null | null | null
|
import spiketoolkit as st
def compute_num_spikes(sorting, sampling_frequency=None, unit_ids=None, epoch_tuples=None, epoch_names=None,
save_as_property=True):
'''
    Computes and returns the number of spikes for the sorted units.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
sampling_frequency:
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch
epoch_names: list
A list of strings for the names of the given epochs
save_as_property: bool
If True, the metric is saved as sorting property
Returns
----------
num_spikes_epochs: list
The spike counts of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=sampling_frequency,
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
num_spikes_epochs = metric_calculator.compute_num_spikes()
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'num_spikes', num_spikes_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return num_spikes_epochs
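
# Example (editor's sketch): the metric helpers in this module share one call
# shape -- pass a SortingExtractor plus optional unit and epoch selections; the
# values below are illustrative only.
# n_spikes = compute_num_spikes(sorting, sampling_frequency=30000.0)
# rates = compute_firing_rates(sorting, sampling_frequency=30000.0)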
def compute_firing_rates(sorting, sampling_frequency=None, unit_ids=None, epoch_tuples=None, epoch_names=None,
save_as_property=True):
'''
    Computes and returns the firing rates for the sorted units.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
sampling_frequency:
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch
epoch_names: list
A list of strings for the names of the given epochs
save_as_property: bool
If True, the metric is saved as sorting property
Returns
----------
firing_rates_epochs: list
The firing rates of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=sampling_frequency,
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
firings_rates_epochs = metric_calculator.compute_firing_rates()
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'firing_rate', firings_rates_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return firings_rates_epochs
def compute_presence_ratios(sorting, sampling_frequency=None, unit_ids=None, epoch_tuples=None, epoch_names=None,
save_as_property=True):
'''
Computes and returns the presence ratios.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
sampling_frequency:
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch
epoch_names: list
A list of strings for the names of the given epochs
save_as_property: bool
If True, the metric is saved as sorting property
Returns
----------
presence_ratios_epochs: list
        The presence ratios of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=sampling_frequency,
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
presence_ratios_epochs = metric_calculator.compute_presence_ratios()
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'presence_ratio', presence_ratios_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return presence_ratios_epochs
def compute_isi_violations(sorting, sampling_frequency=None, isi_threshold=0.0015, min_isi=0.000166, unit_ids=None,
epoch_tuples=None, epoch_names=None, save_as_property=True):
'''
Computes and returns the ISI violations for the given parameters.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
sampling_frequency:
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
isi_threshold: float
The isi threshold for calculating isi violations
min_isi: float
The minimum expected isi value
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch
epoch_names: list
A list of strings for the names of the given epochs
save_as_property: bool
If True, the metric is saved as sorting property
Returns
----------
isi_violations_epochs: list
The isi violations of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=sampling_frequency,
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
isi_violations_epochs = metric_calculator.compute_isi_violations(isi_threshold=isi_threshold, min_isi=min_isi)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'isi_violation', isi_violations_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return isi_violations_epochs
def compute_amplitude_cutoffs(sorting, recording, amp_method='absolute', amp_peak='both', amp_frames_before=3,
amp_frames_after=3, apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the amplitude cutoffs for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
amp_method: str
If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.
amp_peak: str
        Whether the maximum channel has to be found among negative peaks ('neg'), positive peaks ('pos'), or both ('both' - default)
amp_frames_before: int
Frames before peak to compute amplitude.
amp_frames_after: int
Frames after peak to compute amplitude.
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If true, it will save amplitudes in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch
epoch_names: list
A list of strings for the names of the given epochs.
seed: int
Random seed for reproducibility
save_as_property: bool
If True, the metric is saved as sorting property
Returns
----------
amplitude_cutoffs_epochs: list
The amplitude cutoffs of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_amplitudes(recording=recording, amp_method=amp_method, amp_peak=amp_peak,
amp_frames_before=amp_frames_before,
amp_frames_after=amp_frames_after, apply_filter=apply_filter,
freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
amplitude_cutoffs_epochs = metric_calculator.compute_amplitude_cutoffs()
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'amplitude_cutoff', amplitude_cutoffs_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return amplitude_cutoffs_epochs
def compute_snrs(sorting, recording, snr_mode='mad', snr_noise_duration=10.0, max_spikes_per_unit_for_snr=1000,
recompute_info=True, apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and stores snrs for the sorted units.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates from (default 1000)
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
recompute_info: bool
If True, waveforms are recomputed
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, waveforms and templates are saved as properties and features of the sorting extractor
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility.
Returns
----------
snrs_epochs: list
The snrs of the sorted units in the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.set_recording(recording, apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max)
snrs_epochs = metric_calculator.compute_snrs(snr_mode=snr_mode, snr_noise_duration=snr_noise_duration,
max_spikes_per_unit_for_snr=max_spikes_per_unit_for_snr,
recompute_info=recompute_info,
save_features_props=save_features_props, seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'snr', snrs_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return snrs_epochs
def compute_drift_metrics(sorting, recording, drift_metrics_interval_s=51, drift_metrics_min_spikes_per_interval=10,
n_comp=3, ms_before=1., ms_after=2., dtype=None, max_spikes_per_unit=300, recompute_info=True,
max_spikes_for_pca=1e5, apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the drift metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval.
n_comp: int
        Number of PCA components (n_compFeatures in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
----------
max_drifts_epochs: list
The max drift of the given units over the specified epochs
cumulative_drifts_epochs: list
The cumulative drifts of the given units over the specified epochs
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
max_drifts_epochs, cumulative_drifts_epochs = metric_calculator.compute_drift_metrics(
drift_metrics_interval_s=drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval=drift_metrics_min_spikes_per_interval)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'max_drift', max_drifts_epochs[i_u])
sorting.set_unit_property(u, 'cumulative_drift', cumulative_drifts_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epochs_tuples' are given.")
return max_drifts_epochs, cumulative_drifts_epochs
def compute_silhouette_scores(sorting, recording, max_spikes_for_silhouette=10000, n_comp=3, ms_before=1., ms_after=2.,
dtype=None, max_spikes_per_unit=300, recompute_info=True,
max_spikes_for_pca=1e5, apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the silhouette scores in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
n_comp: int
        Number of PCA components (n_compFeatures in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
-------
silhouette_scores_epochs: list
The silhouette scores of the given units for the specified epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
silhouette_scores_epochs = metric_calculator.compute_silhouette_scores(
max_spikes_for_silhouette=max_spikes_for_silhouette, seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'silhouette_score', silhouette_scores_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epoch_tuples' are given.")
return silhouette_scores_epochs
def compute_isolation_distances(sorting, recording, num_channels_to_compare=13, max_spikes_per_cluster=500, n_comp=3,
ms_before=1., ms_after=2.,
dtype=None, max_spikes_per_unit=300, recompute_info=True, max_spikes_for_pca=1e5,
apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the mahalanobis metric, isolation distance, for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
n_comp: int
Number of PC features to compute (in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
-------
isolation_distances_epochs: list
Returns the isolation distances of each specified unit for the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
isolation_distances_epochs = metric_calculator.compute_isolation_distances(
num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster, seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'isolation_distance', isolation_distances_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epoch_tuples' are given.")
return isolation_distances_epochs
def compute_l_ratios(sorting, recording, num_channels_to_compare=13, max_spikes_per_cluster=500, n_comp=3, ms_before=1.,
ms_after=2., dtype=None, max_spikes_per_unit=300, recompute_info=True,
max_spikes_for_pca=1e5, apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the mahalanobis metric, l-ratio, for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
n_comp: int
Number of PC features to compute (in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
-------
l_ratios_epochs: list
Returns the L ratios of each specified unit for the given epochs
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
l_ratios_epochs = metric_calculator.compute_l_ratios(num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster, seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'l_ratio', l_ratios_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epoch_tuples' are given.")
return l_ratios_epochs
def compute_d_primes(sorting, recording, num_channels_to_compare=13, max_spikes_per_cluster=500, n_comp=3, ms_before=1.,
ms_after=2., dtype=None, max_spikes_per_unit=300, recompute_info=True,
max_spikes_for_pca=1e5, apply_filter=True, freq_min=300, freq_max=6000,
save_features_props=False, unit_ids=None, epoch_tuples=None, epoch_names=None,
save_as_property=True, seed=0):
'''
Computes and returns the lda-based metric, d-prime, for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
n_comp: int
Number of PC features to compute (in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
-------
d_primes_epochs: list
Returns the d primes of each specified unit for the given epochs.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
d_primes_epochs = metric_calculator.compute_d_primes(num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster, seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'd_prime', d_primes_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epoch_tuples' are given.")
return d_primes_epochs
def compute_nn_metrics(sorting, recording, num_channels_to_compare=13, max_spikes_per_cluster=500,
max_spikes_for_nn=10000, n_neighbors=4, n_comp=3, ms_before=1., ms_after=2.,
dtype=None, max_spikes_per_unit=300, recompute_info=True, max_spikes_for_pca=1e5,
apply_filter=True, freq_min=300, freq_max=6000, save_features_props=False,
unit_ids=None, epoch_tuples=None, epoch_names=None, save_as_property=True, seed=0):
'''
Computes and returns the nearest neighbor metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes.
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison.
max_spikes_per_cluster: int
Max spikes to be used from each unit.
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation.
n_neighbors: int
Number of neighbors to compare.
n_comp: int
Number of PC features to compute (in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
save_as_property: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
Returns
----------
nn_hit_rates_epochs: np.array
The nearest neighbor hit rates for each specified unit.
nn_miss_rates_epochs: np.array
The nearest neighbor miss rates for each specified unit.
'''
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=recording.get_sampling_frequency(),
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
metric_calculator.compute_pca_scores(recording=recording, n_comp=n_comp, ms_before=ms_before, ms_after=ms_after,
dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
nn_hit_rates_epochs, nn_miss_rates_epochs = metric_calculator.compute_nn_metrics(
num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster,
max_spikes_for_nn=max_spikes_for_nn, n_neighbors=n_neighbors,
seed=seed)
if save_as_property:
if epoch_tuples is None:
for i_u, u in enumerate(unit_ids):
sorting.set_unit_property(u, 'nn_hit_rates', nn_hit_rates_epochs[i_u])
sorting.set_unit_property(u, 'nn_miss_rates', nn_miss_rates_epochs[i_u])
else:
raise NotImplementedError("Quality metrics cannot be saved as properties if 'epoch_tuples' are given.")
return nn_hit_rates_epochs, nn_miss_rates_epochs
def compute_metrics(sorting, recording=None, sampling_frequency=None, isi_threshold=0.0015, min_isi=0.000166,
snr_mode='mad', snr_noise_duration=10.0, max_spikes_per_unit_for_snr=1000,
drift_metrics_interval_s=51, drift_metrics_min_spikes_per_interval=10,
max_spikes_for_silhouette=10000, num_channels_to_compare=13, max_spikes_per_cluster=500,
max_spikes_for_nn=10000, n_neighbors=4, n_comp=3, ms_before=1., ms_after=2., dtype=None,
max_spikes_per_unit=300, amp_method='absolute', amp_peak='both', amp_frames_before=3,
amp_frames_after=3, recompute_info=True, max_spikes_for_pca=1e5, apply_filter=True,
freq_min=300, freq_max=6000, save_features_props=False, metric_names=None, unit_ids=None,
epoch_tuples=None, epoch_names=None, return_dataframe=False, seed=0):
'''
Computes and returns all specified metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes. If None, certain metrics cannot be computed.
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if the sampling frequency is in the sorting extractor.
isi_threshold: float
The isi threshold for calculating isi violations.
min_isi: float
The minimum expected isi value.
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates from (default 1000)
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval.
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison.
max_spikes_per_cluster: int
Max spikes to be used from each unit to compute metrics.
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation.
n_neighbors: int
Number of neighbors to compare for nearest-neighbors calculation.
max_spikes_per_unit: int
The maximum number of spikes to extract (default is np.inf)
amp_method: str
If 'absolute' (default), amplitudes are returned as absolute amplitudes in uV.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.
amp_peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)
amp_frames_before: int
Frames before peak to compute amplitude
amp_frames_after: float
Frames after peak to compute amplitude
recompute_info: bool
If True, will always re-extract waveforms.
max_spikes_for_pca: int
The maximum number of spikes to use to compute PCA (default is np.inf)
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz).
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz).
save_features_props: bool
If True, save all features and properties in the sorting extractor.
n_comp: int
Number of PC features to compute (in template-gui format)
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
metric_names: list
The list of metric names to be computed. Available metrics are: 'firing_rate', 'num_spikes', 'isi_viol',
'presence_ratio', 'amplitude_cutoff', 'max_drift', 'cumulative_drift', 'silhouette_score',
'isolation_distance', 'l_ratio', 'd_prime', 'nn_hit_rate', 'nn_miss_rate', 'snr'. If None, all metrics are
computed.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
epoch_tuples: list
A list of tuples with a start and end time for each epoch.
epoch_names: list
A list of strings for the names of the given epochs.
return_dataframe: bool
If True, this function will return a dataframe of the metrics.
seed: int
Random seed for reproducibility
Returns
-------
metrics_epochs : list
List of metrics data. The list consists of lists of metric data for each given epoch.
OR
metrics_df: pandas.DataFrame
A pandas dataframe of the cached metrics
'''
metrics_epochs = []
all_metrics_list = ['firing_rate', 'num_spikes', 'isi_viol', 'presence_ratio', 'amplitude_cutoff', 'max_drift',
'cumulative_drift', 'silhouette_score', 'isolation_distance', 'l_ratio', 'd_prime',
'nn_hit_rate', 'nn_miss_rate', 'snr']
if metric_names is None:
metric_names = all_metrics_list
else:
bad_metrics = []
for m in metric_names:
if m not in all_metrics_list:
bad_metrics.append(m)
if len(bad_metrics) > 0:
raise ValueError("Unknown metric name(s): " + str(bad_metrics))
if recording is not None:
sampling_frequency = recording.get_sampling_frequency()
metric_calculator = st.validation.MetricCalculator(sorting, sampling_frequency=sampling_frequency,
unit_ids=unit_ids,
epoch_tuples=epoch_tuples, epoch_names=epoch_names)
if 'max_drift' in metric_names or 'cumulative_drift' in metric_names or 'silhouette_score' in metric_names \
or 'isolation_distance' in metric_names or 'l_ratio' in metric_names or 'd_prime' in metric_names \
or 'nn_hit_rate' in metric_names or 'nn_miss_rate' in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing max_drift, cumulative_drift, "
"silhouette_score, isolation_distance, l_ratio, d_prime, nn_hit_rate, "
"or nn_miss_rate.")
else:
metric_calculator.compute_all_metric_data(recording=recording, n_comp=n_comp, ms_before=ms_before,
ms_after=ms_after, dtype=dtype,
max_spikes_per_unit=max_spikes_per_unit, amp_method=amp_method,
amp_peak=amp_peak,
amp_frames_before=amp_frames_before,
amp_frames_after=amp_frames_after,
recompute_info=recompute_info,
max_spikes_for_pca=max_spikes_for_pca,
apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
elif 'amplitude_cutoff' in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing amplitude cutoffs.")
else:
metric_calculator.compute_amplitudes(recording=recording, amp_method=amp_method, amp_peak=amp_peak,
amp_frames_before=amp_frames_before,
amp_frames_after=amp_frames_after, apply_filter=apply_filter,
freq_min=freq_min, freq_max=freq_max,
save_features_props=save_features_props, seed=seed)
elif 'snr' in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing snr.")
else:
metric_calculator.set_recording(recording, apply_filter=apply_filter, freq_min=freq_min, freq_max=freq_max)
if 'num_spikes' in metric_names:
num_spikes_epochs = metric_calculator.compute_num_spikes()
metrics_epochs.append(num_spikes_epochs)
if 'firing_rate' in metric_names:
firing_rates_epochs = metric_calculator.compute_firing_rates()
metrics_epochs.append(firing_rates_epochs)
if 'presence_ratio' in metric_names:
presence_ratios_epochs = metric_calculator.compute_presence_ratios()
metrics_epochs.append(presence_ratios_epochs)
if 'isi_viol' in metric_names:
isi_violations_epochs = metric_calculator.compute_isi_violations(isi_threshold=isi_threshold, min_isi=min_isi)
metrics_epochs.append(isi_violations_epochs)
if 'amplitude_cutoff' in metric_names:
amplitude_cutoffs_epochs = metric_calculator.compute_amplitude_cutoffs()
metrics_epochs.append(amplitude_cutoffs_epochs)
if 'snr' in metric_names:
snrs_epochs = metric_calculator.compute_snrs(snr_mode=snr_mode, snr_noise_duration=snr_noise_duration,
max_spikes_per_unit_for_snr=max_spikes_per_unit_for_snr)
metrics_epochs.append(snrs_epochs)
if 'max_drift' in metric_names or 'cumulative_drift' in metric_names:
max_drifts_epochs, cumulative_drifts_epochs = metric_calculator.compute_drift_metrics(
drift_metrics_interval_s=drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval=drift_metrics_min_spikes_per_interval)
if 'max_drift' in metric_names:
metrics_epochs.append(max_drifts_epochs)
if 'cumulative_drift' in metric_names:
metrics_epochs.append(cumulative_drifts_epochs)
if 'silhouette_score' in metric_names:
silhouette_scores_epochs = metric_calculator.compute_silhouette_scores(
max_spikes_for_silhouette=max_spikes_for_silhouette, seed=seed)
metrics_epochs.append(silhouette_scores_epochs)
if 'isolation_distance' in metric_names:
isolation_distances_epochs = metric_calculator.compute_isolation_distances(
num_channels_to_compare=num_channels_to_compare, max_spikes_per_cluster=max_spikes_per_cluster,
seed=seed)
metrics_epochs.append(isolation_distances_epochs)
if 'l_ratio' in metric_names:
l_ratios_epochs = metric_calculator.compute_l_ratios(num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster, seed=seed)
metrics_epochs.append(l_ratios_epochs)
if 'd_prime' in metric_names:
d_primes_epochs = metric_calculator.compute_d_primes(num_channels_to_compare=num_channels_to_compare,
max_spikes_per_cluster=max_spikes_per_cluster, seed=seed)
metrics_epochs.append(d_primes_epochs)
if 'nn_hit_rate' in metric_names or 'nn_miss_rate' in metric_names:
nn_hit_rates_epochs, nn_miss_rates_epochs = metric_calculator.compute_nn_metrics(
num_channels_to_compare=num_channels_to_compare, max_spikes_per_cluster=max_spikes_per_cluster,
max_spikes_for_nn=max_spikes_for_nn, n_neighbors=n_neighbors, seed=seed)
if 'nn_hit_rate' in metric_names:
metrics_epochs.append(nn_hit_rates_epochs)
if 'nn_miss_rate' in metric_names:
metrics_epochs.append(nn_miss_rates_epochs)
if return_dataframe:
metrics_df = metric_calculator.get_metrics_df()
return metrics_df
else:
return metrics_epochs
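# Hedged usage sketch (an addition, not part of the original module): computing
# a dataframe for a subset of metrics; `rec` and `sort` are assumed extractor
# objects loaded elsewhere. With return_dataframe=True the function returns the
# cached pandas DataFrame from the MetricCalculator, as shown in the code above.
#
#   df = compute_metrics(sort, recording=rec,
#                        metric_names=['firing_rate', 'snr', 'isi_viol'],
#                        return_dataframe=True)
#   print(df.head())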
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 40a01a454c06c426f8e730660c8186f0008bbe29 | events/ped_events.py | mtasa-typescript/mtasa-wiki-dump @ edea1746850fb6c99d6155d1d7891e2cceb33a5c | Python, 27,389 bytes | license MIT | 1 star
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
EventData, \
CompoundEventData
DUMP_PARTIAL = [
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedDamage',
docs=FunctionDoc(
description='This event is triggered whenever a ped is damaged.' ,
arguments={
"attacker": """: A player element representing the attacker or vehicle element (when a ped falls of a bike). """,
"weapon": """: An integer representing the Weapons|weapon ID the attacker used """,
"bodypart": """: An integer representing the bodypart the ped was damaged """,
"loss": """: A float representing the percentage of health the ped lost. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='attacker',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weapon',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='bodypart',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='loss',
argument_type=FunctionType(
names=['float'],
is_optional=True,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedHeliKilled',
docs=FunctionDoc(
description='This event is fired when a ped is killed due to the effect of a helicopter blades.' ,
arguments={
"killer": """the vehicle (heli) responsible for causing the death. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='killer',
argument_type=FunctionType(
names=['vehicle'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedHitByWaterCannon',
docs=FunctionDoc(
description='This event is fired when a ped is hit by a water cannon.' ,
arguments={
"pedHit": """the ped which got shot by the water cannon """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='pedHit',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedStep',
docs=FunctionDoc(
description="This event is called when a ped's foot has come onto the ground after jumping or taking a full step." ,
arguments={
"leftFoot": """: a bool representing if it was the left foot that moved. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='leftFoot',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedVehicleEnter',
docs=FunctionDoc(
description='' ,
arguments={
"theVehicle": """The vehicle that the ped entered. """,
"seat": """The seat that the ped now is on. Drivers seat = 0, higher numbers are passenger seats. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theVehicle',
argument_type=FunctionType(
names=['vehicle'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='seat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedVehicleExit',
docs=FunctionDoc(
description='' ,
arguments={
"theVehicle": """The vehicle that the ped exited. """,
"seat": """The number of the seat that the ped was sitting on. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theVehicle',
argument_type=FunctionType(
names=['vehicle'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='seat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedWasted',
docs=FunctionDoc(
description='This event is triggered whenever a ped dies.' ,
arguments={
"killer": """: A player element representing the killer. """,
"weapon": """: An int|integer representing the Weapons|killer weapon or the Damage Types|damage types. """,
"bodypart": """: An int|integer representing the bodypart the player was damaged. """,
"loss": """: A float representing the percentage of health the ped lost in the final hit. Note: Only for client-side created peds.
'''OR''' """,
"stealth": """: A boolean representing whether or not this was a stealth kill. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='killer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='weapon',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='bodypart',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='loss',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
),
FunctionArgument(
name='stealth',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
],
client=[
EventData(
name='onClientPedWeaponFire',
docs=FunctionDoc(
description='This event is called when a ped shoots a weapon. This does not trigger for projectile-based or melee weapons.' ,
arguments={
"weapon": """: an int representing weapons|weapon used for making a shot. """,
"ammo": """: an int ammount of ammo left for this weapon type. """,
"ammoInClip": """: an int ammount of ammo left for this weapon type in clip. """,
"hitX": """, hitY, hitZ: float world coordinates representing a hit point. """,
"hitElement": """: an element which was hit by a shot. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='weapon',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='ammo',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='ammoInClip',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='hitX',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='hitY',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='hitZ',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='hitElement',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
),
CompoundEventData(
server=[
EventData(
name='onPedDamage',
docs=FunctionDoc(
description='This event is triggered when a ped is damaged. For player damage, use onPlayerDamage instead.' ,
arguments={
"loss": """: an int representing the percentage of health the ped lost. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='loss',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
client=[
],
),
CompoundEventData(
server=[
EventData(
name='onPedVehicleEnter',
docs=FunctionDoc(
description='' ,
arguments={
"theVehicle": """: A vehicle element representing the vehicle that was entered. """,
"seat": """: An int representing the seat in which the ped is entering. """,
"jacked": """: A player or ped element representing who has been jacked. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theVehicle',
argument_type=FunctionType(
names=['vehicle'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='seat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='jacked',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
client=[
],
),
CompoundEventData(
server=[
EventData(
name='onPedVehicleExit',
docs=FunctionDoc(
description='' ,
arguments={
"theVehicle": """: A vehicle element representing the vehicle in which the ped exited from. """,
"seat": """: An int representing the seat in which the ped was before exiting. """,
"jacker": """: A player or ped element representing who jacked the driver. """,
"forcedByScript": """A boolean representing whether the exit was forced using removePedFromVehicle or by the ped. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theVehicle',
argument_type=FunctionType(
names=['vehicle'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='seat',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='jacker',
argument_type=FunctionType(
names=['ped'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='forcedByScript',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
client=[
],
),
CompoundEventData(
server=[
EventData(
name='onPedWasted',
docs=FunctionDoc(
description='This event is triggered when a ped is killed or dies. It is not triggered for players.' ,
arguments={
"totalAmmo": """: an int representing the total ammo the victim had when he died. """,
"killer": """: an element representing the player or vehicle who was the killer. If there was no killer this is false. """,
"killerWeapon": """: an int representing the Weapons|killer weapon or the Damage Types|damage types. """,
"bodypart": """: an int representing the bodypart ID the victim was hit on when he died. """,
"stealth": """: a boolean representing whether or not this was a stealth kill. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='totalAmmo',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='killer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='killerWeapon',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='bodypart',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='stealth',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
client=[
],
),
CompoundEventData(
server=[
EventData(
name='onPedWeaponSwitch',
docs=FunctionDoc(
description='This event is triggered when a ped switches weapons.' ,
arguments={
"previousWeaponID": """: an int representing the weapon that was switched from. """,
"currentWeaponID": """: an int representing the weapon that was switched to. """
},
result='' ,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='previousWeaponID',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='currentWeaponID',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
)
],
client=[
],
)
]
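# Illustrative consumer sketch (an addition, not part of the autogenerated
# dump): walk DUMP_PARTIAL and print each event name with its side, assuming
# CompoundEventData exposes its server/client fields as attributes.
#
#   for compound in DUMP_PARTIAL:
#       for side, events in (('server', compound.server), ('client', compound.client)):
#           for event in events:
#               print(side, event.name)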
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 9054ff3253327eff6e4f7afe4df3cc12d5f1ddc5 | imgaug/augmenters/__init__.py | hungrayho/yolo3 @ f3759bf10ab8bed4a5133989cda8f8d59b20aa16 | Python, 533 bytes | license MIT
from __future__ import absolute_import
from imgaug.augmenters.arithmetic import *
from imgaug.augmenters.blend import *
from imgaug.augmenters.blur import *
from imgaug.augmenters.color import *
from imgaug.augmenters.contrast import *
from imgaug.augmenters.convolutional import *
from imgaug.augmenters.flip import *
from imgaug.augmenters.geometric import *
from imgaug.augmenters.meta import *
from imgaug.augmenters.segmentation import *
from imgaug.augmenters.size import *
from imgaug.augmenters.weather import *
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 9085c0a4366098004c978d03a3c382a4605bcbb3 | TestHtmlWrapper.py | ytyaru/Python.pylangstudy.HtmlWrapper.201705220846 @ e18a488577c4ed1af690fa629d988ef867c1834d | Python, 5,160 bytes | license CC0-1.0
import unittest
import os.path
import HtmlWrapper
class TestHtmlWrapper(unittest.TestCase):
# def __init__(self):
# pass
def test_CreateElement_html(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html')
self.assertEqual(html, '<html></html>')
def test_Wrap_html(self):
w = HtmlWrapper.HtmlWrapper()
html = w.Wrap(w.CreateElement('html'), w.CreateElement('body'))
self.assertEqual(html, '<html><body></body></html>')
def test_Wrap_nest3(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html')
body = w.CreateElement('body')
ul = w.CreateElement('ul')
lis = ''
for count in range(1, 4):
lis += w.CreateElement('li', text_node_value='項目{0}'.format(count))  # '項目' means 'item'
ul = w.Wrap(ul, lis)
body = w.Wrap(body, ul)
html = w.Wrap(html, body)
self.assertEqual(html, '<html><body><ul><li>項目1</li><li>項目2</li><li>項目3</li></ul></body></html>')
def test_CreateElement_Attributes(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', lang='ja')
self.assertEqual(html, '<html lang="ja"></html>')
def test_CreateElement_Attributes_dict(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', **{'lang': 'ja'})
self.assertEqual(html, '<html lang="ja"></html>')
def test_CreateElement_Attributes_id_(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', id_='id1')
self.assertEqual(html, '<html id="id1"></html>')
def test_CreateElement_Attributes_class_(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', class_='cls1 cls2')
self.assertEqual(html, '<html class="cls1 cls2"></html>')
def test_CreateElement_Attributes_id_class_(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', id_='id1', class_='cls1 cls2')
self.assertIn(html, ['<html id="id1" class="cls1 cls2"></html>', '<html class="cls1 cls2" id="id1"></html>'])
def test_CreateElement_Attributes_id(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', **{'id': 'id1'})
self.assertEqual(html, '<html id="id1"></html>')
def test_CreateElement_Attributes_class(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', **{'class': 'cls1 cls2'})
self.assertEqual(html, '<html class="cls1 cls2"></html>')
def test_CreateElement_Attributes_id_class(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', **{'id': 'id1', 'class': 'cls1 cls2'})
self.assertIn(html, ['<html id="id1" class="cls1 cls2"></html>', '<html class="cls1 cls2" id="id1"></html>'])
def test_CreateElement_Attributes_id_id(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', id_='id1', **{'id': 'id2'})
self.assertEqual(html, '<html id="id1"></html>')
def test_CreateElement_Attributes_class_class(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', class_='cls1 cls2', **{'class': 'cls3 cls4'})
self.assertEqual(html, '<html class="cls1 cls2"></html>')
def test_CreateElement_Attributes_id_id_class_class(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', id_='id1', class_='cls1 cls2', **{'id': 'id2', 'class': 'cls3 cls4'})
self.assertIn(html, ['<html id="id1" class="cls1 cls2"></html>', '<html class="cls1 cls2" id="id1"></html>'])
def test_CreateElement_Attributes_dict_multi(self):
w = HtmlWrapper.HtmlWrapper()
html = w.CreateElement('html', **{'id': 'id1', 'class': 'cls1 cls2', 'lang': 'ja'})
self.assertIn(html, [
'<html id="id1" class="cls1 cls2" lang="ja"></html>',
'<html id="id1" lang="ja" class="cls1 cls2"></html>',
'<html lang="ja" id="id1" class="cls1 cls2"></html>',
'<html lang="ja" class="cls1 cls2" id="id1"></html>',
'<html class="cls1 cls2" lang="ja" id="id1"></html>',
'<html class="cls1 cls2" id="id1" lang="ja"></html>'
])
def test_CreateElement_None(self):
w = HtmlWrapper.HtmlWrapper()
element_name = None
with self.assertRaises(Exception) as e:
html = w.CreateElement(element_name)
self.assertEqual(str(e.exception), '要素名を指定してください。: element_name = {0}'.format(element_name))  # expected message: "Please specify the element name."
def test_CreateElement_blank(self):
w = HtmlWrapper.HtmlWrapper()
element_name = ''
with self.assertRaises(Exception) as e:
html = w.CreateElement(element_name)
self.assertEqual(str(e.exception), '要素名を指定してください。: element_name = {0}'.format(element_name))
def test_CreateElement_space(self):
w = HtmlWrapper.HtmlWrapper()
element_name = ' '
with self.assertRaises(Exception) as e:
html = w.CreateElement(element_name)
self.assertEqual(str(e.exception), '要素名を指定してください。: element_name = {0}'.format(element_name))
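# Standard unittest entry point (an addition, not in the original file), so the
# suite can be run directly with `python TestHtmlWrapper.py`.
if __name__ == '__main__':
    unittest.main()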
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 908ac9bab86cd055cff258beb8bef1a4ea213786 | decorator_2.py | jepster/python_advanced_techniques @ f4b0e0dda7b66be55f650f9f902e735d3f5a9f64 | Python, 250 bytes | license MIT
def make_divisibility_test(n):
def divisible_by_n(m):
return m % n == 0
return divisible_by_n
div_by_3 = make_divisibility_test(3)
tuple(filter(div_by_3, range(10))) # => (0, 3, 6, 9)
print(make_divisibility_test(5)(10)) # => True
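# Hedged follow-up sketch (an addition, not in the original file): the same
# closure pattern written as a decorator, the technique the filename refers to.
def announce(func):
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        print(f"{func.__name__}{args} -> {result}")
        return result
    return wrapper

@announce
def div_by_7(m):
    return m % 7 == 0

div_by_7(14)  # prints: div_by_7(14,) -> True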
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 90f9d1b8609fcd41593763343270ab9e8e8f2f86 | capcom/__init__.py | rcthomas/capcom @ e72c41da046e05d8450dfa7297cb5dee5a206daa | Python, 150 bytes | license BSD-3-Clause | 1 star
from census import *
from dbi import *
from parser import *
from scraper import *
from selector import *
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 290cd744c3bed450a7f51b9e08827eacc782b0b8 | active_semi_clustering/exceptions.py | heriosousa/active-semi-supervised-clustering @ 8ed97c7f3bdd76cf6c03e0ca6ef56bcf27b2d399 | Python, 169 bytes | license MIT | 67 stars
class ClusteringNotFoundException(Exception):
pass
class EmptyClustersException(Exception):
pass
class InconsistentConstraintsException(Exception):
pass
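# Minimal usage sketch (an addition, not part of the original module):
# `assign_clusters`, `X`, and `constraints` are hypothetical names used only
# for illustration of how a caller might handle these exceptions.
#
#   try:
#       labels = assign_clusters(X, constraints)
#   except InconsistentConstraintsException:
#       pass  # drop or relax conflicting must-link/cannot-link constraints
#   except EmptyClustersException:
#       pass  # reseed the empty cluster and retry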
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 291d7795a276f54915ea30b9e2a876b3400978a4 | src/satextractor/tiler/__init__.py | oxfordeo/sat-extractor @ 1d6841751e8b2ce65a02f5d3d608f181a31ab917 | Python, 45 bytes | license BSD-2-Clause | 29 stars
from .tiler import split_region_in_utm_tiles
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 29385fc52e1ebe822e998c4e12734fb1b6d0a3d0 | torchqf/stochastic/brownian.py | simaki/torchqf @ e4dfd154c62ccd858847048f77d8c2f82924ae80 | Python, 4,292 bytes | license BSD-3-Clause | 7 stars
import torch
from torch import Tensor
from ..tensor import steps
def generate_brownian(
size,
time,
drift: float = 0.0,
volatility: float = 0.2,
init_value: float = 0.0,
dtype=None,
device=None,
) -> Tensor:
"""Generates and returns time-series that follows Brownian motion.
Args:
size (torch.Size): The shape of the output tensor.
The last dimension means the number of time steps.
time (float | Tensor): The total time length (`float`) or time steps (`Tensor`).
drift (float, default 0.0): The drift of the process.
volatility (float, default 0.2): The volatility of the process.
init_value (float, default 0.0): Initial value of the process.
dtype (`torch.dtype`, optional): The desired data type of returned tensor.
Default: if None, uses a global default
(see `torch.set_default_tensor_type()`).
device (`torch.device`, optional): The desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see `torch.set_default_tensor_type()`).
`device` will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
Returns:
Tensor: The time-series.
Size:
- time: :math:`(T,)`
:math:`T` means the number of time steps.
- output: :math:`(*, T)`
The shape specified by `size`.
Examples:
>>> from torchqf.stochastic import generate_brownian
>>> _ = torch.manual_seed(42)
>>> generate_brownian((2, 5), time=0.1)
tensor([[ 0.0095, 0.0132, 0.0198, 0.0263, -0.0054],
[-0.0053, 0.0572, 0.0391, 0.0522, 0.0598]])
"""
assert dtype is None, "not supported"
assert device is None, "not supported"
n_steps = size[-1]
if not isinstance(time, torch.Tensor):
time = steps(time, n_steps) # shape : (T,)
dt = torch.empty_like(time)
dt[0] = time[0] - 0.0
dt[1:] = time[1:] - time[:-1]
drift_term = drift * time
random_term = (volatility * torch.randn(size) * dt.sqrt()).cumsum(-1)
return init_value + drift_term + random_term
def generate_geometric_brownian(
size,
time,
drift: float = 0.0,
volatility: float = 0.2,
init_value: float = 1.0,
dtype=None,
device=None,
) -> Tensor:
"""Generates and returns time-series that follows geometric Brownian motion.
Args:
size (tuple[int]): The shape of the output tensor.
The last dimension means the number of time steps.
time (float | Tensor): The total time length (`float`) or time steps (`Tensor`).
drift (float, default 0.0): The drift of the process.
volatility (float, default 0.2): The volatility of the process.
init_value (float, default 1.0): Initial value of the process.
dtype (`torch.dtype`, optional): The desired data type of returned tensor.
Default: if None, uses a global default
(see `torch.set_default_tensor_type()`).
device (`torch.device`, optional): The desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see `torch.set_default_tensor_type()`).
`device` will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types.
Returns:
Tensor: The time-series.
Size:
- time: :math:`(T,)`
:math:`T` means the number of time steps.
- output: :math:`(*, T)`
The shape specified by `size`.
Examples:
>>> from torchqf.stochastic import generate_geometric_brownian
>>> _ = torch.manual_seed(42)
>>> generate_geometric_brownian((2, 5), time=0.1)
tensor([[1.0092, 1.0124, 1.0188, 1.0250, 0.9926],
[0.9943, 1.0580, 1.0387, 1.0519, 1.0595]])
"""
assert dtype is None, "not supported"
assert device is None, "not supported"
brown = generate_brownian(
size,
time=time,
drift=drift - volatility ** 2 / 2,
volatility=volatility,
dtype=dtype,
device=device,
)
return init_value * torch.exp(brown)
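# Hedged usage sketch (an addition, not in the original module): 1000 geometric
# Brownian paths over one year with 250 steps. With this parameterization the
# terminal mean should approach exp(drift * time), i.e. exp(0.05) here.
#
#   paths = generate_geometric_brownian((1000, 250), time=1.0,
#                                       drift=0.05, volatility=0.2)
#   print(paths[:, -1].mean())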
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 294c59832bba932ffc6997cadfe29c3060020bf2 | problem3_6starter_origin.py | BjornChrisnach/Coursera_Python_concise_intro @ 12c3c022b89dc8bba0fe02000728a69d7e4fd0ef | Python, 71 bytes | license MIT
# problem3_6.py -*- coding: utf-8 -*-
import sys
# add your code here
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 4621d7096646ebfc22985e637e3ed0aa96d4a8f8 | sleepmind/models/__init__.py | IamGianluca/sleepmind @ 7132feb08086e57219ff9859545eafa6842b5c96 | Python, 39 bytes | license MIT | 1 star
from .xgboost import XGBoostClassifier
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 462ad89e5a98b2236f40eda1d78eb54c68ad2774 | Lib/fontTools/ttLib/tables/T_S_I_J_.py | anntzer/fonttools @ 726cd67549956b985bbbe83e26fb0af9da59ddf7 | Python, 116 bytes | licenses MIT, BSD-3-Clause | 2 stars
from fontTools.misc.py23 import *
from .T_S_I_V_ import table_T_S_I_V_
class table_T_S_I_J_(table_T_S_I_V_):
pass
[record boundary — per-file quality-signal columns omitted]
next record: hexsha 465e79d220f84e2d98aaec826747b644e378b931 | molecule/official_packages/tests/test_software.py | mesaguy/ansible-hashicorp @ 1e2a0fb5fa11061968de2a574853b1a6b4cee80d | Python, 157 bytes | license MIT | 2 stars
def test_installed(host, hashicorp_official_package_names):
for name in hashicorp_official_package_names:
assert host.package(name).is_installed
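The testinfra check above asserts every package inside one test body; a hedged pytest-style variant (the package names below are placeholders, not taken from the role, and testinfra's `host` fixture is assumed) reports each package as its own pass/fail:
import pytest

# Hypothetical sketch: parametrize over package names so each package
# becomes an independent test case in the pytest report.
PACKAGE_NAMES = ["consul", "vault"]  # placeholder names

@pytest.mark.parametrize("name", PACKAGE_NAMES)
def test_installed_each(host, name):
    assert host.package(name).is_installed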
| 39.25
| 59
| 0.808917
| 21
| 157
| 5.666667
| 0.619048
| 0.285714
| 0.403361
| 0.487395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133758
| 157
| 3
| 60
| 52.333333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
466b740ac6f5504a3e0bd592a76eb7ea9cb7f560
| 52
|
py
|
Python
|
server/app.py
|
henri-hulski/morepath_cerebral_todomvc
|
568ac277c1844c4cf28bbacf484940f779fc7407
|
[
"BSD-3-Clause"
] | 314
|
2015-01-01T01:42:52.000Z
|
2022-01-07T21:46:15.000Z
|
server/app.py
|
henri-hulski/morepath_cerebral_todomvc
|
568ac277c1844c4cf28bbacf484940f779fc7407
|
[
"BSD-3-Clause"
] | 369
|
2015-01-02T19:10:40.000Z
|
2021-07-03T04:37:27.000Z
|
server/app.py
|
henri-hulski/morepath_cerebral_todomvc
|
568ac277c1844c4cf28bbacf484940f779fc7407
|
[
"BSD-3-Clause"
] | 37
|
2015-01-11T09:22:02.000Z
|
2021-07-02T20:48:20.000Z
|
import morepath
class App(morepath.App):
pass
| 8.666667
| 24
| 0.711538
| 7
| 52
| 5.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 52
| 5
| 25
| 10.4
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
46996a6bda526fcafacf4d33206d23606c9e01fa
| 8,127
|
py
|
Python
|
ExampleCases/OpFAST_FLORIS_WF3x1/plotyaw.py
|
tonino102008/openfast
|
cfb401af163f4e0b6bb8588c23374e1534ad8d87
|
[
"Apache-2.0"
] | null | null | null |
ExampleCases/OpFAST_FLORIS_WF3x1/plotyaw.py
|
tonino102008/openfast
|
cfb401af163f4e0b6bb8588c23374e1534ad8d87
|
[
"Apache-2.0"
] | null | null | null |
ExampleCases/OpFAST_FLORIS_WF3x1/plotyaw.py
|
tonino102008/openfast
|
cfb401af163f4e0b6bb8588c23374e1534ad8d87
|
[
"Apache-2.0"
] | 1
|
2021-02-05T17:50:01.000Z
|
2021-02-05T17:50:01.000Z
|
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import control.matlab as cnt
import cp
import scipy.optimize as optim
dfdata = pd.read_csv('t1.T1.out', sep='\t', header=None, skiprows=10)
datadata = dfdata.values
dfdata2 = pd.read_csv('t2.T2.out', sep='\t', header=None, skiprows=10)
datadata2 = dfdata2.values
dfdata3 = pd.read_csv('t3.T3.out', sep='\t', header=None, skiprows=10)
datadata3 = dfdata3.values
iT = 0
nT = 3
nend = 30000
df = pd.read_csv('EPOWER.txt', header=None)
data = df.values[iT::nT,:]
df6 = pd.read_csv('ECROSS.txt', header=None)
data6 = df6.values[iT::nT,:]
df8 = pd.read_csv('EWIND.txt', header=None)
data8 = df8.values[iT::nT,:]
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata[:,0], datadata[:,21], 'r')
axes[0].set_title("T1 Yaw Angle w.r.t. Wind Direction", fontsize = 20)
axes[0].set_ylabel("T1 Yaw Angle (deg)", fontsize = 20)
axes[0].set_xlabel("Simulated Time (s)", fontsize = 20)
axes[0].tick_params(axis="x", labelsize=20)
axes[0].tick_params(axis="y", labelsize=20)
axes[1].plot(datadata2[:,0], datadata2[:,21], 'r')
axes[1].set_title("T2 Yaw Angle w.r.t. Wind Direction", fontsize = 20)
axes[1].set_ylabel("T2 Yaw Angle (deg)", fontsize = 20)
axes[1].set_xlabel("Simulated Time (s)", fontsize = 20)
axes[1].tick_params(axis="x", labelsize=20)
axes[1].tick_params(axis="y", labelsize=20)
plt.show()
plt.plot(datadata[:,0], datadata[:,1], 'b')
plt.title("T1 Wind Speed \n X-axis of Reference Farm Layout", fontsize = 20)
plt.ylabel("Wind Speed (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata[:,0], datadata[:,21], 'r')
plt.title("T1 Yaw Angle w.r.t. Wind Direction", fontsize = 20)
plt.ylabel("Yaw Angle", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata2[:,0], datadata2[:,1], 'b')
plt.title("T2 Wind Speed \n X-axis of Reference Farm Layout", fontsize = 20)
plt.ylabel("Wind Speed (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata2[:,0], datadata2[:,21], 'r')
plt.title("T2 Yaw Angle w.r.t. Wind Direction", fontsize = 20)
plt.ylabel("Yaw Angle", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata3[:,0], datadata3[:,1], 'b')
plt.title("T3 Wind Speed \n X-axis of Reference Farm Layout", fontsize = 20)
plt.ylabel("Wind Speed (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata3[:,0], datadata3[:,21], 'r')
plt.title("T3 Yaw Angle w.r.t. Wind Direction", fontsize = 20)
plt.ylabel("Yaw Angle", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata[:,0], datadata[:,23], 'b', datadata2[:,0], datadata2[:,23], 'r', datadata3[:,0], datadata3[:,23], 'g')
plt.title("Ct")
plt.show()
nn = min([len(data6[:,0]), len(data8[:,0]), len(datadata[:,0])])
Wind_mag = numpy.power(numpy.power(datadata[:nn:,1], 2) + numpy.power(datadata[:nn:,2], 2), 0.5)
plt.plot(data6[:nn:,2], numpy.arctan2(data6[:nn:,0], data8[:nn:,0])*180/numpy.pi, 'b', data6[:nn:,2], numpy.arctan2(data6[:nn:,1], data8[:nn:,0])*180/numpy.pi, 'y', datadata[:nn:,0], numpy.arctan2(datadata[:nn:,2] - numpy.multiply(Wind_mag, numpy.sin(datadata[:nn:,21] * numpy.pi/180.0)), datadata[:,1])*180/numpy.pi,'r', datadata[:nn:,0], numpy.arctan2(datadata[:nn:,2], datadata[:nn:,1])*180.0/numpy.pi, 'g')
plt.title("CROSS WIND ESTIMATE YAW ERROR", fontsize = 20)
plt.xlabel("SIMULATED TIME (s)", fontsize = 20)
plt.ylabel("WIND RELATIVE YAW ERROR (deg)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.ylim(-35,35)
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata[:,0], datadata[:,1], 'b')
axes[0].set_title("Wind X", fontsize = 20)
axes[0].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[0].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
axes[1].plot(datadata[:,0], datadata[:,2], 'r')
axes[1].set_title("Wind Y", fontsize = 20)
axes[1].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[1].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata[:,0], datadata[:,1], 'b', datadata2[:,0], datadata2[:,1], 'r' ,datadata3[:,0], datadata3[:,1], 'g')
plt.title("Wind U", fontsize = 20)
plt.ylabel("Wind U (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata2[:,0], datadata2[:,1], 'b')
plt.title("Wind U", fontsize = 20)
plt.ylabel("Wind U (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
plt.plot(datadata3[:,0], datadata3[:,1], 'b')
plt.title("Wind U", fontsize = 20)
plt.ylabel("Wind U (m/s)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata2[:,0], datadata2[:,1], 'b')
axes[0].set_title("Wind X", fontsize = 20)
axes[0].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[0].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
axes[1].plot(datadata2[:,0], datadata2[:,2], 'r')
axes[1].set_title("Wind Y", fontsize = 20)
axes[1].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[1].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata3[:,0], datadata3[:,1], 'b')
axes[0].set_title("Wind X", fontsize = 20)
axes[0].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[0].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
axes[1].plot(datadata3[:,0], datadata3[:,2], 'r')
axes[1].set_title("Wind Y", fontsize = 20)
axes[1].set_ylabel("Wind X (m/s)", fontsize = 20)
axes[1].set_xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata[:,0], datadata[:,21], 'b')
axes[0].set_title("Yaw Position")
axes[1].plot(datadata[:,0], datadata[:,22]*numpy.pi/180.0, 'r')
axes[1].set_title("Yaw Speed")
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata2[:,0], datadata2[:,21], 'b')
axes[0].set_title("Yaw Position")
axes[1].plot(datadata2[:,0], datadata2[:,22]*numpy.pi/180.0, 'r')
axes[1].set_title("Yaw Speed")
plt.show()
fig, axes = plt.subplots(2,sharex = True)
axes[0].plot(datadata3[:,0], datadata3[:,21], 'b')
axes[0].set_title("Yaw Position")
axes[1].plot(datadata3[:,0], datadata3[:,22]*numpy.pi/180.0, 'r')
axes[1].set_title("Yaw Speed")
plt.show()
plt.plot(datadata[:,0], datadata[:,52], 'b', datadata2[:,0], datadata2[:,52],'r', datadata3[:,0], datadata3[:,52], 'g')
plt.title("POWER", fontsize = 20)
plt.ylabel("POWER (kW)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
nn = min([len(datadata[:,0]), len(datadata2[:,0]), len(datadata3[:,0])])
plt.plot(datadata[:nn:,0], datadata[:nn:,52]+datadata2[:nn:,52]+datadata3[:nn:,52],'g')
plt.title("POWER TOTAL", fontsize = 20)
plt.ylabel("POWER TOTAL (kW)", fontsize = 20)
plt.xlabel("Simulated Time (s)", fontsize = 20)
plt.xticks(fontsize=20, rotation=0)
plt.yticks(fontsize=20, rotation=0)
plt.show()
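The script above repeats the same title/label/tick decoration for every figure; a minimal refactoring sketch (the helper name and signature are mine, not from the repository) captures that pattern once:
import matplotlib.pyplot as plt

def decorated_plot(x, y, style, title, ylabel,
                   xlabel="Simulated Time (s)", fontsize=20):
    # One figure, decorated the same way the script does throughout.
    plt.plot(x, y, style)
    plt.title(title, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.xticks(fontsize=fontsize, rotation=0)
    plt.yticks(fontsize=fontsize, rotation=0)
    plt.show()

# e.g. decorated_plot(datadata[:, 0], datadata[:, 1], 'b',
#                     "T1 Wind Speed", "Wind Speed (m/s)")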
| 39.26087
| 410
| 0.680325
| 1,373
| 8,127
| 3.996358
| 0.086672
| 0.174959
| 0.099508
| 0.124658
| 0.829962
| 0.807545
| 0.756151
| 0.716238
| 0.702387
| 0.696009
| 0
| 0.0727
| 0.0945
| 8,127
| 207
| 411
| 39.26087
| 0.672918
| 0
| 0
| 0.623656
| 0
| 0
| 0.14936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3bc046401a00e84d30558571e42547a86086b34
| 43
|
py
|
Python
|
hyde/__init__.py
|
MoonCrystalPower/Dr.Hyde
|
e324f60899fad0d96aa35a6e669c9aa7dff6ca58
|
[
"MIT"
] | null | null | null |
hyde/__init__.py
|
MoonCrystalPower/Dr.Hyde
|
e324f60899fad0d96aa35a6e669c9aa7dff6ca58
|
[
"MIT"
] | null | null | null |
hyde/__init__.py
|
MoonCrystalPower/Dr.Hyde
|
e324f60899fad0d96aa35a6e669c9aa7dff6ca58
|
[
"MIT"
] | null | null | null |
from .app import *
from .commands import *
| 14.333333
| 23
| 0.72093
| 6
| 43
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 43
| 2
| 24
| 21.5
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d3e7b88a903e8f5daf0380d2a77023f1e7db7c58
| 4,287
|
py
|
Python
|
savanamedapi/test/test_api.py
|
erego/savanamedapi
|
bcf79fd92c20a2a9abc3cc41d121844a67ec78d3
|
[
"MIT"
] | null | null | null |
savanamedapi/test/test_api.py
|
erego/savanamedapi
|
bcf79fd92c20a2a9abc3cc41d121844a67ec78d3
|
[
"MIT"
] | null | null | null |
savanamedapi/test/test_api.py
|
erego/savanamedapi
|
bcf79fd92c20a2a9abc3cc41d121844a67ec78d3
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import json
from savanamedapi import app
class TestApi(TestCase):
def setUp(self):
self.test_client = app.test_client(self)
def test_list_api_cancer(self):
param_to_sent = {"search": "cancer"}
resp = self.test_client.post("/savanamed/api/get_terms", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 0)
resp = self.test_client.get("/savanamed/api/get_terms", query_string=param_to_sent)
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 0)
def test_list_api_embarazo(self):
param_to_sent = {"search": "embarazo"}
resp = self.test_client.post("/savanamed/api/get_terms", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 1)
self.assertEqual(data['terms'][0]['name'], 'embarazo')
self.assertEqual(data['terms'][0]['id'], 4)
resp = self.test_client.get("/savanamed/api/get_terms", query_string=param_to_sent)
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 1)
self.assertEqual(data['terms'][0]['name'], 'embarazo')
self.assertEqual(data['terms'][0]['id'], 4)
def test_list_api_cirugia(self):
param_to_sent = {"search": "cirugia"}
resp = self.test_client.post("/savanamed/api/get_terms", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 2)
self.assertEqual(data['terms'][0]['name'], 'cirugia')
self.assertEqual(data['terms'][0]['id'], 2)
self.assertEqual(data['terms'][1]['name'], 'cirugia cardiaca')
self.assertEqual(data['terms'][1]['id'], 3)
resp = self.test_client.get("/savanamed/api/get_terms", query_string=param_to_sent)
data = json.loads(resp.data.decode())
self.assertEqual(len(data["terms"]), 2)
self.assertEqual(data['terms'][0]['name'], 'cirugia')
self.assertEqual(data['terms'][0]['id'], 2)
self.assertEqual(data['terms'][1]['name'], 'cirugia cardiaca')
self.assertEqual(data['terms'][1]['id'], 3)
def test_list_api_param_wrong(self):
param_to_sent = {"searching": "cancer"}
resp = self.test_client.post("/savanamed/api/get_terms", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(data["message"], "Search key not found")
def test_detail_api(self):
param_to_sent = {"id": 1}
resp = self.test_client.post("/savanamed/api/get_details", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(data['detail_term'][0]['name'], 'ictus')
self.assertEqual(len(data['detail_term'][0]['descriptions']), 2)
resp = self.test_client.get("/savanamed/api/get_details", query_string=param_to_sent)
data = json.loads(resp.data.decode())
self.assertEqual(data['detail_term'][0]['name'], 'ictus')
self.assertEqual(len(data['detail_term'][0]['descriptions']), 2)
def test_detail_api_param_wrong(self):
param_to_sent = {"ident": 1}
resp = self.test_client.post("/savanamed/api/get_details", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(data["message"], "Id key not found in parameters")
def test_detail_api_id_not_found(self):
param_to_sent = {"id": 7}
resp = self.test_client.post("/savanamed/api/get_details", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(len(data['detail_term']), 0)
def test_endpoint_not_exist(self):
param_to_sent = {"id": 1}
resp = self.test_client.post("/savanamed/api/get_descriptions", data=json.dumps(param_to_sent))
data = json.loads(resp.data.decode())
self.assertEqual(resp.status_code, 404)
self.assertEqual(data['message'],
"The requested URL was not found on the server. If you entered the URL "
"manually please check your spelling and try again.")
| 36.02521
| 103
| 0.637975
| 575
| 4,287
| 4.577391
| 0.14087
| 0.153875
| 0.083587
| 0.082067
| 0.807371
| 0.776976
| 0.776976
| 0.75152
| 0.737842
| 0.737842
| 0
| 0.011037
| 0.196874
| 4,287
| 118
| 104
| 36.330508
| 0.753413
| 0
| 0
| 0.613333
| 0
| 0
| 0.196874
| 0.070679
| 0
| 0
| 0
| 0
| 0.36
| 1
| 0.12
| false
| 0
| 0.04
| 0
| 0.173333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d3fdde1c55ff9d1a94ef0962125ea91849e5ec28
| 78
|
py
|
Python
|
pyspecnest/tests/__init__.py
|
vlas-sokolov/pyspecnest
|
b8bd7044380da3cca7d2a5323c141d4b1ca93cd1
|
[
"MIT"
] | 1
|
2019-01-16T16:19:12.000Z
|
2019-01-16T16:19:12.000Z
|
pyspecnest/tests/__init__.py
|
vlas-sokolov/pyspecnest
|
b8bd7044380da3cca7d2a5323c141d4b1ca93cd1
|
[
"MIT"
] | 4
|
2019-09-17T22:24:55.000Z
|
2020-04-15T15:08:04.000Z
|
pyspecnest/tests/__init__.py
|
vlas-sokolov/pyspecnest
|
b8bd7044380da3cca7d2a5323c141d4b1ca93cd1
|
[
"MIT"
] | 3
|
2019-09-18T08:17:59.000Z
|
2021-02-28T19:50:36.000Z
|
from . import blackbox
from . import spec_model_tests
from . import run_tests
| 19.5
| 30
| 0.807692
| 12
| 78
| 5
| 0.583333
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 3
| 31
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
312a8ed82b9f7859937aa3f9bd211b42ec91523e
| 69
|
py
|
Python
|
qunetsim/objects/packets/__init__.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 61
|
2020-02-15T00:59:20.000Z
|
2022-03-08T10:29:23.000Z
|
qunetsim/objects/packets/__init__.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 50
|
2020-01-28T12:18:50.000Z
|
2021-12-16T21:38:19.000Z
|
qunetsim/objects/packets/__init__.py
|
pritamsinha2304/QuNetSim
|
65a7486d532816724b5c98cfdcc0910404bfe0e2
|
[
"MIT"
] | 27
|
2020-01-21T12:59:28.000Z
|
2022-02-21T14:23:00.000Z
|
from .packet import Packet
from .routing_packet import RoutingPacket
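Several `__init__.py` files in these rows use this same re-export pattern; a small sketch (imports as above, with an `__all__` added by me) makes the re-exported surface explicit:
from .packet import Packet
from .routing_packet import RoutingPacket

# Pin the names that `from package import *` should expose.
__all__ = ["Packet", "RoutingPacket"]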
| 23
| 41
| 0.855072
| 9
| 69
| 6.444444
| 0.555556
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 69
| 2
| 42
| 34.5
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
317a9aaa2a03b0da292b0b17543c96705af24412
| 42
|
py
|
Python
|
services/music/test/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | 2
|
2020-03-26T19:20:31.000Z
|
2020-03-30T13:09:07.000Z
|
services/music/test/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | 51
|
2020-03-05T09:04:21.000Z
|
2021-12-13T20:34:22.000Z
|
services/music/test/__init__.py
|
Ovakefali13/buerro
|
1476f6e708f95a09a2d73f67ae8aa2cb3bb836af
|
[
"MIT"
] | null | null | null |
from .test_service import MusicMockRemote
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
31938a4df6acf3ead90937873988916863b194fd
| 37
|
py
|
Python
|
src/parsers/__init__.py
|
iweans/translrt
|
e96d2cf0e7f0f378b8078a8e2942669ad924af8f
|
[
"MIT"
] | 1
|
2020-02-25T01:58:34.000Z
|
2020-02-25T01:58:34.000Z
|
src/parsers/__init__.py
|
iweans/translrt
|
e96d2cf0e7f0f378b8078a8e2942669ad924af8f
|
[
"MIT"
] | null | null | null |
src/parsers/__init__.py
|
iweans/translrt
|
e96d2cf0e7f0f378b8078a8e2942669ad924af8f
|
[
"MIT"
] | null | null | null |
from .markdown import MarkdownParser
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
31af52404eb6d6c3cdc11e912028134a7ca5f718
| 169
|
py
|
Python
|
pyinstalive/__main__.py
|
pjw91/PyInstaLive
|
433bf862e2d38f9131772905ceab17343053dc3b
|
[
"MIT"
] | 234
|
2020-04-08T09:47:42.000Z
|
2022-03-29T19:12:52.000Z
|
pyinstalive/__main__.py
|
pjw91/PyInstaLive
|
433bf862e2d38f9131772905ceab17343053dc3b
|
[
"MIT"
] | 80
|
2017-10-08T08:53:09.000Z
|
2020-03-27T16:54:55.000Z
|
pyinstalive/__main__.py
|
pjw91/PyInstaLive
|
433bf862e2d38f9131772905ceab17343053dc3b
|
[
"MIT"
] | 80
|
2020-04-08T10:42:25.000Z
|
2022-03-23T03:54:14.000Z
|
try: # Python 2
from startup import run
except ImportError: # Python 3
from .startup import run
def main():
run()
if __name__ == '__main__':
    main()
| 13
| 31
| 0.621302
| 22
| 169
| 4.409091
| 0.636364
| 0.226804
| 0.350515
| 0.412371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.278107
| 169
| 12
| 32
| 14.083333
| 0.778689
| 0.100592
| 0
| 0.25
| 0
| 0
| 0.053691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0.375
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
31d8a53ae57d8ee48ceb57a8eb7d1d88b90cfc2e
| 97
|
py
|
Python
|
aegis/data/parser.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
aegis/data/parser.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
aegis/data/parser.py
|
Yijie-Wu/Aegis
|
f8082b66d55be135a5e2bec7ac15f860f99f7df7
|
[
"MIT"
] | null | null | null |
# -*- encoding:utf-8 -*-
"""
Author: Yijie.Wu
Email: [email protected]
Date: 2020/5/14 13:43
"""
| 13.857143
| 24
| 0.618557
| 16
| 97
| 3.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 0.134021
| 97
| 6
| 25
| 16.166667
| 0.452381
| 0.896907
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
31df6fb33a5dd05fc0d6331443a27721848ec336
| 20
|
py
|
Python
|
tiotools/__init__.py
|
twinleaf/tio-python
|
ce272d0bf3f60d7b97e41b3b7742a094a8db3f26
|
[
"MIT"
] | 9
|
2017-12-21T16:21:49.000Z
|
2021-12-02T20:48:03.000Z
|
tiotools/__init__.py
|
twinleaf/tio-python
|
ce272d0bf3f60d7b97e41b3b7742a094a8db3f26
|
[
"MIT"
] | 5
|
2018-12-14T22:06:08.000Z
|
2021-09-30T17:33:53.000Z
|
tiotools/__init__.py
|
twinleaf/tio-python
|
ce272d0bf3f60d7b97e41b3b7742a094a8db3f26
|
[
"MIT"
] | 4
|
2017-12-27T12:46:34.000Z
|
2020-06-05T17:14:34.000Z
|
from .itio import *
| 10
| 19
| 0.7
| 3
| 20
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ef267e9db214e8479d8d79aa0625132da08722e
| 70
|
py
|
Python
|
WeatherPy/api_keys.py
|
tkabdelaziz/python-api-challenge
|
f69b70bc59a17e49130a655738d7c675c63b29b4
|
[
"ADSL"
] | null | null | null |
WeatherPy/api_keys.py
|
tkabdelaziz/python-api-challenge
|
f69b70bc59a17e49130a655738d7c675c63b29b4
|
[
"ADSL"
] | null | null | null |
WeatherPy/api_keys.py
|
tkabdelaziz/python-api-challenge
|
f69b70bc59a17e49130a655738d7c675c63b29b4
|
[
"ADSL"
] | null | null | null |
# OpenWeatherMap API Key
api_key = "e6fdf32acc353fd819e7190e6f44d47d"
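Hardcoding a key like this commits the credential to version control; a hedged alternative (the variable name `OPENWEATHERMAP_API_KEY` is my choice, not from the repository) reads it from the environment instead:
import os

# Hypothetical sketch: keep the secret out of the source tree.
api_key = os.environ.get("OPENWEATHERMAP_API_KEY", "")
if not api_key:
    raise RuntimeError("Set OPENWEATHERMAP_API_KEY before running WeatherPy")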
| 23.333333
| 44
| 0.842857
| 6
| 70
| 9.666667
| 0.666667
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.1
| 70
| 2
| 45
| 35
| 0.634921
| 0.314286
| 0
| 0
| 0
| 0
| 0.695652
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
73333abc61baea0ed961b2ad582ade9139ffab4a
| 45
|
py
|
Python
|
image_classification/token_labeling/tlt/models/__init__.py
|
AK391/UniFormer
|
22c6b3b98b68236dda6a8fa7152a32af1af62a20
|
[
"MIT"
] | 367
|
2022-01-14T03:32:25.000Z
|
2022-03-31T04:48:20.000Z
|
image_classification/token_labeling/tlt/models/__init__.py
|
hadlang/UniFormer
|
e8024703bffb89cb7c7d09e0d774a0d2a9f96c25
|
[
"MIT"
] | 27
|
2022-01-27T07:12:49.000Z
|
2022-03-31T04:31:13.000Z
|
image_classification/token_labeling/tlt/models/__init__.py
|
hadlang/UniFormer
|
e8024703bffb89cb7c7d09e0d774a0d2a9f96c25
|
[
"MIT"
] | 53
|
2022-01-18T11:21:43.000Z
|
2022-03-31T06:42:41.000Z
|
from .lvvit import *
from .uniformer import *
| 22.5
| 24
| 0.755556
| 6
| 45
| 5.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 24
| 22.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
73505118824c9e00c59e8a0d453bcc5313ec713c
| 38
|
py
|
Python
|
examples/ehdemov3/living_room_rgb/user_boot.py
|
ulno/iot-devkit
|
6e90c1c207f23c4b5bf374f58d3701550e6c70ca
|
[
"MIT"
] | null | null | null |
examples/ehdemov3/living_room_rgb/user_boot.py
|
ulno/iot-devkit
|
6e90c1c207f23c4b5bf374f58d3701550e6c70ca
|
[
"MIT"
] | null | null | null |
examples/ehdemov3/living_room_rgb/user_boot.py
|
ulno/iot-devkit
|
6e90c1c207f23c4b5bf374f58d3701550e6c70ca
|
[
"MIT"
] | 1
|
2020-07-23T03:03:38.000Z
|
2020-07-23T03:03:38.000Z
|
import rgb_handler
rgb_handler.run()
| 9.5
| 18
| 0.815789
| 6
| 38
| 4.833333
| 0.666667
| 0.689655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 3
| 19
| 12.666667
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b40131cc0f7760cd11fcf2ba0595a918a3c845c6
| 25
|
py
|
Python
|
mercs/core/__init__.py
|
systemallica/mercs
|
39e999620ab989abb29310488dcd30354d029490
|
[
"MIT"
] | 11
|
2020-01-28T16:15:53.000Z
|
2021-05-20T08:05:42.000Z
|
mercs/core/__init__.py
|
systemallica/mercs
|
39e999620ab989abb29310488dcd30354d029490
|
[
"MIT"
] | null | null | null |
mercs/core/__init__.py
|
systemallica/mercs
|
39e999620ab989abb29310488dcd30354d029490
|
[
"MIT"
] | 4
|
2020-02-06T09:02:28.000Z
|
2022-02-14T09:42:04.000Z
|
from .Mercs import Mercs
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b40a09413f8b62366473c29f262ace0bbb8cfb88
| 31,345
|
py
|
Python
|
tests/python_venv/test_env_pyenv.py
|
jmknoble/python-venv
|
698aff4341c358b0e1469c845398f275a3df1cb8
|
[
"MIT"
] | 1
|
2021-06-04T15:24:45.000Z
|
2021-06-04T15:24:45.000Z
|
tests/python_venv/test_env_pyenv.py
|
jmknoble/python-venv
|
698aff4341c358b0e1469c845398f275a3df1cb8
|
[
"MIT"
] | 6
|
2021-06-04T15:06:31.000Z
|
2021-09-24T06:04:45.000Z
|
tests/python_venv/test_env_pyenv.py
|
jmknoble/python-venv
|
698aff4341c358b0e1469c845398f275a3df1cb8
|
[
"MIT"
] | null | null | null |
"""Provide unit tests for `~python_venv.env`:py:mod:."""
import os
import os.path
import random
import subprocess
import unittest
import parameterized # https://pypi.org/project/parameterized/
from python_venv import const, env
from python_venv import exceptions as exc
from python_venv import reqs
from tests.python_venv import contextmgr as ctx
from tests.python_venv import flags
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_200_PyenvEnvironment(unittest.TestCase):
def setUp(self):
self.saved_requirements = reqs.REQUIREMENTS
def tearDown(self):
reqs.REQUIREMENTS = self.saved_requirements
def test_PV_ENV_PYNV_000_instantiate_empty(self):
with self.assertRaises(TypeError) as raised:
env.PyenvEnvironment()
msg = raised.exception.args[0]
self.assertTrue(
msg.startswith("__init__() missing 1 required positional argument")
)
@parameterized.parameterized.expand(
[
("dry_run", {"dry_run": True}, "dry_run", True),
("force", {"force": True}, "force", True),
(
"message_prefix",
{"message_prefix": "dummy_message_prefix"},
"message_prefix",
"dummy_message_prefix",
),
("python", {"python": "dummy_python"}, "python", "dummy_python"),
("basename", {"basename": "dummy_basename"}, "_basename", "dummy_basename"),
("env_name", {"env_name": "dummy_env_name"}, "_env_name", "dummy_env_name"),
(
"env_prefix",
{"env_prefix": "dummy_env_prefix"},
"_env_prefix",
"dummy_env_prefix",
),
]
)
def test_PV_ENV_PYNV_002_instantiate_kwargs(self, name, kwargs, attr, value):
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(getattr(x, attr), value)
def test_PV_ENV_PYNV_010_requirements(self):
dummy_requirements = {"dummy_req_source": ["dummy_requirement"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertListEqual(x.requirements.requirements, [dummy_requirements])
def test_PV_ENV_PYNV_020_package_name(self):
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertEqual(x.package_name, "python_venv")
@parameterized.parameterized.expand(
[
("default", None, "python-venv"),
("specified", "dummy-package", "dummy-package"),
]
)
def test_PV_ENV_PYNV_030_basename(self, name, basename, expected):
kwargs = {} if basename is None else {"basename": basename}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.basename, expected)
@parameterized.parameterized.expand(
[
("default", reqs.REQ_SCHEME_PLAIN, {}, "python-venv"),
("default_dev", reqs.REQ_SCHEME_DEV, {}, "python-venv-dev"),
("default_devplus", reqs.REQ_SCHEME_DEVPLUS, {}, "python-venv-dev"),
(
"default_prefix",
reqs.REQ_SCHEME_PLAIN,
{"env_prefix": "dummy-prefix-"},
"dummy-prefix-python-venv",
),
(
"basename",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package"},
"dummy-package",
),
(
"basename_dev",
reqs.REQ_SCHEME_DEV,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_devplus",
reqs.REQ_SCHEME_DEVPLUS,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_prefix",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package", "env_prefix": "dummy-prefix-"},
"dummy-prefix-dummy-package",
),
("specified", "dummy_req_scheme", {"env_name": "dummy-env"}, "dummy-env"),
(
"specified_prefix",
"dummy_req_scheme",
{"env_name": "dummy-env", "env_prefix": "dummy-prefix-"},
"dummy-env",
),
]
)
def test_PV_ENV_PYNV_040_env_name(self, name, req_scheme, kwargs, expected):
x = env.PyenvEnvironment(req_scheme, **kwargs)
self.assertEqual(x.env_name, expected)
@parameterized.parameterized.expand(
[
("default", "dummy-basename", None, None, "<ENV_DIR>"),
("specified", None, "dummy-env", None, "<ENV_DIR>"),
("with_prefix", "dummy-basename", None, "dummy-prefix", "<ENV_DIR>"),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
"<ENV_DIR>",
),
]
)
def test_PV_ENV_PYNV_050_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.env_dir, expected)
@parameterized.parameterized.expand(
[
(
"default",
"dummy-basename",
None,
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified",
None,
"dummy-env",
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"with_prefix",
"dummy-basename",
None,
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
]
)
def test_PV_ENV_PYNV_051_abs_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.abs_env_dir, expected)
@parameterized.parameterized.expand(
[
("specified", "dummy-env", "dummy-env"),
]
)
def test_PV_ENV_PYNV_060_env_description(self, name, env_name, expected):
kwargs = {} if env_name is None else {"env_name": env_name}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
x.env_description
self.assertTrue(x.env_description.endswith(expected))
@parameterized.parameterized.expand(
[
("dry_run_text", {}, "[DRY-RUN]"),
("create_msg", {}, "Creating pyenv environment dummy-package"),
("create_venv", {}, "+ pyenv virtualenv"),
("install_msg", {}, "Installing dummy_req_scheme requirements"),
(
"pip_install",
{},
"+ <ENV_DIR>/bin/python3 -m pip install -r dummy_requirements.txt",
),
("success", {}, "==> Done."),
]
)
def test_PV_ENV_PYNV_100_create_dry_run(self, name, kwargs, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
**kwargs,
)
with ctx.capture(x.create) as (
status,
_stdout,
stderr,
):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("remove_msg", "Removing pyenv environment dummy-package"),
]
)
def test_PV_ENV_PYNV_200_remove_dry_run(self, name, expected_text):
x = env.PyenvEnvironment(
reqs.REQ_SCHEME_PLAIN, dry_run=True, basename="dummy-package"
)
with ctx.capture(x.remove) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("replace_msg", "Replacing pyenv environment dummy-package"),
("remove_msg", "Removing pyenv environment dummy-package"),
("create_msg", "Creating pyenv environment dummy-package"),
("success", "==> Done."),
]
)
def test_PV_ENV_PYNV_300_replace_dry_run(self, name, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
)
with ctx.capture(x.replace) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_210_PyenvCreate(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_110_create(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
stderr,
):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=stderr)
self.assertNotIn("error", testable_stderr)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None),
("dev", reqs.REQ_SCHEME_DEV, False, None, None),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None),
]
)
def test_PV_ENV_PYNV_120_create_missing_reqs(
self, name, req_scheme, dry_run, basename, env_name
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
with ctx.project("dummy_package", dirs=dirs):
x = env.PyenvEnvironment(
req_scheme,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
with self.assertRaises(exc.MissingRequirementsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, True),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, True),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
"dummy-env",
True,
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, "dummy-env", True),
]
)
def test_PV_ENV_PYNV_130_create_duplicate(
self, name, req_scheme, dry_run, env_name, should_raise
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (_status, _stdout, _stderr):
pass
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
if should_raise:
with self.assertRaises(exc.EnvExistsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
else:
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (_status, _stdout, stderr):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=stderr)
self.assertNotIn("error", testable_stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_220_PyenvRemove(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_210_remove(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
x.remove() # remove non-existent
y.create()
x.remove() # remove existing
else:
original_stderrs = []
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=stderr)
self.assertNotIn("error", text)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_230_PyenvReplace(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# Random prefix for environments is required
# since pyenv virtualenv doesn't give us a choice
# to place an environment somewhere specific.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_310_replace_nonexistent(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=stderr)
self.assertNotIn("error", text)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_320_replace_existing(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
y.create()
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=stderr)
self.assertNotIn("error", text)
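The three `setUp()` methods above rebuild the same alphanumeric prefix from chr()/ord() ranges by hand; a compact sketch of the same idea (the helper name is mine; `random.choices` needs Python 3.6+):
import random
import string

def random_env_prefix(length=10):
    # Same character set as the chr()/ord() ranges assembled in setUp().
    alphabet = string.digits + string.ascii_uppercase + string.ascii_lowercase
    return "".join(random.choices(alphabet, k=length)) + "-"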
| 40.289203
| 88
| 0.53077
| 3,253
| 31,345
| 4.838918
| 0.065785
| 0.071469
| 0.078458
| 0.030557
| 0.87504
| 0.84677
| 0.825233
| 0.767931
| 0.756115
| 0.728543
| 0
| 0.005346
| 0.3435
| 31,345
| 777
| 89
| 40.341055
| 0.759598
| 0.020131
| 0
| 0.7
| 0
| 0
| 0.145248
| 0.020272
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.036111
| false
| 0.004167
| 0.015278
| 0
| 0.056944
| 0.006944
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b41a0470134fb1e8ea681d8bc3db3889104e143a
| 12,319
|
py
|
Python
|
tests/test_radials.py
|
rucool/HFRadarPy
|
4b81d21da2f7c732e5377689df7bb328bfb37e37
|
[
"MIT"
] | 1
|
2022-03-15T13:58:00.000Z
|
2022-03-15T13:58:00.000Z
|
tests/test_radials.py
|
rucool/HFRadarPy
|
4b81d21da2f7c732e5377689df7bb328bfb37e37
|
[
"MIT"
] | 1
|
2021-12-03T00:36:41.000Z
|
2021-12-03T00:36:41.000Z
|
tests/test_radials.py
|
rucool/HFRadarPy
|
4b81d21da2f7c732e5377689df7bb328bfb37e37
|
[
"MIT"
] | 2
|
2020-11-17T17:06:34.000Z
|
2022-03-04T01:26:58.000Z
|
import unittest
from pathlib import Path
import numpy as np
import xarray as xr
from hfradarpy.radials import Radial
from hfradarpy.radials import concat as concatenate_radials
data_path = (Path(__file__).parent.with_name('examples') / 'data').resolve()
output_path = (Path(__file__).parent.with_name('examples') / 'output').resolve()
def test_codar_radial_to_tabular_netcdf():
radial_file = data_path / 'radials' / 'ruv' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.ruv'
nc_file = output_path / 'radials' / 'nc' / 'tabular' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-tabular')
# Convert it to an xarray Dataset with no variable
    # or attribute enhancements
xds2 = rad1.to_xarray_tabular(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray_tabular(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_codar_radial_to_multidimensional_netcdf():
radial_file = data_path / 'radials' / 'ruv' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.ruv'
nc_file = output_path / 'radials' / 'nc' / 'multidimensional' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-multidimensional')
# Convert it to an xarray Dataset with no variable
    # or attribute enhancements
xds2 = rad1.to_xarray_multidimensional(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray_multidimensional(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_wera_radial_to_tabular_netcdf():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials' / 'nc' / 'tabular' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-tabular')
# Convert it to an xarray Dataset with no variable
    # or attribute enhancements
xds2 = rad1.to_xarray_tabular(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray_tabular(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_wera_radial_to_multidimensional_netcdf():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials' / 'nc' / 'multidimensional' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-multidimensional')
# Convert it to an xarray Dataset with no variable
    # or attribute enhancements
xds2 = rad1.to_xarray_multidimensional(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray_multidimensional(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_wera_mask():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
# Total points before masking
assert len(rad1.data) == 6327
rad1.mask_over_land()
# Make sure we subset the land points
assert len(rad1.data) == 5745
def test_wera_qc():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
rad1.initialize_qc()
assert len(rad1.data) == 6327
rad1.mask_over_land()
rad1.qc_qartod_radial_count()
rad1.qc_qartod_valid_location()
rad1.qc_qartod_maximum_velocity()
rad1.qc_qartod_spatial_median()
rad1.qc_qartod_avg_radial_bearing(reference_bearing=180)
rad1.qc_qartod_primary_flag()
assert len(rad1.data) == 5745
assert 'QC07' in rad1.data
assert 'QC08' not in rad1.data # no VFLG column so we can't run it
assert 'QC09' in rad1.data
assert 'QC10' in rad1.data
# assert 'QC11' in rad1.data # temporal gradient test
assert 'QC12' in rad1.data
assert 'PRIM' in rad1.data
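# --- Hedged aside (not part of the original test suite) ---
# Sketch of the usual QARTOD flag convention the columns above follow:
# 1 = pass, 2 = not evaluated, 3 = suspect, 4 = fail, 9 = missing.
# A primary flag is commonly the worst (highest) flag across the per-test
# columns for each row. The DataFrame below is synthetic; the column
# names simply mirror the asserts above.
import pandas as pd  # repeated import, for self-containment

def _sketch_primary_flag():
    flags = pd.DataFrame({
        'QC07': [1, 1, 4],
        'QC09': [1, 3, 1],
        'QC10': [1, 1, 1],
    })
    primary = flags.max(axis=1)  # worst flag wins
    assert list(primary) == [1, 3, 4]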
def test_wera_raw_to_quality_multidimensional_nc():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials' / 'qc' / 'nc' / 'multidimensional' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
rad1.initialize_qc()
rad1.mask_over_land()
rad1.qc_qartod_radial_count()
rad1.qc_qartod_valid_location()
rad1.qc_qartod_maximum_velocity()
rad1.qc_qartod_spatial_median()
rad1.export(str(nc_file), file_type='netcdf-multidimensional')
xds2 = rad1.to_xarray_multidimensional(enhance=True)
with xr.open_dataset(nc_file) as xds1:
assert len(xds1.QCTest) == 3 # no VFLG column so one test not run
# The two enhanced files should be identical
assert xds1.identical(xds2)
def test_wera_raw_to_quality_tabular_nc():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials' / 'qc' / 'nc' / 'tabular' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
rad1.initialize_qc()
rad1.mask_over_land()
rad1.qc_qartod_radial_count()
rad1.qc_qartod_valid_location()
rad1.qc_qartod_maximum_velocity()
rad1.qc_qartod_spatial_median()
rad1.export(str(nc_file), file_type='netcdf-tabular')
xds2 = rad1.to_xarray_tabular(enhance=True)
with xr.open_dataset(nc_file) as xds1:
assert len(xds1.QCTest) == 3 # no VFLG column so one test not run
# The two enhanced files should be identical
assert xds1.identical(xds2)
def test_miami_radial_multidimensional_nc():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_UMiami_STF_2019_06_01_0000.hfrweralluv1.0'
nc_file = output_path / 'radials' / 'nc' / 'multidimensional' / 'WERA' / 'RDL_UMiami_STF_2019_06_01_0000.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when the radial is exported.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-multidimensional')
# Convert it to an xarray Dataset with no variable
# or attribute enhancements
xds2 = rad1.to_xarray_multidimensional(enhance=False)
# Convert it to an xarray Dataset with increased usability
# by changing variable names, adding attributes,
# and decoding CF conventions such as scale_factor
xds3 = rad1.to_xarray_multidimensional(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_miami_radial_tabular_nc():
radial_file = data_path / 'radials' / 'ruv' / 'WERA' / 'RDL_UMiami_STF_2019_06_01_0000.hfrweralluv1.0'
nc_file = output_path / 'radials' / 'nc' / 'tabular' / 'WERA' / 'RDL_UMiami_STF_2019_06_01_0000.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when the radial is exported.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf-tabular')
# Convert it to an xarray Dataset with no variable
# or attribute enhancements
xds2 = rad1.to_xarray_tabular(enhance=False)
# Convert it to an xarray Dataset with increased usability
# by changing variable names, adding attributes,
# and decoding CF conventions such as scale_factor
xds3 = rad1.to_xarray_tabular(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
class TestCombineRadials(unittest.TestCase):
def setUp(self):
self.file_paths = list(
(data_path / 'radials' / 'ruv' / 'SEAB').glob('*.ruv')
)
self.radial_files = [
str(r) for r in self.file_paths
]
self.radial_objects = [
Radial(str(r)) for r in self.radial_files
]
# Select even-indexed file paths and odd-indexed radial objects
# into one list of mixed content types for concatenating
self.radial_mixed = self.radial_files[::2] + self.radial_objects[1:][::2]
def test_concat_radial_objects(self):
combined = concatenate_radials(self.radial_objects)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_radial_files(self):
combined = concatenate_radials(self.radial_files)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_mixed_radials(self):
combined = concatenate_radials(self.radial_mixed)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_mixed_radials_enhance(self):
# Concatenate the mixed list of file paths and Radial
# objects built in setUp, this time with enhancement enabled
combined = concatenate_radials(self.radial_mixed, enhance=True)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
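# --- Hedged aside (not part of the original test suite) ---
# A self-contained sketch of the concatenate-and-sort behaviour the
# assertions above check, written in plain xarray; the real
# `concatenate_radials` lives in the library under test and is assumed
# to do something equivalent along the time dimension.
import numpy as np
import pandas as pd
import xarray as xr

def _sketch_concatenate():
    # two single-time datasets, deliberately out of order
    times = pd.to_datetime(['2019-01-01T01:00', '2019-01-01T00:00'])
    parts = [
        xr.Dataset({'u': ('time', [0.1])}, coords={'time': [t]})
        for t in times
    ]
    combined = xr.concat(parts, dim='time').sortby('time')
    assert combined.time.size == len(parts)
    assert np.array_equal(combined.time.values,
                          np.sort(combined.time.values))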
| 38.984177
| 114
| 0.701924
| 1,689
| 12,319
| 4.89698
| 0.114861
| 0.01741
| 0.020312
| 0.023939
| 0.907871
| 0.898078
| 0.86894
| 0.858058
| 0.855277
| 0.842703
| 0
| 0.040383
| 0.21203
| 12,319
| 316
| 115
| 38.984177
| 0.811682
| 0.309847
| 0
| 0.631902
| 0
| 0
| 0.132295
| 0.077024
| 0
| 0
| 0
| 0
| 0.208589
| 1
| 0.092025
| false
| 0
| 0.03681
| 0
| 0.134969
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b41a6fb1230ffb4cb7b7dd42943c11f7da6ced76
| 141
|
py
|
Python
|
scippycrm/organisations/__init__.py
|
realScipio/scippycrm
|
f267b82f07e2784e97089b0b9a35024d58fd4a60
|
[
"MIT"
] | 14
|
2018-07-12T19:08:28.000Z
|
2021-10-16T23:46:10.000Z
|
scippycrm/organisations/__init__.py
|
Zenahr/scippycrm
|
f267b82f07e2784e97089b0b9a35024d58fd4a60
|
[
"MIT"
] | 1
|
2020-02-12T00:41:34.000Z
|
2020-02-12T00:41:34.000Z
|
scippycrm/organisations/__init__.py
|
Zenahr/scippycrm
|
f267b82f07e2784e97089b0b9a35024d58fd4a60
|
[
"MIT"
] | 5
|
2020-01-08T18:26:47.000Z
|
2022-03-10T06:51:07.000Z
|
from flask import Blueprint
organisations_blueprint = Blueprint('organisations', __name__, template_folder='templates')
from . import routes
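# --- Hedged aside (not part of the original module) ---
# A minimal sketch of how a blueprint like this gets wired into an
# app; the url_prefix and the stand-in blueprint below are
# illustrative, not scippycrm's actual wiring:
from flask import Flask, Blueprint

# stand-in for the organisations_blueprint defined above
organisations_bp = Blueprint('organisations', __name__, template_folder='templates')

app = Flask(__name__)
app.register_blueprint(organisations_bp, url_prefix='/organisations')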
| 35.25
| 91
| 0.829787
| 15
| 141
| 7.4
| 0.666667
| 0.396396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092199
| 141
| 4
| 92
| 35.25
| 0.867188
| 0
| 0
| 0
| 0
| 0
| 0.15493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
b42c1a79c337482386b3d089c413a5fc4a4a523e
| 4,092
|
py
|
Python
|
tests/safe/test_config_parser.py
|
ddiss/rtslib
|
6fd0bbfc20947143eb2e4c3bfd34c65bf8551468
|
[
"Apache-2.0"
] | 21
|
2015-04-02T21:44:26.000Z
|
2020-03-30T12:43:02.000Z
|
tests/safe/test_config_parser.py
|
ddiss/rtslib
|
6fd0bbfc20947143eb2e4c3bfd34c65bf8551468
|
[
"Apache-2.0"
] | 17
|
2015-06-23T09:04:00.000Z
|
2020-01-04T19:31:34.000Z
|
tests/safe/test_config_parser.py
|
ddiss/rtslib
|
6fd0bbfc20947143eb2e4c3bfd34c65bf8551468
|
[
"Apache-2.0"
] | 18
|
2015-06-18T14:29:43.000Z
|
2021-03-25T19:51:18.000Z
|
import sys, pprint, logging, unittest, cPickle
from rtslib import config_parser
# TODO Add PolicyParser tests
logging.basicConfig()
log = logging.getLogger('TestConfigParser')
log.setLevel(logging.INFO)
class TestConfigParser(unittest.TestCase):
parser = config_parser.ConfigParser()
samples_dir = '../data'
def test_one_line(self):
print
log.info(self._testMethodName)
config = "%s/config_one_line.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_basic(self):
print
log.info(self._testMethodName)
config = "%s/config_basic.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_attribute_group(self):
print
log.info(self._testMethodName)
config = "%s/config_attribute_group.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_nested_blocks(self):
print
log.info(self._testMethodName)
config = "%s/config_nested_blocks.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_comments(self):
print
log.info(self._testMethodName)
config = "%s/config_comments.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_strings(self):
print
log.info(self._testMethodName)
config = "%s/config_strings.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
def test_complete(self):
print
log.info(self._testMethodName)
config = "%s/config_complete.lio" % self.samples_dir
parse_tree = self.parser.parse_file(config)
for statement in parse_tree:
log.debug(pprint.pformat(statement))
# with open("%s.ast" % config[:-4], 'w') as f:
# cPickle.dump(parse_tree, f)
with open("%s.ast" % config[:-4], 'r') as f:
expected_tree = cPickle.load(f)
self.failUnless(parse_tree == expected_tree)
if __name__ == '__main__':
unittest.main()
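# --- Hedged aside (not part of the original test module) ---
# The seven tests above share one body; a Python 3 sketch of the same
# check with the duplication factored out (pickle / assertEqual replace
# the Python 2 cPickle / failUnless used above). The sample names come
# from the tests themselves; everything else is illustrative.
import pickle
import unittest

class TestConfigParserDry(unittest.TestCase):
    samples = ['one_line', 'basic', 'attribute_group',
               'nested_blocks', 'comments', 'strings', 'complete']
    samples_dir = '../data'

    def test_all_samples(self):
        from rtslib import config_parser  # same import as the module above
        parser = config_parser.ConfigParser()
        for name in self.samples:
            with self.subTest(sample=name):
                config = "%s/config_%s.lio" % (self.samples_dir, name)
                parse_tree = parser.parse_file(config)
                # note: .ast files written by Python 2 cPickle may need
                # pickle.load(f, encoding='bytes') under Python 3
                with open("%s.ast" % config[:-4], 'rb') as f:
                    expected_tree = pickle.load(f)
                self.assertEqual(parse_tree, expected_tree)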
| 37.541284
| 67
| 0.603861
| 523
| 4,092
| 4.544933
| 0.126195
| 0.106016
| 0.053008
| 0.070677
| 0.833403
| 0.833403
| 0.833403
| 0.833403
| 0.833403
| 0.694994
| 0
| 0.004643
| 0.263196
| 4,092
| 108
| 68
| 37.888889
| 0.783748
| 0.138319
| 0
| 0.7
| 0
| 0
| 0.068946
| 0.040741
| 0
| 0
| 0
| 0.009259
| 0
| 1
| 0.0875
| false
| 0
| 0.025
| 0
| 0.15
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b42e18509f9f253682f3bbecf10ca37a07186740
| 305
|
py
|
Python
|
bdd/scenarios.py
|
tinytoon1/python
|
cc320fddea962fec97eb928e2c4ebbd5bad3ed43
|
[
"Apache-2.0"
] | null | null | null |
bdd/scenarios.py
|
tinytoon1/python
|
cc320fddea962fec97eb928e2c4ebbd5bad3ed43
|
[
"Apache-2.0"
] | null | null | null |
bdd/scenarios.py
|
tinytoon1/python
|
cc320fddea962fec97eb928e2c4ebbd5bad3ed43
|
[
"Apache-2.0"
] | null | null | null |
from pytest_bdd import scenario
from .steps import *
@scenario('contacts.feature', 'add contact')
def test_add_contact():
pass
@scenario('contacts.feature', 'delete contact')
def test_delete_contact():
pass
@scenario('contacts.feature', 'update contact')
def test_update_contact():
pass
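# --- Hedged aside (not part of the original module) ---
# `from .steps import *` above pulls in the step definitions that back
# these scenarios. A minimal sketch of what such a steps module can
# look like with pytest-bdd; the step wording and the `contacts`
# fixture are illustrative, not this repo's actual steps.
import pytest
from pytest_bdd import given, when, then, parsers

@pytest.fixture
def contacts():
    return []

@given('an empty contact list')
def empty_contacts(contacts):
    contacts.clear()

@when(parsers.parse('I add a contact named "{name}"'))
def add_contact(contacts, name):
    contacts.append(name)

@then(parsers.parse('the contact list contains "{name}"'))
def contact_present(contacts, name):
    assert name in contacts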
| 16.944444
| 47
| 0.731148
| 38
| 305
| 5.684211
| 0.394737
| 0.222222
| 0.319444
| 0.25
| 0.314815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144262
| 305
| 17
| 48
| 17.941176
| 0.827586
| 0
| 0
| 0.272727
| 0
| 0
| 0.285246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| true
| 0.272727
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b43b67cd4b5c3ec1adf9c36f8678e32bf2e82120
| 1,527
|
py
|
Python
|
DBmanagement_scripts/cleancsv2019.py
|
cemac/SWIFTDB
|
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
|
[
"MIT"
] | 2
|
2020-07-14T14:14:45.000Z
|
2021-05-13T13:01:51.000Z
|
DBmanagement_scripts/cleancsv2019.py
|
cemac-tech/SWIFTDB
|
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
|
[
"MIT"
] | 18
|
2019-02-07T10:28:19.000Z
|
2020-06-18T18:31:41.000Z
|
DBmanagement_scripts/cleancsv2019.py
|
cemac-tech/SWIFTDB
|
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
|
[
"MIT"
] | 1
|
2019-03-25T14:54:26.000Z
|
2019-03-25T14:54:26.000Z
|
'''
A script to take tab-delimited dumps of Lorraine's Excel worksheets and
tidy them up for postgres storage.
**In progress**
Known issues:
Encoding: Python defaults to utf-8 while Excel uses something else;
the current fix is to re-save the file in Atom!
Generalise:
Pass in the file name and extract header names rather than hard-coding them.
'''
import pandas as pd
import re
file_name = 'deliverables.tab'
# Read in the tab-delimited file
df = pd.read_csv(file_name, sep='\t')
# percent values are integers
df.percent = df.percent.fillna(0).astype(int)
# strip some trailing whitespace
df.partner = df.partner.str.strip()
# Partners need to match existing keys
# make everything upper case
df.partner = df.partner.apply(lambda x: x.upper())
up = ['UOL', 'UOR', 'GMET', 'NIMET', 'UON']
mixp = ['UoL', 'UoR', 'GMet', 'NiMet', 'UoN']
for i, p in enumerate(up):
df.partner = df.partner.replace(p, mixp[i])
df.to_csv(file_name, sep='\t', index=False, header=False)
file_name = 'tasks.tab'
# Read in the tab-delimited file
df = pd.read_csv(file_name, sep='\t')
# percent values are integers
df.percent = df.percent.fillna(0).astype(int)
# strip some trailing whitespace
df.partner = df.partner.str.strip()
# Partners need to match existing keys
# make everything upper case
df.partner = df.partner.apply(lambda x: x.upper())
up = ['UOL', 'UOR', 'GMET', 'NIMET', 'UON']
mixp = ['UoL', 'UoR', 'GMet', 'NiMet', 'UoN']
for i, p in enumerate(up):
df.partner = df.partner.replace(p, mixp[i])
df.to_csv(file_name, sep='\t', index=False, header=False)
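# --- Hedged aside (not part of the original script) ---
# The two blocks above are identical apart from the file name; a
# sketch of the "Generalise" TODO from the docstring, factoring the
# clean-up into one function:
import pandas as pd

UP = ['UOL', 'UOR', 'GMET', 'NIMET', 'UON']
MIXED = ['UoL', 'UoR', 'GMet', 'NiMet', 'UoN']

def clean_tab_file(file_name):
    df = pd.read_csv(file_name, sep='\t')
    df.percent = df.percent.fillna(0).astype(int)    # percent values are integers
    df.partner = df.partner.str.strip().str.upper()  # tidy partner keys
    for raw, fixed in zip(UP, MIXED):
        df.partner = df.partner.replace(raw, fixed)
    df.to_csv(file_name, sep='\t', index=False, header=False)

# usage, equivalent to the two blocks above (commented out because the
# script has already rewritten the files without headers at this point):
#     for name in ('deliverables.tab', 'tasks.tab'):
#         clean_tab_file(name)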
| 31.163265
| 69
| 0.709889
| 251
| 1,527
| 4.278884
| 0.398406
| 0.100559
| 0.061453
| 0.100559
| 0.703911
| 0.703911
| 0.703911
| 0.703911
| 0.703911
| 0.703911
| 0
| 0.002297
| 0.144728
| 1,527
| 48
| 70
| 31.8125
| 0.820061
| 0.409299
| 0
| 0.818182
| 0
| 0
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b44749f0c586def93090ab449574879aea07b2ee
| 229
|
py
|
Python
|
src/python/WMCore/WMBS/Oracle/Workflow/InsertOutput.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 21
|
2015-11-19T16:18:45.000Z
|
2021-12-02T18:20:39.000Z
|
src/python/WMCore/WMBS/Oracle/Workflow/InsertOutput.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 5,671
|
2015-01-06T14:38:52.000Z
|
2022-03-31T22:11:14.000Z
|
src/python/WMCore/WMBS/Oracle/Workflow/InsertOutput.py
|
khurtado/WMCore
|
f74e252412e49189a92962945a94f93bec81cd1e
|
[
"Apache-2.0"
] | 67
|
2015-01-21T15:55:38.000Z
|
2022-02-03T19:53:13.000Z
|
#!/usr/bin/env python
"""
_InsertOutput_
Oracle implementation of Workflow.InsertOutput
"""
from WMCore.WMBS.MySQL.Workflow.InsertOutput import InsertOutput as InsertOutputMySQL
class InsertOutput(InsertOutputMySQL):
pass
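# --- Hedged aside (not part of the original module) ---
# The empty subclass above is the WMCore DAO pattern: the MySQL DAO
# holds the SQL, and the Oracle backend only needs its own class so
# the DAO factory can resolve one per database dialect. A generic
# sketch of the shape (illustrative, not WMCore's actual base class):
class _InsertOutputMySQLSketch:
    sql = ("INSERT INTO workflow_output (workflow_id, output_id) "
           "VALUES (:workflow, :output)")

    def execute(self, binds, run):
        # `run` stands in for a DB-API execution callable
        return run(self.sql, binds)

class _InsertOutputOracleSketch(_InsertOutputMySQLSketch):
    # the MySQL statement happens to be valid Oracle SQL too,
    # so nothing is overridden
    pass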
| 19.083333
| 85
| 0.80786
| 24
| 229
| 7.625
| 0.75
| 0.218579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10917
| 229
| 11
| 86
| 20.818182
| 0.897059
| 0.362445
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b461a899f59ec2b1b1ba6985f723c5422f0fe3af
| 42,731
|
py
|
Python
|
airflow/providers/google/cloud/operators/datafusion.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 8,092
|
2016-04-27T20:32:29.000Z
|
2019-01-05T07:39:33.000Z
|
airflow/providers/google/cloud/operators/datafusion.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 2,961
|
2016-05-05T07:16:16.000Z
|
2019-01-05T08:47:59.000Z
|
airflow/providers/google/cloud/operators/datafusion.py
|
npodewitz/airflow
|
511ea702d5f732582d018dad79754b54d5e53f9d
|
[
"Apache-2.0"
] | 3,546
|
2016-05-04T20:33:16.000Z
|
2019-01-05T05:14:26.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google DataFusion operators."""
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from google.api_core.retry import exponential_sleep_generator
from googleapiclient.errors import HttpError
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.datafusion import SUCCESS_STATES, DataFusionHook, PipelineStates
from airflow.providers.google.cloud.links.base import BaseGoogleLink
if TYPE_CHECKING:
from airflow.utils.context import Context
BASE_LINK = "https://console.cloud.google.com/data-fusion"
DATAFUSION_INSTANCE_LINK = BASE_LINK + "/locations/{region}/instances/{instance_name}?project={project_id}"
DATAFUSION_PIPELINES_LINK = "{uri}/cdap/ns/default/pipelines"
DATAFUSION_PIPELINE_LINK = "{uri}/pipelines/ns/default/view/{pipeline_name}"
class DataFusionPipelineLinkHelper:
"""Helper class for Pipeline links"""
@staticmethod
def get_project_id(instance):
instance = instance["name"]
project_id = [x for x in instance.split("/") if x.startswith("airflow")][0]
return project_id
class DataFusionInstanceLink(BaseGoogleLink):
"""Helper class for constructing Data Fusion Instance link"""
name = "Data Fusion Instance"
key = "instance_conf"
format_str = DATAFUSION_INSTANCE_LINK
@staticmethod
def persist(
context: "Context",
task_instance: Union[
"CloudDataFusionRestartInstanceOperator",
"CloudDataFusionCreateInstanceOperator",
"CloudDataFusionUpdateInstanceOperator",
"CloudDataFusionGetInstanceOperator",
],
project_id: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionInstanceLink.key,
value={
"region": task_instance.location,
"instance_name": task_instance.instance_name,
"project_id": project_id,
},
)
class DataFusionPipelineLink(BaseGoogleLink):
"""Helper class for constructing Data Fusion Pipeline link"""
name = "Data Fusion Pipeline"
key = "pipeline_conf"
format_str = DATAFUSION_PIPELINE_LINK
@staticmethod
def persist(
context: "Context",
task_instance: Union[
"CloudDataFusionCreatePipelineOperator",
"CloudDataFusionStartPipelineOperator",
"CloudDataFusionStopPipelineOperator",
],
uri: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionPipelineLink.key,
value={
"uri": uri,
"pipeline_name": task_instance.pipeline_name,
},
)
class DataFusionPipelinesLink(BaseGoogleLink):
"""Helper class for constructing list of Data Fusion Pipelines link"""
name = "Data Fusion Pipelines"
key = "pipelines_conf"
format_str = DATAFUSION_PIPELINES_LINK
@staticmethod
def persist(
context: "Context",
task_instance: "CloudDataFusionListPipelinesOperator",
uri: str,
):
task_instance.xcom_push(
context=context,
key=DataFusionPipelinesLink.key,
value={
"uri": uri,
},
)
class CloudDataFusionRestartInstanceOperator(BaseOperator):
"""
Restart a single Data Fusion instance.
At the end of the operation the instance is fully restarted.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionRestartInstanceOperator`
:param instance_name: The name of the instance to restart.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Restarting Data Fusion instance: %s", self.instance_name)
operation = hook.restart_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s restarted successfully", self.instance_name)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(context=context, task_instance=self, project_id=project_id)
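# --- Hedged aside (not part of the original module) ---
# A minimal DAG sketch using the operator above; every value below is
# a placeholder, and the block is kept as comments so the module
# itself is unchanged:
#
# from datetime import datetime
# from airflow import DAG
#
# with DAG(
#     "datafusion_restart_example",
#     start_date=datetime(2022, 1, 1),
#     schedule_interval=None,
#     catchup=False,
# ) as dag:
#     restart = CloudDataFusionRestartInstanceOperator(
#         task_id="restart_instance",
#         instance_name="example-instance",
#         location="europe-west1",
#         project_id="example-project",
#     )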
class CloudDataFusionDeleteInstanceOperator(BaseOperator):
"""
Deletes a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionDeleteInstanceOperator`
:param instance_name: The name of the instance to delete.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Data Fusion instance: %s", self.instance_name)
operation = hook.delete_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
hook.wait_for_operation(operation)
self.log.info("Instance %s deleted successfully", self.instance_name)
class CloudDataFusionCreateInstanceOperator(BaseOperator):
"""
Creates a new Data Fusion instance in the specified project and location.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreateInstanceOperator`
:param instance_name: The name of the instance to create.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"instance",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
instance: Dict[str, Any],
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.instance = instance
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion instance: %s", self.instance_name)
try:
operation = hook.create_instance(
instance_name=self.instance_name,
instance=self.instance,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s created successfully", self.instance_name)
except HttpError as err:
if err.resp.status not in (409, '409'):
raise
self.log.info("Instance %s already exists", self.instance_name)
instance = hook.get_instance(
instance_name=self.instance_name, location=self.location, project_id=self.project_id
)
# Wait for instance to be ready
for time_to_wait in exponential_sleep_generator(initial=10, maximum=120):
if instance['state'] != 'CREATING':
break
sleep(time_to_wait)
instance = hook.get_instance(
instance_name=self.instance_name, location=self.location, project_id=self.project_id
)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(context=context, task_instance=self, project_id=project_id)
return instance
class CloudDataFusionUpdateInstanceOperator(BaseOperator):
"""
Updates a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionUpdateInstanceOperator`
:param instance_name: The name of the instance to update.
:param instance: An instance of Instance.
https://cloud.google.com/data-fusion/docs/reference/rest/v1beta1/projects.locations.instances#Instance
:param update_mask: Field mask is used to specify the fields that the update will overwrite
in an instance resource. The fields specified in the updateMask are relative to the resource,
not the full request. A field will be overwritten if it is in the mask. If the user does not
provide a mask, all the supported fields (labels and options currently) will be overwritten.
A comma-separated list of fully qualified names of fields. Example: "user.displayName,photo".
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?_ga=2.205612571.-968688242.1573564810#google.protobuf.FieldMask
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"instance",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
instance: Dict[str, Any],
update_mask: str,
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.update_mask = update_mask
self.instance_name = instance_name
self.instance = instance
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Updating Data Fusion instance: %s", self.instance_name)
operation = hook.patch_instance(
instance_name=self.instance_name,
instance=self.instance,
update_mask=self.update_mask,
location=self.location,
project_id=self.project_id,
)
instance = hook.wait_for_operation(operation)
self.log.info("Instance %s updated successfully", self.instance_name)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(context=context, task_instance=self, project_id=project_id)
class CloudDataFusionGetInstanceOperator(BaseOperator):
"""
Gets details of a single Data Fusion instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionGetInstanceOperator`
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param project_id: The ID of the Google Cloud project that the instance belongs to.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionInstanceLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Retrieving Data Fusion instance: %s", self.instance_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
project_id = self.project_id or DataFusionPipelineLinkHelper.get_project_id(instance)
DataFusionInstanceLink.persist(context=context, task_instance=self, project_id=project_id)
return instance
class CloudDataFusionCreatePipelineOperator(BaseOperator):
"""
Creates a Cloud Data Fusion pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionCreatePipelineOperator`
:param pipeline_name: Your pipeline name.
:param pipeline: The pipeline definition. For more information check:
https://docs.cdap.io/cdap/current/en/developer-manual/pipelines/developing-pipelines.html#pipeline-configuration-file-format
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelineLink(),)
def __init__(
self,
*,
pipeline_name: str,
pipeline: Dict[str, Any],
instance_name: str,
location: str,
namespace: str = "default",
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.pipeline = pipeline
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
hook.create_pipeline(
pipeline_name=self.pipeline_name,
pipeline=self.pipeline,
instance_url=api_url,
namespace=self.namespace,
)
DataFusionPipelineLink.persist(context=context, task_instance=self, uri=instance["serviceEndpoint"])
self.log.info("Pipeline created")
class CloudDataFusionDeletePipelineOperator(BaseOperator):
"""
Deletes a Cloud Data Fusion pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionDeletePipelineOperator`
:param pipeline_name: Your pipeline name.
:param version_id: Version of pipeline to delete
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"version_id",
"pipeline_name",
"impersonation_chain",
)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
version_id: Optional[str] = None,
namespace: str = "default",
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.version_id = version_id
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
hook.delete_pipeline(
pipeline_name=self.pipeline_name,
version_id=self.version_id,
instance_url=api_url,
namespace=self.namespace,
)
self.log.info("Pipeline deleted")
class CloudDataFusionListPipelinesOperator(BaseOperator):
"""
Lists Cloud Data Fusion pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionListPipelinesOperator`
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param artifact_version: Artifact version to filter instances
:param artifact_name: Artifact name to filter instances
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"artifact_name",
"artifact_version",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelinesLink(),)
def __init__(
self,
*,
instance_name: str,
location: str,
artifact_name: Optional[str] = None,
artifact_version: Optional[str] = None,
namespace: str = "default",
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.artifact_version = artifact_version
self.artifact_name = artifact_name
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Listing Data Fusion pipelines")
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
pipelines = hook.list_pipelines(
instance_url=api_url,
namespace=self.namespace,
artifact_version=self.artifact_version,
artifact_name=self.artifact_name,
)
self.log.info("%s", pipelines)
DataFusionPipelinesLink.persist(context=context, task_instance=self, uri=instance["serviceEndpoint"])
return pipelines
class CloudDataFusionStartPipelineOperator(BaseOperator):
"""
Starts a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionStartPipelineOperator`
:param pipeline_name: Your pipeline name.
:param instance_name: The name of the instance.
:param success_states: If provided the operator will wait for pipeline to be in one of
the provided states.
:param pipeline_timeout: How long (in seconds) operator should wait for the pipeline to be in one of
``success_states``. Works only if ``success_states`` are provided.
:param location: The Cloud Data Fusion location in which to handle the request.
:param runtime_args: Optional runtime args to be passed to the pipeline
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the pipeline ID to the Data Fusion API.
This is useful for submitting long-running pipelines and
waiting on them asynchronously using the CloudDataFusionPipelineStateSensor.
"""
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"runtime_args",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelineLink(),)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
runtime_args: Optional[Dict[str, Any]] = None,
success_states: Optional[List[str]] = None,
namespace: str = "default",
pipeline_timeout: int = 5 * 60,
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
asynchronous=False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.runtime_args = runtime_args
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.pipeline_timeout = pipeline_timeout
if success_states:
self.success_states = success_states
else:
self.success_states = SUCCESS_STATES + [PipelineStates.RUNNING]
def execute(self, context: 'Context') -> str:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Starting Data Fusion pipeline: %s", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
pipeline_id = hook.start_pipeline(
pipeline_name=self.pipeline_name,
instance_url=api_url,
namespace=self.namespace,
runtime_args=self.runtime_args,
)
self.log.info("Pipeline %s submitted successfully.", pipeline_id)
DataFusionPipelineLink.persist(context=context, task_instance=self, uri=instance["serviceEndpoint"])
if not self.asynchronous:
self.log.info("Waiting when pipeline %s will be in one of the success states", pipeline_id)
hook.wait_for_pipeline_state(
success_states=self.success_states,
pipeline_id=pipeline_id,
pipeline_name=self.pipeline_name,
namespace=self.namespace,
instance_url=api_url,
timeout=self.pipeline_timeout,
)
self.log.info("Job %s discover success state.", pipeline_id)
return pipeline_id
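# --- Hedged aside (not part of the original module) ---
# Sketch of starting a pipeline with runtime arguments; with
# asynchronous=True the task returns right after submission, and a
# CloudDataFusionPipelineStateSensor can wait on the state separately.
# All values below are placeholders:
#
# start = CloudDataFusionStartPipelineOperator(
#     task_id="start_pipeline",
#     pipeline_name="example_pipeline",
#     instance_name="example-instance",
#     location="europe-west1",
#     runtime_args={"input.path": "gs://example-bucket/in"},
#     asynchronous=True,
# )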
class CloudDataFusionStopPipelineOperator(BaseOperator):
"""
Stops a Cloud Data Fusion pipeline. Works for both batch and stream pipelines.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataFusionStopPipelineOperator`
:param pipeline_name: Your pipeline name.
:param instance_name: The name of the instance.
:param location: The Cloud Data Fusion location in which to handle the request.
:param namespace: If your pipeline belongs to a Basic edition instance, the namespace ID
is always default. If your pipeline belongs to an Enterprise edition instance, you
can create a namespace.
:param api_version: The version of the api that will be requested for example 'v3'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"instance_name",
"pipeline_name",
"impersonation_chain",
)
operator_extra_links = (DataFusionPipelineLink(),)
def __init__(
self,
*,
pipeline_name: str,
instance_name: str,
location: str,
namespace: str = "default",
project_id: Optional[str] = None,
api_version: str = "v1beta1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.pipeline_name = pipeline_name
self.namespace = namespace
self.instance_name = instance_name
self.location = location
self.project_id = project_id
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DataFusionHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Data Fusion pipeline: %s is going to be stopped", self.pipeline_name)
instance = hook.get_instance(
instance_name=self.instance_name,
location=self.location,
project_id=self.project_id,
)
api_url = instance["apiEndpoint"]
DataFusionPipelineLink.persist(context=context, task_instance=self, uri=instance["serviceEndpoint"])
hook.stop_pipeline(
pipeline_name=self.pipeline_name,
instance_url=api_url,
namespace=self.namespace,
)
self.log.info("Pipeline stopped")
| 42.816633
| 149
| 0.678383
| 5,166
| 42,731
| 5.453349
| 0.074913
| 0.037484
| 0.019168
| 0.017748
| 0.789543
| 0.77254
| 0.767855
| 0.757277
| 0.750958
| 0.732891
| 0
| 0.002569
| 0.253048
| 42,731
| 997
| 150
| 42.859579
| 0.880068
| 0.413821
| 0
| 0.705691
| 0
| 0
| 0.095264
| 0.018126
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039024
| false
| 0
| 0.013008
| 0
| 0.126829
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c35e120d31acd0d1ae05806ded43352553a41220
| 112
|
py
|
Python
|
user/models.py
|
ashwin-bitsathy/django---ecommerce
|
49ebb518ed5831639d560c27f2313f983fe27e86
|
[
"BSD-3-Clause"
] | null | null | null |
user/models.py
|
ashwin-bitsathy/django---ecommerce
|
49ebb518ed5831639d560c27f2313f983fe27e86
|
[
"BSD-3-Clause"
] | null | null | null |
user/models.py
|
ashwin-bitsathy/django---ecommerce
|
49ebb518ed5831639d560c27f2313f983fe27e86
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
| 22.4
| 44
| 0.803571
| 17
| 112
| 5.294118
| 0.588235
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151786
| 112
| 4
| 45
| 28
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c37098b3e27127a9c247f787f7b23e329f49b646
| 42
|
py
|
Python
|
pello/__init__.py
|
encukou/Pello
|
bac192da6288cea103eab05fc2ca0b7f37b78abf
|
[
"CC0-1.0"
] | null | null | null |
pello/__init__.py
|
encukou/Pello
|
bac192da6288cea103eab05fc2ca0b7f37b78abf
|
[
"CC0-1.0"
] | 3
|
2020-05-11T11:25:40.000Z
|
2020-08-05T09:26:01.000Z
|
pello/__init__.py
|
hrnciar/pello
|
d93051c271a2c163f903d7c3676739a0a331549e
|
[
"MIT"
] | null | null | null |
from pello.pello_greeting import greeting
| 21
| 41
| 0.880952
| 6
| 42
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c37d8a6b933810f9d544027015017376c08c6d6a
| 97
|
py
|
Python
|
editor/lib/juma/AssetEditor/converters/__init__.py
|
RazielSun/juma-editor
|
125720f7386f9f0a4cd3466a45c883d6d6020e33
|
[
"MIT"
] | null | null | null |
editor/lib/juma/AssetEditor/converters/__init__.py
|
RazielSun/juma-editor
|
125720f7386f9f0a4cd3466a45c883d6d6020e33
|
[
"MIT"
] | null | null | null |
editor/lib/juma/AssetEditor/converters/__init__.py
|
RazielSun/juma-editor
|
125720f7386f9f0a4cd3466a45c883d6d6020e33
|
[
"MIT"
] | 1
|
2022-03-31T00:50:23.000Z
|
2022-03-31T00:50:23.000Z
|
from PyAssimpConverter import PyAssimpConverter
from AnimationConverter import AnimationConverter
| 48.5
| 49
| 0.927835
| 8
| 97
| 11.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072165
| 97
| 2
| 49
| 48.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6f044e619fc0b5359a90973e556edad7df74210a
| 22
|
py
|
Python
|
src/__init__.py
|
michalsta/mocos_helper
|
e33f29246c70ce1dbd477eeed2374ba902e46cfe
|
[
"MIT"
] | 89
|
2020-02-06T09:24:10.000Z
|
2021-09-11T22:49:34.000Z
|
src/__init__.py
|
michalsta/mocos_helper
|
e33f29246c70ce1dbd477eeed2374ba902e46cfe
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
michalsta/mocos_helper
|
e33f29246c70ce1dbd477eeed2374ba902e46cfe
|
[
"MIT"
] | 8
|
2020-04-15T19:07:49.000Z
|
2020-10-12T10:06:55.000Z
|
from .random import *
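A star re-export like this pulls in whatever the submodule exposes. A common companion pattern, sketched here with hypothetical names (not taken from mocos_helper), is to pin the submodule's public surface with __all__:

# src/random.py (hypothetical contents)
__all__ = ["sample", "seed"]  # only these names escape via `from .random import *`

def sample():
    raise NotImplementedError

def seed():
    raise NotImplementedError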
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6f1e64a90a4fa6b96a01f729ac30110014ef1343
| 2,017
|
py
|
Python
|
test/test_index.py
|
grijul/arch-security-tracker
|
132d1088a982d406436a2d1a047e977d0b2f18ed
|
[
"MIT"
] | 80
|
2017-10-09T13:39:34.000Z
|
2022-03-30T08:28:25.000Z
|
test/test_index.py
|
grijul/arch-security-tracker
|
132d1088a982d406436a2d1a047e977d0b2f18ed
|
[
"MIT"
] | 103
|
2017-07-20T23:01:23.000Z
|
2022-03-29T15:45:44.000Z
|
test/test_index.py
|
grijul/arch-security-tracker
|
132d1088a982d406436a2d1a047e977d0b2f18ed
|
[
"MIT"
] | 28
|
2017-10-18T17:03:42.000Z
|
2022-03-18T17:36:48.000Z
|
from flask import url_for
from werkzeug.exceptions import NotFound
from .conftest import DEFAULT_GROUP_ID
from .conftest import DEFAULT_GROUP_NAME
from .conftest import create_group
from .conftest import create_package
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_index(db, client):
resp = client.get(url_for('tracker.index'), follow_redirects=True)
assert 200 == resp.status_code
assert DEFAULT_GROUP_NAME not in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_vulnerable(db, client):
resp = client.get(url_for('tracker.index_vulnerable'), follow_redirects=True)
assert 200 == resp.status_code
assert DEFAULT_GROUP_NAME in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_all(db, client):
resp = client.get(url_for('tracker.index_all'), follow_redirects=True)
assert 200 == resp.status_code
assert DEFAULT_GROUP_NAME in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_json(db, client):
    resp = client.get(url_for('tracker.index_json', only_vulnerable=False, path='all.json'), follow_redirects=True)
assert 200 == resp.status_code
data = resp.get_json()
assert len(data) == 1
assert data[0]['name'] == DEFAULT_GROUP_NAME
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3')
def test_index_vulnerable_json(db, client):
resp = client.get(url_for('tracker.index_vulnerable_json', path='vulnerable.json'), follow_redirects=True)
assert 200 == resp.status_code
data = resp.get_json()
assert len(data) == 1
assert data[0]['name'] == DEFAULT_GROUP_NAME
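create_package and create_group are decorator factories from tests/conftest.py (not shown here). A self-contained sketch of the pattern they rely on; the print stands in for the real database seeding:

import functools

def create_package(**attrs):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print("seeding package:", attrs)  # the real helper writes a row to the test db
            return func(*args, **kwargs)
        return wrapper
    return decorator

@create_package(name='foo', version='1.2.3-4')
def demo():
    return "test body runs after seeding"

demo()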
| 38.788462
| 115
| 0.733267
| 321
| 2,017
| 4.389408
| 0.161994
| 0.1022
| 0.023421
| 0.017033
| 0.857346
| 0.814762
| 0.814762
| 0.814762
| 0.814762
| 0.759404
| 0
| 0.035255
| 0.114031
| 2,017
| 51
| 116
| 39.54902
| 0.753218
| 0
| 0
| 0.55
| 0
| 0
| 0.118493
| 0.026277
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.125
| false
| 0
| 0.15
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f220c3cf0fed5d4ce8be1d1fdbb0ec07a827600
| 129
|
py
|
Python
|
homebot/dash/__init__.py
|
bearylogical/homebot
|
07e01df8de20764b3850a6181529a109c020f855
|
[
"MIT"
] | null | null | null |
homebot/dash/__init__.py
|
bearylogical/homebot
|
07e01df8de20764b3850a6181529a109c020f855
|
[
"MIT"
] | null | null | null |
homebot/dash/__init__.py
|
bearylogical/homebot
|
07e01df8de20764b3850a6181529a109c020f855
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
dash = Blueprint('dash', __name__, static_folder='static', static_url_path='')
from . import views
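The blueprint only becomes reachable once it is registered on the application; a minimal sketch (the url_prefix is an assumption, not taken from the snippet):

from flask import Flask
from homebot.dash import dash

app = Flask(__name__)
app.register_blueprint(dash, url_prefix="/dash")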
| 21.5
| 78
| 0.75969
| 17
| 129
| 5.352941
| 0.647059
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124031
| 129
| 5
| 79
| 25.8
| 0.80531
| 0
| 0
| 0
| 0
| 0
| 0.077519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
6f24d41f765bfb7415f1e24bec70c028dc933391
| 2,338
|
py
|
Python
|
tests/test_question_answering.py
|
easynlp/easynlp
|
4b3b405a64ca166cc19ee9c43b79a475cf699996
|
[
"MIT"
] | 6
|
2021-07-09T08:13:44.000Z
|
2021-11-10T04:09:33.000Z
|
tests/test_question_answering.py
|
easynlp/easynlp
|
4b3b405a64ca166cc19ee9c43b79a475cf699996
|
[
"MIT"
] | 1
|
2021-07-09T17:18:16.000Z
|
2021-07-09T17:18:16.000Z
|
tests/test_question_answering.py
|
easynlp/easynlp
|
4b3b405a64ca166cc19ee9c43b79a475cf699996
|
[
"MIT"
] | 1
|
2022-02-09T15:37:14.000Z
|
2022-02-09T15:37:14.000Z
|
import easynlp
def test_single_question_answering():
data = {
"text": [
"What is extractive question answering?",
],
"context": [
"""Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script.""",
],
}
input_column = "text"
context_column = "context"
output_column = "answer"
output_dataset = easynlp.question_answering(
data, input_column, context_column, output_column
)
answers = [
"the task of extracting an answer from a text given a question",
]
assert len(output_dataset) == 1
assert output_dataset[output_column] == answers
def test_question_answering():
data = {
"text": [
"What is extractive question answering?",
"What is a good example of a question answering dataset?",
],
"context": [
"""Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script.""",
"""Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script.""",
],
}
input_column = "text"
context_column = "context"
output_column = "answer"
output_dataset = easynlp.question_answering(
data, input_column, context_column, output_column
)
answers = [
"the task of extracting an answer from a text given a question",
"SQuAD dataset",
]
assert len(output_dataset) == 2
assert output_dataset[output_column] == answers
| 42.509091
| 128
| 0.664243
| 306
| 2,338
| 4.977124
| 0.176471
| 0.178595
| 0.088641
| 0.062377
| 0.933027
| 0.933027
| 0.860801
| 0.860801
| 0.860801
| 0.784636
| 0
| 0.001164
| 0.265184
| 2,338
| 54
| 129
| 43.296296
| 0.885332
| 0
| 0
| 0.636364
| 0
| 0
| 0.255962
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f2de558f0fb56b6b1140309e954d10f16785d66
| 97
|
py
|
Python
|
recursion/reverse_string.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
recursion/reverse_string.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
recursion/reverse_string.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
s = 'tomclancy'
def print_letter(strings):
    if not strings:  # base case: without it the recursion fails with an IndexError
        return
    print(strings[-1])
    print_letter(strings[:-1])
print_letter(s)  # prints the letters of s in reverse order
| 19.4
| 30
| 0.670103
| 13
| 97
| 4.846154
| 0.538462
| 0.349206
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 0.154639
| 97
| 5
| 30
| 19.4
| 0.743902
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.25
| 0.75
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6f3f84e67888ee647792baa9bd5d3d853847bb79
| 313
|
py
|
Python
|
utils/__init__.py
|
edwardyehuang/iSeg
|
256b0f7fdb6e854fe026fa8df41d9a4a55db34d5
|
[
"MIT"
] | 4
|
2021-12-13T09:49:26.000Z
|
2022-02-19T11:16:50.000Z
|
utils/__init__.py
|
edwardyehuang/iSeg
|
256b0f7fdb6e854fe026fa8df41d9a4a55db34d5
|
[
"MIT"
] | 1
|
2021-07-28T10:40:56.000Z
|
2021-08-09T07:14:06.000Z
|
utils/__init__.py
|
edwardyehuang/iSeg
|
256b0f7fdb6e854fe026fa8df41d9a4a55db34d5
|
[
"MIT"
] | null | null | null |
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
from iseg.utils.sugars import *
from iseg.utils.common import resize_image, simple_load_image
| 39.125
| 69
| 0.456869
| 25
| 313
| 5.6
| 0.76
| 0.114286
| 0.185714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.073482
| 313
| 7
| 70
| 44.714286
| 0.468966
| 0.667732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
6f6143b3480544510ea0cd43ede8c061b44a5767
| 1,964
|
py
|
Python
|
axitom/tests/test_rotate_coordinates.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 4
|
2019-09-06T16:31:11.000Z
|
2022-02-04T12:18:47.000Z
|
axitom/tests/test_rotate_coordinates.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 1
|
2019-08-08T12:30:33.000Z
|
2019-08-08T12:34:55.000Z
|
axitom/tests/test_rotate_coordinates.py
|
PolymerGuy/AXITOM
|
7682be5b21fa933b9bea4082fe9a830076431feb
|
[
"MIT"
] | 7
|
2019-08-21T20:51:12.000Z
|
2020-02-04T14:20:42.000Z
|
from unittest import TestCase
import numpy as np
from axitom.backprojection import rotate_coordinates
class Test_RotateCoordinates(TestCase):
def test_rotate_coordinates_90deg(self):
tol = 1e-9
n_coordinates = 10
rotation_angle = np.pi/2.
xs, ys = np.meshgrid(np.arange(n_coordinates),np.arange(n_coordinates))
xr,yr = rotate_coordinates(xs,ys,rotation_angle)
error_xs = np.abs(xr-ys)
error_ys = np.abs(yr+xs)
if np.max(error_xs) >tol or np.max(error_ys) >tol:
print("The maximum error in X after rotation was:",np.max(error_xs) )
print("The maximum error in Y after rotation was:",np.max(error_ys) )
self.fail()
def test_rotate_coordinates_0deg(self):
tol = 1e-9
n_coordinates = 10
rotation_angle = 0.0
xs, ys = np.meshgrid(np.arange(n_coordinates),np.arange(n_coordinates))
xr,yr = rotate_coordinates(xs,ys,rotation_angle)
error_xs = np.abs(xr-xs)
error_ys = np.abs(yr-ys)
if np.max(error_xs) >tol or np.max(error_ys) >tol:
print("The maximum error in X after rotation was:",np.max(error_xs) )
print("The maximum error in Y after rotation was:",np.max(error_ys) )
self.fail()
def test_rotate_coordinates_forward_and_reverse(self):
tol = 1e-9
n_coordinates = 10
rotation_angle = np.pi/4
xs, ys = np.meshgrid(np.arange(n_coordinates),np.arange(n_coordinates))
xr_forw,yr_forw = rotate_coordinates(xs,ys,rotation_angle)
xr,yr = rotate_coordinates(xr_forw,yr_forw,-rotation_angle)
error_xs = np.abs(xr-xs)
error_ys = np.abs(yr-ys)
if np.max(error_xs) >tol or np.max(error_ys) >tol:
print("The maximum error in X after rotation was:",np.max(error_xs) )
print("The maximum error in Y after rotation was:",np.max(error_ys) )
self.fail()
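Together the three tests pin down the convention: a rotation by +pi/2 maps (x, y) to (y, -x), i.e. a clockwise rotation, and forward-then-reverse composes to the identity. A NumPy sketch consistent with those expectations (the real implementation lives in axitom.backprojection):

import numpy as np

def rotate_coordinates_sketch(xs, ys, angle):
    # Clockwise rotation: angle = pi/2 sends (x, y) -> (y, -x), matching the tests.
    c, s = np.cos(angle), np.sin(angle)
    return xs * c + ys * s, -xs * s + ys * c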
| 36.37037
| 81
| 0.639511
| 298
| 1,964
| 4.030201
| 0.177852
| 0.049958
| 0.099917
| 0.099917
| 0.841799
| 0.802664
| 0.774355
| 0.774355
| 0.774355
| 0.743547
| 0
| 0.012943
| 0.252546
| 1,964
| 53
| 82
| 37.056604
| 0.805177
| 0
| 0
| 0.658537
| 0
| 0
| 0.128375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.073171
| 0
| 0.170732
| 0.146341
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48ad9da2050df47a06c5487872f04c3641ae6370
| 9,227
|
py
|
Python
|
tests/test_karma_parser.py
|
leoriviera/apollo
|
acd10d0e238b34bb61aaefa5d29db181fd171b79
|
[
"MIT"
] | null | null | null |
tests/test_karma_parser.py
|
leoriviera/apollo
|
acd10d0e238b34bb61aaefa5d29db181fd171b79
|
[
"MIT"
] | null | null | null |
tests/test_karma_parser.py
|
leoriviera/apollo
|
acd10d0e238b34bb61aaefa5d29db181fd171b79
|
[
"MIT"
] | null | null | null |
import os
import pytest
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from karma.parser import RawKarma, parse_message
from models import Base
@pytest.fixture(scope="module")
def database():
# Locate the testing config for Alembic
config = Config(os.path.join(os.path.dirname(__file__), "../alembic.tests.ini"))
# Set the migration secret key here
if not os.environ.get("SECRET_KEY", None):
os.environ["SECRET_KEY"] = "test"
# Start up the in-memory database instance
db_engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(db_engine)
db_session = Session(bind=db_engine)
# Mark it as up-to-date with migrations
command.stamp(config, "head")
return db_session
def test_empty(database):
assert parse_message("", database) is None
def test_no_karma(database):
assert parse_message("Hello, world!", database) is None
def test_no_karma_complex_sentence(database):
assert (
parse_message(
"Hello, world! This is a test input string with 30+ characters", database
)
is None
)
def test_empty_with_code_block(database):
assert parse_message("```FoobarBaz```", database) is None
def test_empty_with_inline_block(database):
assert parse_message("`FoobarBaz`", database) is None
def test_simple_positive(database):
assert parse_message("Foobar++", database) == [
RawKarma(name="Foobar", op="++", reason=None)
]
def test_simple_negative(database):
assert parse_message("Foobar--", database) == [
RawKarma(name="Foobar", op="--", reason=None)
]
def test_simple_neutral_pm(database):
assert parse_message("Foobar+-", database) == [
RawKarma(name="Foobar", op="+-", reason=None)
]
def test_simple_neutral_mp(database):
assert parse_message("Foobar-+", database) == [
RawKarma(name="Foobar", op="-+", reason=None)
]
def test_quoted_positive(database):
assert parse_message('"Foobar"++', database) == [
RawKarma(name="Foobar", op="++", reason=None)
]
def test_quoted_negative(database):
assert parse_message('"Foobar"--', database) == [
RawKarma(name="Foobar", op="--", reason=None)
]
def test_quoted_neutral_pm(database):
assert parse_message('"Foobar"+-', database) == [
RawKarma(name="Foobar", op="+-", reason=None)
]
def test_quoted_sentence_neutral_pm(database):
assert parse_message('"Foobar Baz"+-', database) == [
RawKarma(name="Foobar Baz", op="+-", reason=None)
]
def test_quoted_neutral_mp(database):
assert parse_message('"Foobar"-+', database) == [
RawKarma(name="Foobar", op="-+", reason=None)
]
def test_simple_positive_with_text_after(database):
assert parse_message("Foobar++ since it's pretty cool", database) == [
RawKarma(name="Foobar", op="++", reason=None)
]
def test_simple_positive_with_text_before(database):
assert parse_message("Since its pretty cool, foobar++", database) == [
RawKarma(name="foobar", op="++", reason=None)
]
def test_simple_positive_with_paren_reason(database):
assert parse_message("Foobar++ (hella cool)", database) == [
RawKarma(name="Foobar", op="++", reason="hella cool")
]
def test_simple_positive_with_quote_reason(database):
assert parse_message('Foobar++ "\'hella cool"', database) == [
RawKarma(name="Foobar", op="++", reason="'hella cool")
]
def test_simple_positive_with_paren_reason_and_comma(database):
assert parse_message("Foobar++ (hella, cool)", database) == [
RawKarma(name="Foobar", op="++", reason="hella, cool")
]
def test_simple_positive_with_empty_paren_reason(database):
assert parse_message("Foobar++ ()", database) == [
RawKarma(name="Foobar", op="++", reason=None)
]
def test_simple_positive_with_compound_reason(database):
assert parse_message("Foobar++ because it is (hella cool)", database) == [
RawKarma(name="Foobar", op="++", reason="it is (hella cool)")
]
def test_simple_positive_with_compound_reason_comma(database):
assert parse_message("Foobar++ because it, is (hella cool)", database) == [
RawKarma(name="Foobar", op="++", reason="it")
]
def test_simple_positive_with_reason(database):
assert parse_message("Foobar++ because baz", database) == [
RawKarma(name="Foobar", op="++", reason="baz")
]
def test_simple_positive_with_reason_quoted(database):
assert parse_message('Foobar++ because "baz"', database) == [
RawKarma(name="Foobar", op="++", reason="baz")
]
def test_simple_positive_with_reason_quoted_comma(database):
assert parse_message('Foobar++ because "baz, blat"', database) == [
RawKarma(name="Foobar", op="++", reason="baz, blat")
]
def test_simple_negative_with_reason(database):
assert parse_message("Foobar-- because baz", database) == [
RawKarma(name="Foobar", op="--", reason="baz")
]
def test_simple_neutral_pm_with_reason(database):
assert parse_message("Foobar+- because baz", database) == [
RawKarma(name="Foobar", op="+-", reason="baz")
]
def test_simple_neutral_mp_with_reason(database):
assert parse_message("Foobar-+ because baz", database) == [
RawKarma(name="Foobar", op="-+", reason="baz")
]
def test_quoted_positive_with_reason(database):
assert parse_message('"Foobar"++ because baz', database) == [
RawKarma(name="Foobar", op="++", reason="baz")
]
def test_quoted_negative_with_reason(database):
assert parse_message('"Foobar"-- because baz', database) == [
RawKarma(name="Foobar", op="--", reason="baz")
]
def test_quoted_neutral_pm_with_reason(database):
assert parse_message('"Foobar"+- because baz', database) == [
RawKarma(name="Foobar", op="+-", reason="baz")
]
def test_quoted_neutral_mp_with_reason(database):
assert parse_message('"Foobar"-+ because baz', database) == [
RawKarma(name="Foobar", op="-+", reason="baz")
]
def test_simple_multiple_karma(database):
assert parse_message("Foobar++, Baz-- Blat+-", database) == [
RawKarma(name="Foobar", op="++", reason=None),
RawKarma(name="Baz", op="--", reason=None),
RawKarma(name="Blat", op="+-", reason=None),
]
def test_simple_multiple_karma_with_reasons_and_quotes(database):
assert parse_message('Foobar++ because baz blat, "Hello world"--', database) == [
RawKarma(name="Foobar", op="++", reason="baz blat"),
RawKarma(name="Hello world", op="--", reason=None),
]
def test_complex_multiple_karma_no_reasons_quotes(database): # The Sinjo input
assert parse_message('Foobar++ "Hello world"--', database) == [
RawKarma(name="Foobar", op="++", reason=None),
RawKarma(name="Hello world", op="--", reason=None),
]
def test_complex_multiple_karma_no_reasons_quotes_no_comma_separation(database):
assert parse_message(
'"easy lover"++ "phil collins"++ "philip bailey"++', database
) == [
RawKarma(name="easy lover", op="++", reason=None),
RawKarma(name="phil collins", op="++", reason=None),
RawKarma(name="philip bailey", op="++", reason=None),
]
def test_complex_multiple_karma_with_reasons_and_quotes(database):
assert parse_message(
'Foobar++ because baz blat, "Hello world"-- for "foo, bar"', database
) == [
RawKarma(name="Foobar", op="++", reason="baz blat"),
RawKarma(name="Hello world", op="--", reason="foo, bar"),
]
def test_karma_op_no_token(database):
assert parse_message("++", database) is None
def test_simple_invalid(database):
assert parse_message("Foo+", database) is None
def test_simple_invalid_with_reason(database):
assert parse_message("Foo+ because baz", database) is None
def test_simple_quoted_invalid_with_reason(database):
assert parse_message('"Foo" because baz', database) is None
def test_string_starts_quoted_no_karma(database):
assert (
parse_message(
'"Starting the sentence with a quote but there is no karma here', database
)
is None
)
def test_start_simple_mid_message(database):
assert parse_message(
"Hello, world! Foo++ this is a mid-message karma", database
) == [RawKarma(name="Foo", op="++", reason=None)]
def test_start_simple_mid_message_with_reason(database):
assert parse_message(
"Hello, world! Foo++ because bar, this is a mid-message karma", database
) == [RawKarma(name="Foo", op="++", reason="bar")]
def test_code_block_with_internal_reason(database):
assert parse_message("```Foobar++ baz because foo```", database) is None
def test_code_block_with_karma_op_after(database):
assert parse_message("```Foobar baz```++", database) is None
def test_code_block_external_reason(database):
assert parse_message("```Foobar baz``` because foo", database) is None
def test_code_block_with_karma_op_after_and_external_reason(database):
assert parse_message("```Foobar baz```++ because foo", database) is None
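As a toy illustration of the token-plus-operator shape these tests exercise, a regex covering only the bare Name++/Name-- cases (the real karma.parser additionally handles quoting, reasons, and code blocks, which this sketch deliberately ignores):

import re

KARMA_RE = re.compile(r'(?P<name>\w+)(?P<op>\+\+|--|\+-|-\+)')

def toy_parse(message):
    return [(m['name'], m['op']) for m in KARMA_RE.finditer(message)]

assert toy_parse('Foobar++, Baz-- Blat+-') == [('Foobar', '++'), ('Baz', '--'), ('Blat', '+-')]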
| 29.573718
| 86
| 0.670641
| 1,132
| 9,227
| 5.219965
| 0.117491
| 0.099509
| 0.146218
| 0.206803
| 0.817566
| 0.790658
| 0.749704
| 0.664918
| 0.642918
| 0.612117
| 0
| 0.000264
| 0.17839
| 9,227
| 311
| 87
| 29.66881
| 0.779185
| 0.017991
| 0
| 0.182266
| 0
| 0
| 0.18222
| 0
| 0
| 0
| 0
| 0
| 0.236453
| 1
| 0.241379
| false
| 0
| 0.039409
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48c1563c45b1d57c5c559f77f493731d9c7a45e1
| 10,454
|
py
|
Python
|
services/events/migrations/versions/5da14a4e4c7a_.py
|
conbon/my-dev-space
|
604e0311bb006d8b5efe3322657ab631be3a3c02
|
[
"MIT"
] | 24
|
2018-06-27T22:50:04.000Z
|
2020-10-27T21:06:41.000Z
|
services/events/migrations/versions/5da14a4e4c7a_.py
|
conbon/my-dev-space
|
604e0311bb006d8b5efe3322657ab631be3a3c02
|
[
"MIT"
] | 398
|
2018-05-01T06:00:11.000Z
|
2021-03-01T21:31:26.000Z
|
services/events/migrations/versions/5da14a4e4c7a_.py
|
conbon/my-dev-space
|
604e0311bb006d8b5efe3322657ab631be3a3c02
|
[
"MIT"
] | 22
|
2018-06-27T20:42:07.000Z
|
2019-02-10T14:30:36.000Z
|
"""empty message
Revision ID: 5da14a4e4c7a
Revises:
Create Date: 2018-07-26 18:58:46.019785
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "5da14a4e4c7a"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"channel",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("url", sa.String(length=2048), nullable=False),
sa.Column("description", sa.String(length=50000), nullable=False),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.Column("deleted", sa.DateTime(), nullable=True),
sa.Column("source", sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"diversity",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("description", sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"entry",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("type", sa.String(length=128), nullable=False),
sa.Column("description", sa.String(length=1000), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"event",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("description", sa.String(length=50000), nullable=False),
sa.Column("url", sa.String(length=2048), nullable=False),
sa.Column("start", sa.DateTime(), nullable=False),
sa.Column("end", sa.DateTime(), nullable=False),
sa.Column("duration", sa.Integer(), nullable=False),
sa.Column("category", sa.String(length=256), nullable=False),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.Column("deleted", sa.DateTime(), nullable=True),
sa.Column("source", sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"meetup",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("logo", sa.String(length=1000), nullable=False),
sa.Column("url", sa.String(length=2048), nullable=False),
sa.Column("description", sa.String(length=50000), nullable=False),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.Column("deleted", sa.DateTime(), nullable=True),
sa.Column("source", sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"speaker",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("avatar", sa.String(length=1024), nullable=False),
sa.Column("bio", sa.String(length=1024), nullable=False),
sa.Column("contact", sa.String(length=128), nullable=False),
sa.Column("role", sa.String(length=128), nullable=False),
sa.Column("location", sa.String(length=128), nullable=False),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.Column("deleted", sa.DateTime(), nullable=True),
sa.Column("source", sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"topic",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("description", sa.String(length=1000), nullable=True),
sa.Column("abbreviation", sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"video",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column("name", sa.String(length=128), nullable=False),
sa.Column("url", sa.String(length=2048), nullable=False),
sa.Column("description", sa.String(length=50000), nullable=False),
sa.Column("created", sa.DateTime(), nullable=False),
sa.Column("updated", sa.DateTime(), nullable=False),
sa.Column("deleted", sa.DateTime(), nullable=True),
sa.Column("source", sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"channel_topic_association",
sa.Column("channel_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("topic_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["channel_id"], ["channel.id"]),
sa.ForeignKeyConstraint(["topic_id"], ["topic.id"]),
)
op.create_table(
"event_entry_association",
sa.Column("event_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("entry_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["entry_id"], ["entry.id"]),
sa.ForeignKeyConstraint(["event_id"], ["event.id"]),
)
op.create_table(
"event_meetup_association",
sa.Column("event_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("meetup_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["event_id"], ["event.id"]),
sa.ForeignKeyConstraint(["meetup_id"], ["meetup.id"]),
)
op.create_table(
"event_topic_association",
sa.Column("event_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("topic_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["event_id"], ["event.id"]),
sa.ForeignKeyConstraint(["topic_id"], ["topic.id"]),
)
op.create_table(
"meetup_channel_association",
sa.Column("meetup_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("channel_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["channel_id"], ["channel.id"]),
sa.ForeignKeyConstraint(["meetup_id"], ["meetup.id"]),
)
op.create_table(
"meetup_event_association",
sa.Column("meetup_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("event_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["event_id"], ["event.id"]),
sa.ForeignKeyConstraint(["meetup_id"], ["meetup.id"]),
)
op.create_table(
"meetup_topic_association",
sa.Column("meetup_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("topic_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["meetup_id"], ["meetup.id"]),
sa.ForeignKeyConstraint(["topic_id"], ["topic.id"]),
)
op.create_table(
"speaker_diversity_association",
sa.Column("speaker_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("diversity_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["diversity_id"], ["diversity.id"]),
sa.ForeignKeyConstraint(["speaker_id"], ["speaker.id"]),
)
op.create_table(
"speaker_topic_association",
sa.Column("speaker_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("topic_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["speaker_id"], ["speaker.id"]),
sa.ForeignKeyConstraint(["topic_id"], ["topic.id"]),
)
op.create_table(
"video_channel_association",
sa.Column("video_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("channel_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["channel_id"], ["channel.id"]),
sa.ForeignKeyConstraint(["video_id"], ["video.id"]),
)
op.create_table(
"video_topic_association",
sa.Column("video_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.Column("topic_id", postgresql.UUID(as_uuid=True), nullable=True),
sa.ForeignKeyConstraint(["topic_id"], ["topic.id"]),
sa.ForeignKeyConstraint(["video_id"], ["video.id"]),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("video_topic_association")
op.drop_table("video_channel_association")
op.drop_table("speaker_topic_association")
op.drop_table("speaker_diversity_association")
op.drop_table("meetup_topic_association")
op.drop_table("meetup_event_association")
op.drop_table("meetup_channel_association")
op.drop_table("event_topic_association")
op.drop_table("event_meetup_association")
op.drop_table("event_entry_association")
op.drop_table("channel_topic_association")
op.drop_table("video")
op.drop_table("topic")
op.drop_table("speaker")
op.drop_table("meetup")
op.drop_table("event")
op.drop_table("entry")
op.drop_table("diversity")
op.drop_table("channel")
# ### end Alembic commands ###
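Migrations like this one are applied with the Alembic CLI (`alembic upgrade head`) or programmatically; a sketch assuming an alembic.ini in the working directory:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "head")      # runs upgrade() above
# command.downgrade(cfg, "base")  # would run downgrade()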
| 41.320158
| 80
| 0.623876
| 1,212
| 10,454
| 5.230198
| 0.078383
| 0.100962
| 0.115949
| 0.145764
| 0.873482
| 0.814009
| 0.775517
| 0.767313
| 0.718725
| 0.718725
| 0
| 0.017937
| 0.210733
| 10,454
| 252
| 81
| 41.484127
| 0.750333
| 0.027071
| 0
| 0.651064
| 0
| 0
| 0.171107
| 0.053483
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008511
| false
| 0
| 0.012766
| 0
| 0.021277
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48ea7a5c78b44d58ffa20048da1398281f5072a1
| 73,164
|
py
|
Python
|
gen/PSLexer.py
|
Na2CuCl4/latex2sympy
|
40f3b16ad13f8ab12d7704bb422cf8580b45b380
|
[
"MIT"
] | 26
|
2021-05-12T09:48:28.000Z
|
2022-03-31T08:33:57.000Z
|
gen/PSLexer.py
|
Na2CuCl4/latex2sympy
|
40f3b16ad13f8ab12d7704bb422cf8580b45b380
|
[
"MIT"
] | null | null | null |
gen/PSLexer.py
|
Na2CuCl4/latex2sympy
|
40f3b16ad13f8ab12d7704bb422cf8580b45b380
|
[
"MIT"
] | 3
|
2021-10-09T03:16:53.000Z
|
2022-02-18T13:23:40.000Z
|
# Generated from PS.g4 by ANTLR 4.7.2
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"\u0087\u06c3\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6")
buf.write(u"\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t")
buf.write(u"\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4")
buf.write(u"\22\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27")
buf.write(u"\t\27\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t")
buf.write(u"\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"")
buf.write(u"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4")
buf.write(u"+\t+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62")
buf.write(u"\t\62\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67\t")
buf.write(u"\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4")
buf.write(u"@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH")
buf.write(u"\4I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\t")
buf.write(u"Q\4R\tR\4S\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z")
buf.write(u"\tZ\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\t")
buf.write(u"b\4c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k")
buf.write(u"\tk\4l\tl\4m\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4")
buf.write(u"t\tt\4u\tu\4v\tv\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|")
buf.write(u"\4}\t}\4~\t~\4\177\t\177\4\u0080\t\u0080\4\u0081\t\u0081")
buf.write(u"\4\u0082\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084\4\u0085")
buf.write(u"\t\u0085\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088")
buf.write(u"\4\u0089\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c")
buf.write(u"\t\u008c\4\u008d\t\u008d\4\u008e\t\u008e\4\u008f\t\u008f")
buf.write(u"\4\u0090\t\u0090\4\u0091\t\u0091\3\2\3\2\3\2\3\3\3\3")
buf.write(u"\3\4\6\4\u012a\n\4\r\4\16\4\u012b\3\4\3\4\3\5\3\5\3\5")
buf.write(u"\3\5\3\5\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13")
buf.write(u"\3\13\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3\r\3\r")
buf.write(u"\3\r\3\r\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\20\3")
buf.write(u"\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22")
buf.write(u"\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3")
buf.write(u"\25\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27")
buf.write(u"\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\31\3")
buf.write(u"\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32")
buf.write(u"\3\32\3\32\3\33\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3")
buf.write(u"\34\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35")
buf.write(u"\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3\36\3")
buf.write(u"\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37")
buf.write(u"\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3!")
buf.write(u"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3#\3#")
buf.write(u"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\3$\3$\3%\3%\3%\3%\3%\3")
buf.write(u"%\3%\3&\3&\3&\3&\3&\3&\3&\3\'\3\'\3\'\3\'\3\'\3\'\3\'")
buf.write(u"\3\'\3(\3(\3(\3(\3(\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)")
buf.write(u"\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3")
buf.write(u")\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)\3)")
buf.write(u"\3)\3)\3)\3)\3)\3)\3)\3)\3)\5)\u0233\n)\3*\3*\3*\3*\3")
buf.write(u"*\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3.")
buf.write(u"\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3\60\3\61")
buf.write(u"\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\62\3\63\3")
buf.write(u"\63\3\63\3\63\3\63\3\64\3\64\3\64\3\64\3\64\3\65\3\65")
buf.write(u"\3\65\3\65\3\65\3\66\3\66\3\66\3\66\3\66\3\66\3\66\3")
buf.write(u"\66\3\67\3\67\3\67\3\67\3\67\3\67\3\67\3\67\38\38\38")
buf.write(u"\38\38\38\38\38\39\39\39\39\39\39\39\39\3:\3:\3:\3:\3")
buf.write(u":\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3;\3<\3<\3<\3<\3<\3<")
buf.write(u"\3=\3=\3=\3=\3=\3=\3>\3>\3>\3>\3>\3>\3?\3?\3?\3?\3?\3")
buf.write(u"?\3?\3?\3@\3@\3@\3@\3@\3@\3@\3@\3A\3A\3A\3A\3A\3A\3A")
buf.write(u"\3A\3B\3B\3B\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3C\3C\3C\3")
buf.write(u"C\3C\3D\3D\3D\3D\3D\3D\3D\3D\3D\3E\3E\3E\3E\3E\3E\3E")
buf.write(u"\3F\3F\3F\3F\3F\3F\3F\3F\3G\3G\3G\3G\3G\3G\3G\3H\3H\3")
buf.write(u"H\3H\3H\3H\3H\3H\3I\3I\3I\3I\3I\3I\3I\3J\3J\3J\3J\3J")
buf.write(u"\3J\3J\3J\3K\3K\3K\3K\3L\3L\3L\3L\3M\3M\3M\3M\3M\3M\3")
buf.write(u"N\3N\3N\3N\3N\3O\3O\3O\3O\3O\3O\3P\3P\3P\3P\3P\3Q\3Q")
buf.write(u"\3Q\3Q\3Q\3R\3R\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3T\3")
buf.write(u"T\3T\3T\3T\3U\3U\3U\3U\3U\3V\3V\3V\3V\3V\3V\3V\3W\3W")
buf.write(u"\3W\3W\3W\3W\3X\3X\3X\3X\3X\3Y\3Y\3Y\3Y\3Y\3Y\3Z\3Z\3")
buf.write(u"Z\3Z\3Z\3Z\3Z\3[\3[\3[\3[\3[\3[\3[\3[\3\\\3\\\3\\\3\\")
buf.write(u"\3\\\3]\3]\3]\3]\3]\3]\3]\3]\3^\3^\3^\3^\3^\3^\3^\3^")
buf.write(u"\3^\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3_\3`\3`\3`\3`\3")
buf.write(u"`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3a\3b\3b\3b\3b\3b\3b")
buf.write(u"\3b\3b\3c\3c\3c\5c\u03b1\nc\3d\3d\3d\3d\3d\3d\3d\3d\3")
buf.write(u"d\3d\3d\3e\3e\3e\3e\3e\3e\3e\3e\3e\3f\3f\3f\3f\3f\3f")
buf.write(u"\3f\3f\3f\3f\3f\3g\3g\3g\3g\3g\3g\3g\3g\3g\3h\3h\3i\3")
buf.write(u"i\3i\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j\3j")
buf.write(u"\3j\3j\3j\3j\3j\3j\3j\3j\5j\u03f8\nj\3k\3k\3k\3k\3k\3")
buf.write(u"k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k")
buf.write(u"\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\3k\5k\u041e\nk\3")
buf.write(u"l\3l\3m\3m\3m\3m\3m\3m\3m\3m\3m\3m\3n\3n\3n\3n\3n\3o")
buf.write(u"\3o\3p\3p\3q\3q\3r\3r\3s\3s\3t\3t\3u\3u\3v\3v\7v\u0441")
buf.write(u"\nv\fv\16v\u0444\13v\3v\3v\3v\6v\u0449\nv\rv\16v\u044a")
buf.write(u"\5v\u044d\nv\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3w\3")
buf.write(u"w\5w\u045d\nw\3x\3x\3y\3y\3z\3z\3{\3{\3|\6|\u0468\n|")
buf.write(u"\r|\16|\u0469\3|\3|\3|\3|\3|\7|\u0471\n|\f|\16|\u0474")
buf.write(u"\13|\3|\7|\u0477\n|\f|\16|\u047a\13|\3|\3|\3|\3|\3|\7")
buf.write(u"|\u0481\n|\f|\16|\u0484\13|\3|\3|\6|\u0488\n|\r|\16|")
buf.write(u"\u0489\5|\u048c\n|\3}\3}\3}\3}\5}\u0492\n}\3}\6}\u0495")
buf.write(u"\n}\r}\16}\u0496\3~\3~\3\177\3\177\3\177\3\177\3\177")
buf.write(u"\3\177\3\177\3\177\5\177\u04a3\n\177\3\u0080\3\u0080")
buf.write(u"\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write(u"\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081\3\u0081")
buf.write(u"\3\u0081\3\u0081\5\u0081\u04b7\n\u0081\3\u0082\3\u0082")
buf.write(u"\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write(u"\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083\3\u0083")
buf.write(u"\3\u0083\3\u0083\5\u0083\u04cb\n\u0083\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084")
buf.write(u"\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\3\u0084\5\u0084")
buf.write(u"\u04e3\n\u0084\3\u0085\3\u0085\3\u0086\3\u0086\3\u0086")
buf.write(u"\3\u0087\3\u0087\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088")
buf.write(u"\3\u0088\3\u0088\3\u0088\5\u0088\u066e\n\u0088\3\u0089")
buf.write(u"\3\u0089\5\u0089\u0672\n\u0089\3\u008a\3\u008a\3\u008a")
buf.write(u"\3\u008a\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b\3\u008b")
buf.write(u"\3\u008b\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c\3\u008c")
buf.write(u"\3\u008c\5\u008c\u0686\n\u008c\3\u008d\3\u008d\3\u008d")
buf.write(u"\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d")
buf.write(u"\3\u008e\3\u008e\3\u008e\5\u008e\u0695\n\u008e\3\u008f")
buf.write(u"\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f\3\u008f")
buf.write(u"\3\u008f\3\u008f\3\u0090\3\u0090\3\u0090\6\u0090\u06a4")
buf.write(u"\n\u0090\r\u0090\16\u0090\u06a5\3\u0090\3\u0090\3\u0090")
buf.write(u"\3\u0090\3\u0090\3\u0090\6\u0090\u06ae\n\u0090\r\u0090")
buf.write(u"\16\u0090\u06af\3\u0090\3\u0090\3\u0090\3\u0090\3\u0090")
buf.write(u"\5\u0090\u06b7\n\u0090\5\u0090\u06b9\n\u0090\5\u0090")
buf.write(u"\u06bb\n\u0090\3\u0091\3\u0091\3\u0091\3\u0091\3\u0091")
buf.write(u"\5\u0091\u06c2\n\u0091\3\u0442\2\u0092\3\3\5\4\7\5\t")
buf.write(u"\6\13\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35")
buf.write(u"\20\37\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33")
buf.write(u"\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.")
buf.write(u"[/]\60_\61a\62c\63e\64g\65i\66k\67m8o9q:s;u<w=y>{?}@")
buf.write(u"\177A\u0081B\u0083C\u0085D\u0087E\u0089F\u008bG\u008d")
buf.write(u"H\u008fI\u0091J\u0093K\u0095L\u0097M\u0099N\u009bO\u009d")
buf.write(u"P\u009fQ\u00a1R\u00a3S\u00a5T\u00a7U\u00a9V\u00abW\u00ad")
buf.write(u"X\u00afY\u00b1Z\u00b3[\u00b5\\\u00b7]\u00b9^\u00bb_\u00bd")
buf.write(u"`\u00bfa\u00c1b\u00c3c\u00c5d\u00c7e\u00c9f\u00cbg\u00cd")
buf.write(u"h\u00cfi\u00d1j\u00d3k\u00d5l\u00d7m\u00d9n\u00dbo\u00dd")
buf.write(u"p\u00dfq\u00e1r\u00e3s\u00e5t\u00e7u\u00e9\2\u00ebv\u00ed")
buf.write(u"w\u00efx\u00f1y\u00f3\2\u00f5\2\u00f7z\u00f9{\u00fb|")
buf.write(u"\u00fd}\u00ff~\u0101\177\u0103\u0080\u0105\u0081\u0107")
buf.write(u"\u0082\u0109\u0083\u010b\2\u010d\u0084\u010f\2\u0111")
buf.write(u"\u0085\u0113\2\u0115\2\u0117\2\u0119\2\u011b\u0086\u011d")
buf.write(u"\2\u011f\2\u0121\u0087\3\2\b\5\2\13\f\17\17\"\"\4\2e")
buf.write(u"ett\4\2C\\c|\6\2CFH\\cfh|\3\2\62;\3\2\"\"\2\u071d\2\3")
buf.write(u"\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2")
buf.write(u"\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2")
buf.write(u"\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2")
buf.write(u"\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2")
buf.write(u"%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2")
buf.write(u"\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2")
buf.write(u"\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2")
buf.write(u"\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3")
buf.write(u"\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2")
buf.write(u"S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2")
buf.write(u"\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2")
buf.write(u"\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2")
buf.write(u"\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3")
buf.write(u"\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081\3")
buf.write(u"\2\2\2\2\u0083\3\2\2\2\2\u0085\3\2\2\2\2\u0087\3\2\2")
buf.write(u"\2\2\u0089\3\2\2\2\2\u008b\3\2\2\2\2\u008d\3\2\2\2\2")
buf.write(u"\u008f\3\2\2\2\2\u0091\3\2\2\2\2\u0093\3\2\2\2\2\u0095")
buf.write(u"\3\2\2\2\2\u0097\3\2\2\2\2\u0099\3\2\2\2\2\u009b\3\2")
buf.write(u"\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2\2\2\u00a1\3\2\2\2")
buf.write(u"\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7\3\2\2\2\2\u00a9")
buf.write(u"\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2\2\2\u00af\3\2")
buf.write(u"\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5\3\2\2\2")
buf.write(u"\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2\2\2\u00bd")
buf.write(u"\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3\3\2")
buf.write(u"\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2\2")
buf.write(u"\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\2\u00d1")
buf.write(u"\3\2\2\2\2\u00d3\3\2\2\2\2\u00d5\3\2\2\2\2\u00d7\3\2")
buf.write(u"\2\2\2\u00d9\3\2\2\2\2\u00db\3\2\2\2\2\u00dd\3\2\2\2")
buf.write(u"\2\u00df\3\2\2\2\2\u00e1\3\2\2\2\2\u00e3\3\2\2\2\2\u00e5")
buf.write(u"\3\2\2\2\2\u00e7\3\2\2\2\2\u00eb\3\2\2\2\2\u00ed\3\2")
buf.write(u"\2\2\2\u00ef\3\2\2\2\2\u00f1\3\2\2\2\2\u00f7\3\2\2\2")
buf.write(u"\2\u00f9\3\2\2\2\2\u00fb\3\2\2\2\2\u00fd\3\2\2\2\2\u00ff")
buf.write(u"\3\2\2\2\2\u0101\3\2\2\2\2\u0103\3\2\2\2\2\u0105\3\2")
buf.write(u"\2\2\2\u0107\3\2\2\2\2\u0109\3\2\2\2\2\u010d\3\2\2\2")
buf.write(u"\2\u0111\3\2\2\2\2\u011b\3\2\2\2\2\u0121\3\2\2\2\3\u0123")
buf.write(u"\3\2\2\2\5\u0126\3\2\2\2\7\u0129\3\2\2\2\t\u012f\3\2")
buf.write(u"\2\2\13\u0134\3\2\2\2\r\u0136\3\2\2\2\17\u0138\3\2\2")
buf.write(u"\2\21\u013a\3\2\2\2\23\u013c\3\2\2\2\25\u013e\3\2\2\2")
buf.write(u"\27\u0140\3\2\2\2\31\u0148\3\2\2\2\33\u0150\3\2\2\2\35")
buf.write(u"\u0152\3\2\2\2\37\u0154\3\2\2\2!\u0157\3\2\2\2#\u015a")
buf.write(u"\3\2\2\2%\u0162\3\2\2\2\'\u016a\3\2\2\2)\u016c\3\2\2")
buf.write(u"\2+\u016e\3\2\2\2-\u0176\3\2\2\2/\u017e\3\2\2\2\61\u0180")
buf.write(u"\3\2\2\2\63\u0187\3\2\2\2\65\u018e\3\2\2\2\67\u0194\3")
buf.write(u"\2\2\29\u019c\3\2\2\2;\u01a4\3\2\2\2=\u01ae\3\2\2\2?")
buf.write(u"\u01b8\3\2\2\2A\u01bf\3\2\2\2C\u01c6\3\2\2\2E\u01d0\3")
buf.write(u"\2\2\2G\u01da\3\2\2\2I\u01e0\3\2\2\2K\u01e7\3\2\2\2M")
buf.write(u"\u01ee\3\2\2\2O\u01f6\3\2\2\2Q\u0232\3\2\2\2S\u0234\3")
buf.write(u"\2\2\2U\u0239\3\2\2\2W\u023e\3\2\2\2Y\u0244\3\2\2\2[")
buf.write(u"\u0249\3\2\2\2]\u024d\3\2\2\2_\u0252\3\2\2\2a\u0257\3")
buf.write(u"\2\2\2c\u025c\3\2\2\2e\u0261\3\2\2\2g\u0266\3\2\2\2i")
buf.write(u"\u026b\3\2\2\2k\u0270\3\2\2\2m\u0278\3\2\2\2o\u0280\3")
buf.write(u"\2\2\2q\u0288\3\2\2\2s\u0290\3\2\2\2u\u0298\3\2\2\2w")
buf.write(u"\u02a0\3\2\2\2y\u02a6\3\2\2\2{\u02ac\3\2\2\2}\u02b2\3")
buf.write(u"\2\2\2\177\u02ba\3\2\2\2\u0081\u02c2\3\2\2\2\u0083\u02ca")
buf.write(u"\3\2\2\2\u0085\u02d3\3\2\2\2\u0087\u02dc\3\2\2\2\u0089")
buf.write(u"\u02e5\3\2\2\2\u008b\u02ec\3\2\2\2\u008d\u02f4\3\2\2")
buf.write(u"\2\u008f\u02fb\3\2\2\2\u0091\u0303\3\2\2\2\u0093\u030a")
buf.write(u"\3\2\2\2\u0095\u0312\3\2\2\2\u0097\u0316\3\2\2\2\u0099")
buf.write(u"\u031a\3\2\2\2\u009b\u0320\3\2\2\2\u009d\u0325\3\2\2")
buf.write(u"\2\u009f\u032b\3\2\2\2\u00a1\u0330\3\2\2\2\u00a3\u0335")
buf.write(u"\3\2\2\2\u00a5\u033c\3\2\2\2\u00a7\u0342\3\2\2\2\u00a9")
buf.write(u"\u0347\3\2\2\2\u00ab\u034c\3\2\2\2\u00ad\u0353\3\2\2")
buf.write(u"\2\u00af\u0359\3\2\2\2\u00b1\u035e\3\2\2\2\u00b3\u0364")
buf.write(u"\3\2\2\2\u00b5\u036b\3\2\2\2\u00b7\u0373\3\2\2\2\u00b9")
buf.write(u"\u0378\3\2\2\2\u00bb\u0380\3\2\2\2\u00bd\u038e\3\2\2")
buf.write(u"\2\u00bf\u0395\3\2\2\2\u00c1\u039d\3\2\2\2\u00c3\u03a5")
buf.write(u"\3\2\2\2\u00c5\u03b0\3\2\2\2\u00c7\u03b2\3\2\2\2\u00c9")
buf.write(u"\u03bd\3\2\2\2\u00cb\u03c6\3\2\2\2\u00cd\u03d1\3\2\2")
buf.write(u"\2\u00cf\u03da\3\2\2\2\u00d1\u03dc\3\2\2\2\u00d3\u03f7")
buf.write(u"\3\2\2\2\u00d5\u041d\3\2\2\2\u00d7\u041f\3\2\2\2\u00d9")
buf.write(u"\u0421\3\2\2\2\u00db\u042b\3\2\2\2\u00dd\u0430\3\2\2")
buf.write(u"\2\u00df\u0432\3\2\2\2\u00e1\u0434\3\2\2\2\u00e3\u0436")
buf.write(u"\3\2\2\2\u00e5\u0438\3\2\2\2\u00e7\u043a\3\2\2\2\u00e9")
buf.write(u"\u043c\3\2\2\2\u00eb\u043e\3\2\2\2\u00ed\u045c\3\2\2")
buf.write(u"\2\u00ef\u045e\3\2\2\2\u00f1\u0460\3\2\2\2\u00f3\u0462")
buf.write(u"\3\2\2\2\u00f5\u0464\3\2\2\2\u00f7\u048b\3\2\2\2\u00f9")
buf.write(u"\u048d\3\2\2\2\u00fb\u0498\3\2\2\2\u00fd\u04a2\3\2\2")
buf.write(u"\2\u00ff\u04a4\3\2\2\2\u0101\u04b6\3\2\2\2\u0103\u04b8")
buf.write(u"\3\2\2\2\u0105\u04ca\3\2\2\2\u0107\u04e2\3\2\2\2\u0109")
buf.write(u"\u04e4\3\2\2\2\u010b\u04e6\3\2\2\2\u010d\u04e9\3\2\2")
buf.write(u"\2\u010f\u066d\3\2\2\2\u0111\u066f\3\2\2\2\u0113\u0673")
buf.write(u"\3\2\2\2\u0115\u0677\3\2\2\2\u0117\u0685\3\2\2\2\u0119")
buf.write(u"\u0687\3\2\2\2\u011b\u0694\3\2\2\2\u011d\u0696\3\2\2")
buf.write(u"\2\u011f\u06a3\3\2\2\2\u0121\u06bc\3\2\2\2\u0123\u0124")
buf.write(u"\7`\2\2\u0124\u0125\7V\2\2\u0125\4\3\2\2\2\u0126\u0127")
buf.write(u"\7)\2\2\u0127\6\3\2\2\2\u0128\u012a\t\2\2\2\u0129\u0128")
buf.write(u"\3\2\2\2\u012a\u012b\3\2\2\2\u012b\u0129\3\2\2\2\u012b")
buf.write(u"\u012c\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012e\b\4\2")
buf.write(u"\2\u012e\b\3\2\2\2\u012f\u0130\7^\2\2\u0130\u0131\7&")
buf.write(u"\2\2\u0131\u0132\3\2\2\2\u0132\u0133\b\5\2\2\u0133\n")
buf.write(u"\3\2\2\2\u0134\u0135\7-\2\2\u0135\f\3\2\2\2\u0136\u0137")
buf.write(u"\7/\2\2\u0137\16\3\2\2\2\u0138\u0139\7,\2\2\u0139\20")
buf.write(u"\3\2\2\2\u013a\u013b\7\61\2\2\u013b\22\3\2\2\2\u013c")
buf.write(u"\u013d\7*\2\2\u013d\24\3\2\2\2\u013e\u013f\7+\2\2\u013f")
buf.write(u"\26\3\2\2\2\u0140\u0141\7^\2\2\u0141\u0142\7n\2\2\u0142")
buf.write(u"\u0143\7i\2\2\u0143\u0144\7t\2\2\u0144\u0145\7q\2\2\u0145")
buf.write(u"\u0146\7w\2\2\u0146\u0147\7r\2\2\u0147\30\3\2\2\2\u0148")
buf.write(u"\u0149\7^\2\2\u0149\u014a\7t\2\2\u014a\u014b\7i\2\2\u014b")
buf.write(u"\u014c\7t\2\2\u014c\u014d\7q\2\2\u014d\u014e\7w\2\2\u014e")
buf.write(u"\u014f\7r\2\2\u014f\32\3\2\2\2\u0150\u0151\7}\2\2\u0151")
buf.write(u"\34\3\2\2\2\u0152\u0153\7\177\2\2\u0153\36\3\2\2\2\u0154")
buf.write(u"\u0155\7^\2\2\u0155\u0156\7}\2\2\u0156 \3\2\2\2\u0157")
buf.write(u"\u0158\7^\2\2\u0158\u0159\7\177\2\2\u0159\"\3\2\2\2\u015a")
buf.write(u"\u015b\7^\2\2\u015b\u015c\7n\2\2\u015c\u015d\7d\2\2\u015d")
buf.write(u"\u015e\7t\2\2\u015e\u015f\7c\2\2\u015f\u0160\7e\2\2\u0160")
buf.write(u"\u0161\7g\2\2\u0161$\3\2\2\2\u0162\u0163\7^\2\2\u0163")
buf.write(u"\u0164\7t\2\2\u0164\u0165\7d\2\2\u0165\u0166\7t\2\2\u0166")
buf.write(u"\u0167\7c\2\2\u0167\u0168\7e\2\2\u0168\u0169\7g\2\2\u0169")
buf.write(u"&\3\2\2\2\u016a\u016b\7]\2\2\u016b(\3\2\2\2\u016c\u016d")
buf.write(u"\7_\2\2\u016d*\3\2\2\2\u016e\u016f\7^\2\2\u016f\u0170")
buf.write(u"\7n\2\2\u0170\u0171\7d\2\2\u0171\u0172\7t\2\2\u0172\u0173")
buf.write(u"\7c\2\2\u0173\u0174\7e\2\2\u0174\u0175\7m\2\2\u0175,")
buf.write(u"\3\2\2\2\u0176\u0177\7^\2\2\u0177\u0178\7t\2\2\u0178")
buf.write(u"\u0179\7d\2\2\u0179\u017a\7t\2\2\u017a\u017b\7c\2\2\u017b")
buf.write(u"\u017c\7e\2\2\u017c\u017d\7m\2\2\u017d.\3\2\2\2\u017e")
buf.write(u"\u017f\7~\2\2\u017f\60\3\2\2\2\u0180\u0181\7^\2\2\u0181")
buf.write(u"\u0182\7n\2\2\u0182\u0183\7x\2\2\u0183\u0184\7g\2\2\u0184")
buf.write(u"\u0185\7t\2\2\u0185\u0186\7v\2\2\u0186\62\3\2\2\2\u0187")
buf.write(u"\u0188\7^\2\2\u0188\u0189\7t\2\2\u0189\u018a\7x\2\2\u018a")
buf.write(u"\u018b\7g\2\2\u018b\u018c\7t\2\2\u018c\u018d\7v\2\2\u018d")
buf.write(u"\64\3\2\2\2\u018e\u018f\7^\2\2\u018f\u0190\7x\2\2\u0190")
buf.write(u"\u0191\7g\2\2\u0191\u0192\7t\2\2\u0192\u0193\7v\2\2\u0193")
buf.write(u"\66\3\2\2\2\u0194\u0195\7^\2\2\u0195\u0196\7n\2\2\u0196")
buf.write(u"\u0197\7h\2\2\u0197\u0198\7n\2\2\u0198\u0199\7q\2\2\u0199")
buf.write(u"\u019a\7q\2\2\u019a\u019b\7t\2\2\u019b8\3\2\2\2\u019c")
buf.write(u"\u019d\7^\2\2\u019d\u019e\7t\2\2\u019e\u019f\7h\2\2\u019f")
buf.write(u"\u01a0\7n\2\2\u01a0\u01a1\7q\2\2\u01a1\u01a2\7q\2\2\u01a2")
buf.write(u"\u01a3\7t\2\2\u01a3:\3\2\2\2\u01a4\u01a5\7^\2\2\u01a5")
buf.write(u"\u01a6\7n\2\2\u01a6\u01a7\7n\2\2\u01a7\u01a8\7e\2\2\u01a8")
buf.write(u"\u01a9\7q\2\2\u01a9\u01aa\7t\2\2\u01aa\u01ab\7p\2\2\u01ab")
buf.write(u"\u01ac\7g\2\2\u01ac\u01ad\7t\2\2\u01ad<\3\2\2\2\u01ae")
buf.write(u"\u01af\7^\2\2\u01af\u01b0\7n\2\2\u01b0\u01b1\7t\2\2\u01b1")
buf.write(u"\u01b2\7e\2\2\u01b2\u01b3\7q\2\2\u01b3\u01b4\7t\2\2\u01b4")
buf.write(u"\u01b5\7p\2\2\u01b5\u01b6\7g\2\2\u01b6\u01b7\7t\2\2\u01b7")
buf.write(u">\3\2\2\2\u01b8\u01b9\7^\2\2\u01b9\u01ba\7n\2\2\u01ba")
buf.write(u"\u01bb\7e\2\2\u01bb\u01bc\7g\2\2\u01bc\u01bd\7k\2\2\u01bd")
buf.write(u"\u01be\7n\2\2\u01be@\3\2\2\2\u01bf\u01c0\7^\2\2\u01c0")
buf.write(u"\u01c1\7t\2\2\u01c1\u01c2\7e\2\2\u01c2\u01c3\7g\2\2\u01c3")
buf.write(u"\u01c4\7k\2\2\u01c4\u01c5\7n\2\2\u01c5B\3\2\2\2\u01c6")
buf.write(u"\u01c7\7^\2\2\u01c7\u01c8\7w\2\2\u01c8\u01c9\7n\2\2\u01c9")
buf.write(u"\u01ca\7e\2\2\u01ca\u01cb\7q\2\2\u01cb\u01cc\7t\2\2\u01cc")
buf.write(u"\u01cd\7p\2\2\u01cd\u01ce\7g\2\2\u01ce\u01cf\7t\2\2\u01cf")
buf.write(u"D\3\2\2\2\u01d0\u01d1\7^\2\2\u01d1\u01d2\7w\2\2\u01d2")
buf.write(u"\u01d3\7t\2\2\u01d3\u01d4\7e\2\2\u01d4\u01d5\7q\2\2\u01d5")
buf.write(u"\u01d6\7t\2\2\u01d6\u01d7\7p\2\2\u01d7\u01d8\7g\2\2\u01d8")
buf.write(u"\u01d9\7t\2\2\u01d9F\3\2\2\2\u01da\u01db\7^\2\2\u01db")
buf.write(u"\u01dc\7n\2\2\u01dc\u01dd\7g\2\2\u01dd\u01de\7h\2\2\u01de")
buf.write(u"\u01df\7v\2\2\u01dfH\3\2\2\2\u01e0\u01e1\7^\2\2\u01e1")
buf.write(u"\u01e2\7t\2\2\u01e2\u01e3\7k\2\2\u01e3\u01e4\7i\2\2\u01e4")
buf.write(u"\u01e5\7j\2\2\u01e5\u01e6\7v\2\2\u01e6J\3\2\2\2\u01e7")
buf.write(u"\u01e8\7^\2\2\u01e8\u01e9\7o\2\2\u01e9\u01ea\7n\2\2\u01ea")
buf.write(u"\u01eb\7g\2\2\u01eb\u01ec\7h\2\2\u01ec\u01ed\7v\2\2\u01ed")
buf.write(u"L\3\2\2\2\u01ee\u01ef\7^\2\2\u01ef\u01f0\7o\2\2\u01f0")
buf.write(u"\u01f1\7t\2\2\u01f1\u01f2\7k\2\2\u01f2\u01f3\7i\2\2\u01f3")
buf.write(u"\u01f4\7j\2\2\u01f4\u01f5\7v\2\2\u01f5N\3\2\2\2\u01f6")
buf.write(u"\u01f7\7^\2\2\u01f7\u01f8\7n\2\2\u01f8\u01f9\7k\2\2\u01f9")
buf.write(u"\u01fa\7o\2\2\u01faP\3\2\2\2\u01fb\u01fc\7^\2\2\u01fc")
buf.write(u"\u01fd\7v\2\2\u01fd\u0233\7q\2\2\u01fe\u01ff\7^\2\2\u01ff")
buf.write(u"\u0200\7t\2\2\u0200\u0201\7k\2\2\u0201\u0202\7i\2\2\u0202")
buf.write(u"\u0203\7j\2\2\u0203\u0204\7v\2\2\u0204\u0205\7c\2\2\u0205")
buf.write(u"\u0206\7t\2\2\u0206\u0207\7t\2\2\u0207\u0208\7q\2\2\u0208")
buf.write(u"\u0233\7y\2\2\u0209\u020a\7^\2\2\u020a\u020b\7T\2\2\u020b")
buf.write(u"\u020c\7k\2\2\u020c\u020d\7i\2\2\u020d\u020e\7j\2\2\u020e")
buf.write(u"\u020f\7v\2\2\u020f\u0210\7c\2\2\u0210\u0211\7t\2\2\u0211")
buf.write(u"\u0212\7t\2\2\u0212\u0213\7q\2\2\u0213\u0233\7y\2\2\u0214")
buf.write(u"\u0215\7^\2\2\u0215\u0216\7n\2\2\u0216\u0217\7q\2\2\u0217")
buf.write(u"\u0218\7p\2\2\u0218\u0219\7i\2\2\u0219\u021a\7t\2\2\u021a")
buf.write(u"\u021b\7k\2\2\u021b\u021c\7i\2\2\u021c\u021d\7j\2\2\u021d")
buf.write(u"\u021e\7v\2\2\u021e\u021f\7c\2\2\u021f\u0220\7t\2\2\u0220")
buf.write(u"\u0221\7t\2\2\u0221\u0222\7q\2\2\u0222\u0233\7y\2\2\u0223")
buf.write(u"\u0224\7^\2\2\u0224\u0225\7N\2\2\u0225\u0226\7q\2\2\u0226")
buf.write(u"\u0227\7p\2\2\u0227\u0228\7i\2\2\u0228\u0229\7t\2\2\u0229")
buf.write(u"\u022a\7k\2\2\u022a\u022b\7i\2\2\u022b\u022c\7j\2\2\u022c")
buf.write(u"\u022d\7v\2\2\u022d\u022e\7c\2\2\u022e\u022f\7t\2\2\u022f")
buf.write(u"\u0230\7t\2\2\u0230\u0231\7q\2\2\u0231\u0233\7y\2\2\u0232")
buf.write(u"\u01fb\3\2\2\2\u0232\u01fe\3\2\2\2\u0232\u0209\3\2\2")
buf.write(u"\2\u0232\u0214\3\2\2\2\u0232\u0223\3\2\2\2\u0233R\3\2")
buf.write(u"\2\2\u0234\u0235\7^\2\2\u0235\u0236\7k\2\2\u0236\u0237")
buf.write(u"\7p\2\2\u0237\u0238\7v\2\2\u0238T\3\2\2\2\u0239\u023a")
buf.write(u"\7^\2\2\u023a\u023b\7u\2\2\u023b\u023c\7w\2\2\u023c\u023d")
buf.write(u"\7o\2\2\u023dV\3\2\2\2\u023e\u023f\7^\2\2\u023f\u0240")
buf.write(u"\7r\2\2\u0240\u0241\7t\2\2\u0241\u0242\7q\2\2\u0242\u0243")
buf.write(u"\7f\2\2\u0243X\3\2\2\2\u0244\u0245\7^\2\2\u0245\u0246")
buf.write(u"\7n\2\2\u0246\u0247\7q\2\2\u0247\u0248\7i\2\2\u0248Z")
buf.write(u"\3\2\2\2\u0249\u024a\7^\2\2\u024a\u024b\7n\2\2\u024b")
buf.write(u"\u024c\7p\2\2\u024c\\\3\2\2\2\u024d\u024e\7^\2\2\u024e")
buf.write(u"\u024f\7g\2\2\u024f\u0250\7z\2\2\u0250\u0251\7r\2\2\u0251")
buf.write(u"^\3\2\2\2\u0252\u0253\7^\2\2\u0253\u0254\7u\2\2\u0254")
buf.write(u"\u0255\7k\2\2\u0255\u0256\7p\2\2\u0256`\3\2\2\2\u0257")
buf.write(u"\u0258\7^\2\2\u0258\u0259\7e\2\2\u0259\u025a\7q\2\2\u025a")
buf.write(u"\u025b\7u\2\2\u025bb\3\2\2\2\u025c\u025d\7^\2\2\u025d")
buf.write(u"\u025e\7v\2\2\u025e\u025f\7c\2\2\u025f\u0260\7p\2\2\u0260")
buf.write(u"d\3\2\2\2\u0261\u0262\7^\2\2\u0262\u0263\7e\2\2\u0263")
buf.write(u"\u0264\7u\2\2\u0264\u0265\7e\2\2\u0265f\3\2\2\2\u0266")
buf.write(u"\u0267\7^\2\2\u0267\u0268\7u\2\2\u0268\u0269\7g\2\2\u0269")
buf.write(u"\u026a\7e\2\2\u026ah\3\2\2\2\u026b\u026c\7^\2\2\u026c")
buf.write(u"\u026d\7e\2\2\u026d\u026e\7q\2\2\u026e\u026f\7v\2\2\u026f")
buf.write(u"j\3\2\2\2\u0270\u0271\7^\2\2\u0271\u0272\7c\2\2\u0272")
buf.write(u"\u0273\7t\2\2\u0273\u0274\7e\2\2\u0274\u0275\7u\2\2\u0275")
buf.write(u"\u0276\7k\2\2\u0276\u0277\7p\2\2\u0277l\3\2\2\2\u0278")
buf.write(u"\u0279\7^\2\2\u0279\u027a\7c\2\2\u027a\u027b\7t\2\2\u027b")
buf.write(u"\u027c\7e\2\2\u027c\u027d\7e\2\2\u027d\u027e\7q\2\2\u027e")
buf.write(u"\u027f\7u\2\2\u027fn\3\2\2\2\u0280\u0281\7^\2\2\u0281")
buf.write(u"\u0282\7c\2\2\u0282\u0283\7t\2\2\u0283\u0284\7e\2\2\u0284")
buf.write(u"\u0285\7v\2\2\u0285\u0286\7c\2\2\u0286\u0287\7p\2\2\u0287")
buf.write(u"p\3\2\2\2\u0288\u0289\7^\2\2\u0289\u028a\7c\2\2\u028a")
buf.write(u"\u028b\7t\2\2\u028b\u028c\7e\2\2\u028c\u028d\7e\2\2\u028d")
buf.write(u"\u028e\7u\2\2\u028e\u028f\7e\2\2\u028fr\3\2\2\2\u0290")
buf.write(u"\u0291\7^\2\2\u0291\u0292\7c\2\2\u0292\u0293\7t\2\2\u0293")
buf.write(u"\u0294\7e\2\2\u0294\u0295\7u\2\2\u0295\u0296\7g\2\2\u0296")
buf.write(u"\u0297\7e\2\2\u0297t\3\2\2\2\u0298\u0299\7^\2\2\u0299")
buf.write(u"\u029a\7c\2\2\u029a\u029b\7t\2\2\u029b\u029c\7e\2\2\u029c")
buf.write(u"\u029d\7e\2\2\u029d\u029e\7q\2\2\u029e\u029f\7v\2\2\u029f")
buf.write(u"v\3\2\2\2\u02a0\u02a1\7^\2\2\u02a1\u02a2\7u\2\2\u02a2")
buf.write(u"\u02a3\7k\2\2\u02a3\u02a4\7p\2\2\u02a4\u02a5\7j\2\2\u02a5")
buf.write(u"x\3\2\2\2\u02a6\u02a7\7^\2\2\u02a7\u02a8\7e\2\2\u02a8")
buf.write(u"\u02a9\7q\2\2\u02a9\u02aa\7u\2\2\u02aa\u02ab\7j\2\2\u02ab")
buf.write(u"z\3\2\2\2\u02ac\u02ad\7^\2\2\u02ad\u02ae\7v\2\2\u02ae")
buf.write(u"\u02af\7c\2\2\u02af\u02b0\7p\2\2\u02b0\u02b1\7j\2\2\u02b1")
buf.write(u"|\3\2\2\2\u02b2\u02b3\7^\2\2\u02b3\u02b4\7c\2\2\u02b4")
buf.write(u"\u02b5\7t\2\2\u02b5\u02b6\7u\2\2\u02b6\u02b7\7k\2\2\u02b7")
buf.write(u"\u02b8\7p\2\2\u02b8\u02b9\7j\2\2\u02b9~\3\2\2\2\u02ba")
buf.write(u"\u02bb\7^\2\2\u02bb\u02bc\7c\2\2\u02bc\u02bd\7t\2\2\u02bd")
buf.write(u"\u02be\7e\2\2\u02be\u02bf\7q\2\2\u02bf\u02c0\7u\2\2\u02c0")
buf.write(u"\u02c1\7j\2\2\u02c1\u0080\3\2\2\2\u02c2\u02c3\7^\2\2")
buf.write(u"\u02c3\u02c4\7c\2\2\u02c4\u02c5\7t\2\2\u02c5\u02c6\7")
buf.write(u"v\2\2\u02c6\u02c7\7c\2\2\u02c7\u02c8\7p\2\2\u02c8\u02c9")
buf.write(u"\7j\2\2\u02c9\u0082\3\2\2\2\u02ca\u02cb\7^\2\2\u02cb")
buf.write(u"\u02cc\7c\2\2\u02cc\u02cd\7t\2\2\u02cd\u02ce\7e\2\2\u02ce")
buf.write(u"\u02cf\7u\2\2\u02cf\u02d0\7k\2\2\u02d0\u02d1\7p\2\2\u02d1")
buf.write(u"\u02d2\7j\2\2\u02d2\u0084\3\2\2\2\u02d3\u02d4\7^\2\2")
buf.write(u"\u02d4\u02d5\7c\2\2\u02d5\u02d6\7t\2\2\u02d6\u02d7\7")
buf.write(u"e\2\2\u02d7\u02d8\7e\2\2\u02d8\u02d9\7q\2\2\u02d9\u02da")
buf.write(u"\7u\2\2\u02da\u02db\7j\2\2\u02db\u0086\3\2\2\2\u02dc")
buf.write(u"\u02dd\7^\2\2\u02dd\u02de\7c\2\2\u02de\u02df\7t\2\2\u02df")
buf.write(u"\u02e0\7e\2\2\u02e0\u02e1\7v\2\2\u02e1\u02e2\7c\2\2\u02e2")
buf.write(u"\u02e3\7p\2\2\u02e3\u02e4\7j\2\2\u02e4\u0088\3\2\2\2")
buf.write(u"\u02e5\u02e6\7c\2\2\u02e6\u02e7\7t\2\2\u02e7\u02e8\7")
buf.write(u"u\2\2\u02e8\u02e9\7k\2\2\u02e9\u02ea\7p\2\2\u02ea\u02eb")
buf.write(u"\7j\2\2\u02eb\u008a\3\2\2\2\u02ec\u02ed\7c\2\2\u02ed")
buf.write(u"\u02ee\7t\2\2\u02ee\u02ef\7e\2\2\u02ef\u02f0\7u\2\2\u02f0")
buf.write(u"\u02f1\7k\2\2\u02f1\u02f2\7p\2\2\u02f2\u02f3\7j\2\2\u02f3")
buf.write(u"\u008c\3\2\2\2\u02f4\u02f5\7c\2\2\u02f5\u02f6\7t\2\2")
buf.write(u"\u02f6\u02f7\7e\2\2\u02f7\u02f8\7q\2\2\u02f8\u02f9\7")
buf.write(u"u\2\2\u02f9\u02fa\7j\2\2\u02fa\u008e\3\2\2\2\u02fb\u02fc")
buf.write(u"\7c\2\2\u02fc\u02fd\7t\2\2\u02fd\u02fe\7e\2\2\u02fe\u02ff")
buf.write(u"\7e\2\2\u02ff\u0300\7q\2\2\u0300\u0301\7u\2\2\u0301\u0302")
buf.write(u"\7j\2\2\u0302\u0090\3\2\2\2\u0303\u0304\7c\2\2\u0304")
buf.write(u"\u0305\7t\2\2\u0305\u0306\7v\2\2\u0306\u0307\7c\2\2\u0307")
buf.write(u"\u0308\7p\2\2\u0308\u0309\7j\2\2\u0309\u0092\3\2\2\2")
buf.write(u"\u030a\u030b\7c\2\2\u030b\u030c\7t\2\2\u030c\u030d\7")
buf.write(u"e\2\2\u030d\u030e\7v\2\2\u030e\u030f\7c\2\2\u030f\u0310")
buf.write(u"\7p\2\2\u0310\u0311\7j\2\2\u0311\u0094\3\2\2\2\u0312")
buf.write(u"\u0313\7i\2\2\u0313\u0314\7e\2\2\u0314\u0315\7f\2\2\u0315")
buf.write(u"\u0096\3\2\2\2\u0316\u0317\7n\2\2\u0317\u0318\7e\2\2")
buf.write(u"\u0318\u0319\7o\2\2\u0319\u0098\3\2\2\2\u031a\u031b\7")
buf.write(u"h\2\2\u031b\u031c\7n\2\2\u031c\u031d\7q\2\2\u031d\u031e")
buf.write(u"\7q\2\2\u031e\u031f\7t\2\2\u031f\u009a\3\2\2\2\u0320")
buf.write(u"\u0321\7e\2\2\u0321\u0322\7g\2\2\u0322\u0323\7k\2\2\u0323")
buf.write(u"\u0324\7n\2\2\u0324\u009c\3\2\2\2\u0325\u0326\7^\2\2")
buf.write(u"\u0326\u0327\7u\2\2\u0327\u0328\7s\2\2\u0328\u0329\7")
buf.write(u"t\2\2\u0329\u032a\7v\2\2\u032a\u009e\3\2\2\2\u032b\u032c")
buf.write(u"\7^\2\2\u032c\u032d\7i\2\2\u032d\u032e\7e\2\2\u032e\u032f")
buf.write(u"\7f\2\2\u032f\u00a0\3\2\2\2\u0330\u0331\7^\2\2\u0331")
buf.write(u"\u0332\7n\2\2\u0332\u0333\7e\2\2\u0333\u0334\7o\2\2\u0334")
buf.write(u"\u00a2\3\2\2\2\u0335\u0336\7^\2\2\u0336\u0337\7h\2\2")
buf.write(u"\u0337\u0338\7n\2\2\u0338\u0339\7q\2\2\u0339\u033a\7")
buf.write(u"q\2\2\u033a\u033b\7t\2\2\u033b\u00a4\3\2\2\2\u033c\u033d")
buf.write(u"\7^\2\2\u033d\u033e\7e\2\2\u033e\u033f\7g\2\2\u033f\u0340")
buf.write(u"\7k\2\2\u0340\u0341\7n\2\2\u0341\u00a6\3\2\2\2\u0342")
buf.write(u"\u0343\7^\2\2\u0343\u0344\7o\2\2\u0344\u0345\7c\2\2\u0345")
buf.write(u"\u0346\7z\2\2\u0346\u00a8\3\2\2\2\u0347\u0348\7^\2\2")
buf.write(u"\u0348\u0349\7o\2\2\u0349\u034a\7k\2\2\u034a\u034b\7")
buf.write(u"p\2\2\u034b\u00aa\3\2\2\2\u034c\u034d\7^\2\2\u034d\u034e")
buf.write(u"\7v\2\2\u034e\u034f\7k\2\2\u034f\u0350\7o\2\2\u0350\u0351")
buf.write(u"\7g\2\2\u0351\u0352\7u\2\2\u0352\u00ac\3\2\2\2\u0353")
buf.write(u"\u0354\7^\2\2\u0354\u0355\7e\2\2\u0355\u0356\7f\2\2\u0356")
buf.write(u"\u0357\7q\2\2\u0357\u0358\7v\2\2\u0358\u00ae\3\2\2\2")
buf.write(u"\u0359\u035a\7^\2\2\u035a\u035b\7f\2\2\u035b\u035c\7")
buf.write(u"k\2\2\u035c\u035d\7x\2\2\u035d\u00b0\3\2\2\2\u035e\u035f")
buf.write(u"\7^\2\2\u035f\u0360\7h\2\2\u0360\u0361\7t\2\2\u0361\u0362")
buf.write(u"\7c\2\2\u0362\u0363\7e\2\2\u0363\u00b2\3\2\2\2\u0364")
buf.write(u"\u0365\7^\2\2\u0365\u0366\7d\2\2\u0366\u0367\7k\2\2\u0367")
buf.write(u"\u0368\7p\2\2\u0368\u0369\7q\2\2\u0369\u036a\7o\2\2\u036a")
buf.write(u"\u00b4\3\2\2\2\u036b\u036c\7^\2\2\u036c\u036d\7e\2\2")
buf.write(u"\u036d\u036e\7j\2\2\u036e\u036f\7q\2\2\u036f\u0370\7")
buf.write(u"q\2\2\u0370\u0371\7u\2\2\u0371\u0372\7g\2\2\u0372\u00b6")
buf.write(u"\3\2\2\2\u0373\u0374\7^\2\2\u0374\u0375\7o\2\2\u0375")
buf.write(u"\u0376\7q\2\2\u0376\u0377\7f\2\2\u0377\u00b8\3\2\2\2")
buf.write(u"\u0378\u0379\7^\2\2\u0379\u037a\7o\2\2\u037a\u037b\7")
buf.write(u"c\2\2\u037b\u037c\7v\2\2\u037c\u037d\7j\2\2\u037d\u037e")
buf.write(u"\7k\2\2\u037e\u037f\7v\2\2\u037f\u00ba\3\2\2\2\u0380")
buf.write(u"\u0381\7^\2\2\u0381\u0382\7q\2\2\u0382\u0383\7r\2\2\u0383")
buf.write(u"\u0384\7g\2\2\u0384\u0385\7t\2\2\u0385\u0386\7c\2\2\u0386")
buf.write(u"\u0387\7v\2\2\u0387\u0388\7q\2\2\u0388\u0389\7t\2\2\u0389")
buf.write(u"\u038a\7p\2\2\u038a\u038b\7c\2\2\u038b\u038c\7o\2\2\u038c")
buf.write(u"\u038d\7g\2\2\u038d\u00bc\3\2\2\2\u038e\u038f\7o\2\2")
buf.write(u"\u038f\u0390\7c\2\2\u0390\u0391\7v\2\2\u0391\u0392\7")
buf.write(u"t\2\2\u0392\u0393\7k\2\2\u0393\u0394\7z\2\2\u0394\u00be")
buf.write(u"\3\2\2\2\u0395\u0396\7r\2\2\u0396\u0397\7o\2\2\u0397")
buf.write(u"\u0398\7c\2\2\u0398\u0399\7v\2\2\u0399\u039a\7t\2\2\u039a")
buf.write(u"\u039b\7k\2\2\u039b\u039c\7z\2\2\u039c\u00c0\3\2\2\2")
buf.write(u"\u039d\u039e\7d\2\2\u039e\u039f\7o\2\2\u039f\u03a0\7")
buf.write(u"c\2\2\u03a0\u03a1\7v\2\2\u03a1\u03a2\7t\2\2\u03a2\u03a3")
buf.write(u"\7k\2\2\u03a3\u03a4\7z\2\2\u03a4\u00c2\3\2\2\2\u03a5")
buf.write(u"\u03a6\7x\2\2\u03a6\u03a7\7o\2\2\u03a7\u03a8\7c\2\2\u03a8")
buf.write(u"\u03a9\7v\2\2\u03a9\u03aa\7t\2\2\u03aa\u03ab\7k\2\2\u03ab")
buf.write(u"\u03ac\7z\2\2\u03ac\u00c4\3\2\2\2\u03ad\u03b1\5\u00bd")
buf.write(u"_\2\u03ae\u03b1\5\u00bf`\2\u03af\u03b1\5\u00c1a\2\u03b0")
buf.write(u"\u03ad\3\2\2\2\u03b0\u03ae\3\2\2\2\u03b0\u03af\3\2\2")
buf.write(u"\2\u03b1\u00c6\3\2\2\2\u03b2\u03b3\7^\2\2\u03b3\u03b4")
buf.write(u"\7d\2\2\u03b4\u03b5\7g\2\2\u03b5\u03b6\7i\2\2\u03b6\u03b7")
buf.write(u"\7k\2\2\u03b7\u03b8\7p\2\2\u03b8\u03b9\3\2\2\2\u03b9")
buf.write(u"\u03ba\5\33\16\2\u03ba\u03bb\5\u00c5c\2\u03bb\u03bc\5")
buf.write(u"\35\17\2\u03bc\u00c8\3\2\2\2\u03bd\u03be\7^\2\2\u03be")
buf.write(u"\u03bf\7g\2\2\u03bf\u03c0\7p\2\2\u03c0\u03c1\7f\2\2\u03c1")
buf.write(u"\u03c2\3\2\2\2\u03c2\u03c3\5\33\16\2\u03c3\u03c4\5\u00c5")
buf.write(u"c\2\u03c4\u03c5\5\35\17\2\u03c5\u00ca\3\2\2\2\u03c6\u03c7")
buf.write(u"\7^\2\2\u03c7\u03c8\7d\2\2\u03c8\u03c9\7g\2\2\u03c9\u03ca")
buf.write(u"\7i\2\2\u03ca\u03cb\7k\2\2\u03cb\u03cc\7p\2\2\u03cc\u03cd")
buf.write(u"\3\2\2\2\u03cd\u03ce\5\33\16\2\u03ce\u03cf\5\u00c3b\2")
buf.write(u"\u03cf\u03d0\5\35\17\2\u03d0\u00cc\3\2\2\2\u03d1\u03d2")
buf.write(u"\7^\2\2\u03d2\u03d3\7g\2\2\u03d3\u03d4\7p\2\2\u03d4\u03d5")
buf.write(u"\7f\2\2\u03d5\u03d6\3\2\2\2\u03d6\u03d7\5\33\16\2\u03d7")
buf.write(u"\u03d8\5\u00c3b\2\u03d8\u03d9\5\35\17\2\u03d9\u00ce\3")
buf.write(u"\2\2\2\u03da\u03db\7(\2\2\u03db\u00d0\3\2\2\2\u03dc\u03dd")
buf.write(u"\7^\2\2\u03dd\u03de\7^\2\2\u03de\u00d2\3\2\2\2\u03df")
buf.write(u"\u03e0\7^\2\2\u03e0\u03e1\7z\2\2\u03e1\u03e2\7t\2\2\u03e2")
buf.write(u"\u03e3\7k\2\2\u03e3\u03e4\7i\2\2\u03e4\u03e5\7j\2\2\u03e5")
buf.write(u"\u03e6\7v\2\2\u03e6\u03e7\7c\2\2\u03e7\u03e8\7t\2\2\u03e8")
buf.write(u"\u03e9\7t\2\2\u03e9\u03ea\7q\2\2\u03ea\u03f8\7y\2\2\u03eb")
buf.write(u"\u03ec\7^\2\2\u03ec\u03ed\7z\2\2\u03ed\u03ee\7T\2\2\u03ee")
buf.write(u"\u03ef\7k\2\2\u03ef\u03f0\7i\2\2\u03f0\u03f1\7j\2\2\u03f1")
buf.write(u"\u03f2\7v\2\2\u03f2\u03f3\7c\2\2\u03f3\u03f4\7t\2\2\u03f4")
buf.write(u"\u03f5\7t\2\2\u03f5\u03f6\7q\2\2\u03f6\u03f8\7y\2\2\u03f7")
buf.write(u"\u03df\3\2\2\2\u03f7\u03eb\3\2\2\2\u03f8\u00d4\3\2\2")
buf.write(u"\2\u03f9\u03fa\7>\2\2\u03fa\u03fb\7/\2\2\u03fb\u041e")
buf.write(u"\7@\2\2\u03fc\u03fd\7>\2\2\u03fd\u03fe\7?\2\2\u03fe\u041e")
buf.write(u"\7@\2\2\u03ff\u0400\7^\2\2\u0400\u0401\7n\2\2\u0401\u0402")
buf.write(u"\7g\2\2\u0402\u0403\7h\2\2\u0403\u0404\7v\2\2\u0404\u0405")
buf.write(u"\7t\2\2\u0405\u0406\7k\2\2\u0406\u0407\7i\2\2\u0407\u0408")
buf.write(u"\7j\2\2\u0408\u0409\7v\2\2\u0409\u040a\7c\2\2\u040a\u040b")
buf.write(u"\7t\2\2\u040b\u040c\7t\2\2\u040c\u040d\7q\2\2\u040d\u041e")
buf.write(u"\7y\2\2\u040e\u040f\7^\2\2\u040f\u0410\7N\2\2\u0410\u0411")
buf.write(u"\7g\2\2\u0411\u0412\7h\2\2\u0412\u0413\7v\2\2\u0413\u0414")
buf.write(u"\7t\2\2\u0414\u0415\7k\2\2\u0415\u0416\7i\2\2\u0416\u0417")
buf.write(u"\7j\2\2\u0417\u0418\7v\2\2\u0418\u0419\7c\2\2\u0419\u041a")
buf.write(u"\7t\2\2\u041a\u041b\7t\2\2\u041b\u041c\7q\2\2\u041c\u041e")
buf.write(u"\7y\2\2\u041d\u03f9\3\2\2\2\u041d\u03fc\3\2\2\2\u041d")
buf.write(u"\u03ff\3\2\2\2\u041d\u040e\3\2\2\2\u041e\u00d6\3\2\2")
buf.write(u"\2\u041f\u0420\t\3\2\2\u0420\u00d8\3\2\2\2\u0421\u0422")
buf.write(u"\7^\2\2\u0422\u0423\7q\2\2\u0423\u0424\7x\2\2\u0424\u0425")
buf.write(u"\7g\2\2\u0425\u0426\7t\2\2\u0426\u0427\7n\2\2\u0427\u0428")
buf.write(u"\7k\2\2\u0428\u0429\7p\2\2\u0429\u042a\7g\2\2\u042a\u00da")
buf.write(u"\3\2\2\2\u042b\u042c\7^\2\2\u042c\u042d\7d\2\2\u042d")
buf.write(u"\u042e\7c\2\2\u042e\u042f\7t\2\2\u042f\u00dc\3\2\2\2")
buf.write(u"\u0430\u0431\7a\2\2\u0431\u00de\3\2\2\2\u0432\u0433\7")
buf.write(u"`\2\2\u0433\u00e0\3\2\2\2\u0434\u0435\7<\2\2\u0435\u00e2")
buf.write(u"\3\2\2\2\u0436\u0437\7=\2\2\u0437\u00e4\3\2\2\2\u0438")
buf.write(u"\u0439\7.\2\2\u0439\u00e6\3\2\2\2\u043a\u043b\7\60\2")
buf.write(u"\2\u043b\u00e8\3\2\2\2\u043c\u043d\t\2\2\2\u043d\u00ea")
buf.write(u"\3\2\2\2\u043e\u0442\7f\2\2\u043f\u0441\5\u00e9u\2\u0440")
buf.write(u"\u043f\3\2\2\2\u0441\u0444\3\2\2\2\u0442\u0443\3\2\2")
buf.write(u"\2\u0442\u0440\3\2\2\2\u0443\u044c\3\2\2\2\u0444\u0442")
buf.write(u"\3\2\2\2\u0445\u044d\t\4\2\2\u0446\u0448\7^\2\2\u0447")
buf.write(u"\u0449\t\4\2\2\u0448\u0447\3\2\2\2\u0449\u044a\3\2\2")
buf.write(u"\2\u044a\u0448\3\2\2\2\u044a\u044b\3\2\2\2\u044b\u044d")
buf.write(u"\3\2\2\2\u044c\u0445\3\2\2\2\u044c\u0446\3\2\2\2\u044d")
buf.write(u"\u00ec\3\2\2\2\u044e\u045d\7g\2\2\u044f\u0450\7^\2\2")
buf.write(u"\u0450\u0451\7g\2\2\u0451\u0452\7z\2\2\u0452\u0453\7")
buf.write(u"r\2\2\u0453\u0454\7q\2\2\u0454\u0455\7p\2\2\u0455\u0456")
buf.write(u"\7g\2\2\u0456\u0457\7p\2\2\u0457\u0458\7v\2\2\u0458\u0459")
buf.write(u"\7k\2\2\u0459\u045a\7c\2\2\u045a\u045b\7n\2\2\u045b\u045d")
buf.write(u"\7G\2\2\u045c\u044e\3\2\2\2\u045c\u044f\3\2\2\2\u045d")
buf.write(u"\u00ee\3\2\2\2\u045e\u045f\7G\2\2\u045f\u00f0\3\2\2\2")
buf.write(u"\u0460\u0461\t\5\2\2\u0461\u00f2\3\2\2\2\u0462\u0463")
buf.write(u"\t\4\2\2\u0463\u00f4\3\2\2\2\u0464\u0465\t\6\2\2\u0465")
buf.write(u"\u00f6\3\2\2\2\u0466\u0468\5\u00f5{\2\u0467\u0466\3\2")
buf.write(u"\2\2\u0468\u0469\3\2\2\2\u0469\u0467\3\2\2\2\u0469\u046a")
buf.write(u"\3\2\2\2\u046a\u0472\3\2\2\2\u046b\u046c\5\u00e5s\2\u046c")
buf.write(u"\u046d\5\u00f5{\2\u046d\u046e\5\u00f5{\2\u046e\u046f")
buf.write(u"\5\u00f5{\2\u046f\u0471\3\2\2\2\u0470\u046b\3\2\2\2\u0471")
buf.write(u"\u0474\3\2\2\2\u0472\u0470\3\2\2\2\u0472\u0473\3\2\2")
buf.write(u"\2\u0473\u048c\3\2\2\2\u0474\u0472\3\2\2\2\u0475\u0477")
buf.write(u"\5\u00f5{\2\u0476\u0475\3\2\2\2\u0477\u047a\3\2\2\2\u0478")
buf.write(u"\u0476\3\2\2\2\u0478\u0479\3\2\2\2\u0479\u0482\3\2\2")
buf.write(u"\2\u047a\u0478\3\2\2\2\u047b\u047c\5\u00e5s\2\u047c\u047d")
buf.write(u"\5\u00f5{\2\u047d\u047e\5\u00f5{\2\u047e\u047f\5\u00f5")
buf.write(u"{\2\u047f\u0481\3\2\2\2\u0480\u047b\3\2\2\2\u0481\u0484")
buf.write(u"\3\2\2\2\u0482\u0480\3\2\2\2\u0482\u0483\3\2\2\2\u0483")
buf.write(u"\u0485\3\2\2\2\u0484\u0482\3\2\2\2\u0485\u0487\5\u00e7")
buf.write(u"t\2\u0486\u0488\5\u00f5{\2\u0487\u0486\3\2\2\2\u0488")
buf.write(u"\u0489\3\2\2\2\u0489\u0487\3\2\2\2\u0489\u048a\3\2\2")
buf.write(u"\2\u048a\u048c\3\2\2\2\u048b\u0467\3\2\2\2\u048b\u0478")
buf.write(u"\3\2\2\2\u048c\u00f8\3\2\2\2\u048d\u048e\5\u00f7|\2\u048e")
buf.write(u"\u0491\5\u00efx\2\u048f\u0492\5\r\7\2\u0490\u0492\5\13")
buf.write(u"\6\2\u0491\u048f\3\2\2\2\u0491\u0490\3\2\2\2\u0491\u0492")
buf.write(u"\3\2\2\2\u0492\u0494\3\2\2\2\u0493\u0495\5\u00f5{\2\u0494")
buf.write(u"\u0493\3\2\2\2\u0495\u0496\3\2\2\2\u0496\u0494\3\2\2")
buf.write(u"\2\u0496\u0497\3\2\2\2\u0497\u00fa\3\2\2\2\u0498\u0499")
buf.write(u"\7?\2\2\u0499\u00fc\3\2\2\2\u049a\u049b\7?\2\2\u049b")
buf.write(u"\u04a3\7?\2\2\u049c\u049d\7^\2\2\u049d\u049e\7g\2\2\u049e")
buf.write(u"\u049f\7s\2\2\u049f\u04a0\7w\2\2\u04a0\u04a1\7k\2\2\u04a1")
buf.write(u"\u04a3\7x\2\2\u04a2\u049a\3\2\2\2\u04a2\u049c\3\2\2\2")
buf.write(u"\u04a3\u00fe\3\2\2\2\u04a4\u04a5\7>\2\2\u04a5\u0100\3")
buf.write(u"\2\2\2\u04a6\u04a7\7^\2\2\u04a7\u04a8\7n\2\2\u04a8\u04a9")
buf.write(u"\7g\2\2\u04a9\u04b7\7s\2\2\u04aa\u04ab\7^\2\2\u04ab\u04ac")
buf.write(u"\7n\2\2\u04ac\u04b7\7g\2\2\u04ad\u04ae\7^\2\2\u04ae\u04af")
buf.write(u"\7n\2\2\u04af\u04b0\7g\2\2\u04b0\u04b1\7s\2\2\u04b1\u04b2")
buf.write(u"\7u\2\2\u04b2\u04b3\7n\2\2\u04b3\u04b4\7c\2\2\u04b4\u04b5")
buf.write(u"\7p\2\2\u04b5\u04b7\7v\2\2\u04b6\u04a6\3\2\2\2\u04b6")
buf.write(u"\u04aa\3\2\2\2\u04b6\u04ad\3\2\2\2\u04b7\u0102\3\2\2")
buf.write(u"\2\u04b8\u04b9\7@\2\2\u04b9\u0104\3\2\2\2\u04ba\u04bb")
buf.write(u"\7^\2\2\u04bb\u04bc\7i\2\2\u04bc\u04bd\7g\2\2\u04bd\u04cb")
buf.write(u"\7s\2\2\u04be\u04bf\7^\2\2\u04bf\u04c0\7i\2\2\u04c0\u04cb")
buf.write(u"\7g\2\2\u04c1\u04c2\7^\2\2\u04c2\u04c3\7i\2\2\u04c3\u04c4")
buf.write(u"\7g\2\2\u04c4\u04c5\7s\2\2\u04c5\u04c6\7u\2\2\u04c6\u04c7")
buf.write(u"\7n\2\2\u04c7\u04c8\7c\2\2\u04c8\u04c9\7p\2\2\u04c9\u04cb")
buf.write(u"\7v\2\2\u04ca\u04ba\3\2\2\2\u04ca\u04be\3\2\2\2\u04ca")
buf.write(u"\u04c1\3\2\2\2\u04cb\u0106\3\2\2\2\u04cc\u04cd\7#\2\2")
buf.write(u"\u04cd\u04e3\7?\2\2\u04ce\u04cf\7#\2\2\u04cf\u04d0\7")
buf.write(u"?\2\2\u04d0\u04e3\7?\2\2\u04d1\u04d2\7^\2\2\u04d2\u04d3")
buf.write(u"\7p\2\2\u04d3\u04e3\7g\2\2\u04d4\u04d5\7^\2\2\u04d5\u04d6")
buf.write(u"\7p\2\2\u04d6\u04d7\7g\2\2\u04d7\u04e3\7s\2\2\u04d8\u04d9")
buf.write(u"\7^\2\2\u04d9\u04da\7p\2\2\u04da\u04db\7q\2\2\u04db\u04dc")
buf.write(u"\7v\2\2\u04dc\u04dd\7^\2\2\u04dd\u04de\7g\2\2\u04de\u04df")
buf.write(u"\7s\2\2\u04df\u04e0\7w\2\2\u04e0\u04e1\7k\2\2\u04e1\u04e3")
buf.write(u"\7x\2\2\u04e2\u04cc\3\2\2\2\u04e2\u04ce\3\2\2\2\u04e2")
buf.write(u"\u04d1\3\2\2\2\u04e2\u04d4\3\2\2\2\u04e2\u04d8\3\2\2")
buf.write(u"\2\u04e3\u0108\3\2\2\2\u04e4\u04e5\7#\2\2\u04e5\u010a")
buf.write(u"\3\2\2\2\u04e6\u04e7\7^\2\2\u04e7\u04e8\7\'\2\2\u04e8")
buf.write(u"\u010c\3\2\2\2\u04e9\u04ea\5\u00f7|\2\u04ea\u04eb\5\u010b")
buf.write(u"\u0086\2\u04eb\u010e\3\2\2\2\u04ec\u04ed\7^\2\2\u04ed")
buf.write(u"\u04ee\7e\2\2\u04ee\u04ef\7j\2\2\u04ef\u04f0\7c\2\2\u04f0")
buf.write(u"\u04f1\7t\2\2\u04f1\u04f2\7$\2\2\u04f2\u04f3\7\62\2\2")
buf.write(u"\u04f3\u04f4\7\62\2\2\u04f4\u04f5\7\62\2\2\u04f5\u04f6")
buf.write(u"\7\65\2\2\u04f6\u04f7\7;\2\2\u04f7\u066e\7\63\2\2\u04f8")
buf.write(u"\u04f9\7^\2\2\u04f9\u04fa\7c\2\2\u04fa\u04fb\7n\2\2\u04fb")
buf.write(u"\u04fc\7r\2\2\u04fc\u04fd\7j\2\2\u04fd\u066e\7c\2\2\u04fe")
buf.write(u"\u04ff\7^\2\2\u04ff\u0500\7e\2\2\u0500\u0501\7j\2\2\u0501")
buf.write(u"\u0502\7c\2\2\u0502\u0503\7t\2\2\u0503\u0504\7$\2\2\u0504")
buf.write(u"\u0505\7\62\2\2\u0505\u0506\7\62\2\2\u0506\u0507\7\62")
buf.write(u"\2\2\u0507\u0508\7\65\2\2\u0508\u0509\7;\2\2\u0509\u066e")
buf.write(u"\7\64\2\2\u050a\u050b\7^\2\2\u050b\u050c\7d\2\2\u050c")
buf.write(u"\u050d\7g\2\2\u050d\u050e\7v\2\2\u050e\u066e\7c\2\2\u050f")
buf.write(u"\u0510\7^\2\2\u0510\u0511\7I\2\2\u0511\u0512\7c\2\2\u0512")
buf.write(u"\u0513\7o\2\2\u0513\u0514\7o\2\2\u0514\u066e\7c\2\2\u0515")
buf.write(u"\u0516\7^\2\2\u0516\u0517\7i\2\2\u0517\u0518\7c\2\2\u0518")
buf.write(u"\u0519\7o\2\2\u0519\u051a\7o\2\2\u051a\u066e\7c\2\2\u051b")
buf.write(u"\u051c\7^\2\2\u051c\u051d\7F\2\2\u051d\u051e\7g\2\2\u051e")
buf.write(u"\u051f\7n\2\2\u051f\u0520\7v\2\2\u0520\u066e\7c\2\2\u0521")
buf.write(u"\u0522\7^\2\2\u0522\u0523\7f\2\2\u0523\u0524\7g\2\2\u0524")
buf.write(u"\u0525\7n\2\2\u0525\u0526\7v\2\2\u0526\u066e\7c\2\2\u0527")
buf.write(u"\u0528\7^\2\2\u0528\u0529\7e\2\2\u0529\u052a\7j\2\2\u052a")
buf.write(u"\u052b\7c\2\2\u052b\u052c\7t\2\2\u052c\u052d\7$\2\2\u052d")
buf.write(u"\u052e\7\62\2\2\u052e\u052f\7\62\2\2\u052f\u0530\7\62")
buf.write(u"\2\2\u0530\u0531\7\63\2\2\u0531\u0532\7;\2\2\u0532\u066e")
buf.write(u"\7\62\2\2\u0533\u0534\7^\2\2\u0534\u0535\7g\2\2\u0535")
buf.write(u"\u0536\7r\2\2\u0536\u0537\7u\2\2\u0537\u0538\7k\2\2\u0538")
buf.write(u"\u0539\7n\2\2\u0539\u053a\7q\2\2\u053a\u066e\7p\2\2\u053b")
buf.write(u"\u053c\7^\2\2\u053c\u053d\7x\2\2\u053d\u053e\7c\2\2\u053e")
buf.write(u"\u053f\7t\2\2\u053f\u0540\7g\2\2\u0540\u0541\7r\2\2\u0541")
buf.write(u"\u0542\7u\2\2\u0542\u0543\7k\2\2\u0543\u0544\7n\2\2\u0544")
buf.write(u"\u0545\7q\2\2\u0545\u066e\7p\2\2\u0546\u0547\7^\2\2\u0547")
buf.write(u"\u0548\7e\2\2\u0548\u0549\7j\2\2\u0549\u054a\7c\2\2\u054a")
buf.write(u"\u054b\7t\2\2\u054b\u054c\7$\2\2\u054c\u054d\7\62\2\2")
buf.write(u"\u054d\u054e\7\62\2\2\u054e\u054f\7\62\2\2\u054f\u0550")
buf.write(u"\7\65\2\2\u0550\u0551\7;\2\2\u0551\u066e\78\2\2\u0552")
buf.write(u"\u0553\7^\2\2\u0553\u0554\7|\2\2\u0554\u0555\7g\2\2\u0555")
buf.write(u"\u0556\7v\2\2\u0556\u066e\7c\2\2\u0557\u0558\7^\2\2\u0558")
buf.write(u"\u0559\7e\2\2\u0559\u055a\7j\2\2\u055a\u055b\7c\2\2\u055b")
buf.write(u"\u055c\7t\2\2\u055c\u055d\7$\2\2\u055d\u055e\7\62\2\2")
buf.write(u"\u055e\u055f\7\62\2\2\u055f\u0560\7\62\2\2\u0560\u0561")
buf.write(u"\7\65\2\2\u0561\u0562\7;\2\2\u0562\u066e\79\2\2\u0563")
buf.write(u"\u0564\7^\2\2\u0564\u0565\7g\2\2\u0565\u0566\7v\2\2\u0566")
buf.write(u"\u066e\7c\2\2\u0567\u0568\7^\2\2\u0568\u0569\7V\2\2\u0569")
buf.write(u"\u056a\7j\2\2\u056a\u056b\7g\2\2\u056b\u056c\7v\2\2\u056c")
buf.write(u"\u066e\7c\2\2\u056d\u056e\7^\2\2\u056e\u056f\7v\2\2\u056f")
buf.write(u"\u0570\7j\2\2\u0570\u0571\7g\2\2\u0571\u0572\7v\2\2\u0572")
buf.write(u"\u066e\7c\2\2\u0573\u0574\7^\2\2\u0574\u0575\7x\2\2\u0575")
buf.write(u"\u0576\7c\2\2\u0576\u0577\7t\2\2\u0577\u0578\7v\2\2\u0578")
buf.write(u"\u0579\7j\2\2\u0579\u057a\7g\2\2\u057a\u057b\7v\2\2\u057b")
buf.write(u"\u066e\7c\2\2\u057c\u057d\7^\2\2\u057d\u057e\7e\2\2\u057e")
buf.write(u"\u057f\7j\2\2\u057f\u0580\7c\2\2\u0580\u0581\7t\2\2\u0581")
buf.write(u"\u0582\7$\2\2\u0582\u0583\7\62\2\2\u0583\u0584\7\62\2")
buf.write(u"\2\u0584\u0585\7\62\2\2\u0585\u0586\7\65\2\2\u0586\u0587")
buf.write(u"\7;\2\2\u0587\u066e\7;\2\2\u0588\u0589\7^\2\2\u0589\u058a")
buf.write(u"\7k\2\2\u058a\u058b\7q\2\2\u058b\u058c\7v\2\2\u058c\u066e")
buf.write(u"\7c\2\2\u058d\u058e\7^\2\2\u058e\u058f\7e\2\2\u058f\u0590")
buf.write(u"\7j\2\2\u0590\u0591\7c\2\2\u0591\u0592\7t\2\2\u0592\u0593")
buf.write(u"\7$\2\2\u0593\u0594\7\62\2\2\u0594\u0595\7\62\2\2\u0595")
buf.write(u"\u0596\7\62\2\2\u0596\u0597\7\65\2\2\u0597\u0598\7;\2")
buf.write(u"\2\u0598\u066e\7C\2\2\u0599\u059a\7^\2\2\u059a\u059b")
buf.write(u"\7m\2\2\u059b\u059c\7c\2\2\u059c\u059d\7r\2\2\u059d\u059e")
buf.write(u"\7r\2\2\u059e\u066e\7c\2\2\u059f\u05a0\7^\2\2\u05a0\u05a1")
buf.write(u"\7N\2\2\u05a1\u05a2\7c\2\2\u05a2\u05a3\7o\2\2\u05a3\u05a4")
buf.write(u"\7d\2\2\u05a4\u05a5\7f\2\2\u05a5\u066e\7c\2\2\u05a6\u05a7")
buf.write(u"\7^\2\2\u05a7\u05a8\7n\2\2\u05a8\u05a9\7c\2\2\u05a9\u05aa")
buf.write(u"\7o\2\2\u05aa\u05ab\7d\2\2\u05ab\u05ac\7f\2\2\u05ac\u066e")
buf.write(u"\7c\2\2\u05ad\u05ae\7^\2\2\u05ae\u05af\7e\2\2\u05af\u05b0")
buf.write(u"\7j\2\2\u05b0\u05b1\7c\2\2\u05b1\u05b2\7t\2\2\u05b2\u05b3")
buf.write(u"\7$\2\2\u05b3\u05b4\7\62\2\2\u05b4\u05b5\7\62\2\2\u05b5")
buf.write(u"\u05b6\7\62\2\2\u05b6\u05b7\7\65\2\2\u05b7\u05b8\7;\2")
buf.write(u"\2\u05b8\u066e\7E\2\2\u05b9\u05ba\7^\2\2\u05ba\u05bb")
buf.write(u"\7o\2\2\u05bb\u066e\7w\2\2\u05bc\u05bd\7^\2\2\u05bd\u05be")
buf.write(u"\7e\2\2\u05be\u05bf\7j\2\2\u05bf\u05c0\7c\2\2\u05c0\u05c1")
buf.write(u"\7t\2\2\u05c1\u05c2\7$\2\2\u05c2\u05c3\7\62\2\2\u05c3")
buf.write(u"\u05c4\7\62\2\2\u05c4\u05c5\7\62\2\2\u05c5\u05c6\7\65")
buf.write(u"\2\2\u05c6\u05c7\7;\2\2\u05c7\u066e\7F\2\2\u05c8\u05c9")
buf.write(u"\7^\2\2\u05c9\u05ca\7p\2\2\u05ca\u066e\7w\2\2\u05cb\u05cc")
buf.write(u"\7^\2\2\u05cc\u05cd\7Z\2\2\u05cd\u066e\7k\2\2\u05ce\u05cf")
buf.write(u"\7^\2\2\u05cf\u05d0\7z\2\2\u05d0\u066e\7k\2\2\u05d1\u05d2")
buf.write(u"\7^\2\2\u05d2\u05d3\7e\2\2\u05d3\u05d4\7j\2\2\u05d4\u05d5")
buf.write(u"\7c\2\2\u05d5\u05d6\7t\2\2\u05d6\u05d7\7$\2\2\u05d7\u05d8")
buf.write(u"\7\62\2\2\u05d8\u05d9\7\62\2\2\u05d9\u05da\7\62\2\2\u05da")
buf.write(u"\u05db\7\65\2\2\u05db\u05dc\7;\2\2\u05dc\u066e\7H\2\2")
buf.write(u"\u05dd\u05de\7^\2\2\u05de\u05df\7q\2\2\u05df\u05e0\7")
buf.write(u"o\2\2\u05e0\u05e1\7k\2\2\u05e1\u05e2\7e\2\2\u05e2\u05e3")
buf.write(u"\7t\2\2\u05e3\u05e4\7q\2\2\u05e4\u066e\7p\2\2\u05e5\u05e6")
buf.write(u"\7^\2\2\u05e6\u05e7\7R\2\2\u05e7\u066e\7k\2\2\u05e8\u05e9")
buf.write(u"\7^\2\2\u05e9\u05ea\7x\2\2\u05ea\u05eb\7c\2\2\u05eb\u05ec")
buf.write(u"\7t\2\2\u05ec\u05ed\7r\2\2\u05ed\u066e\7k\2\2\u05ee\u05ef")
buf.write(u"\7^\2\2\u05ef\u05f0\7e\2\2\u05f0\u05f1\7j\2\2\u05f1\u05f2")
buf.write(u"\7c\2\2\u05f2\u05f3\7t\2\2\u05f3\u05f4\7$\2\2\u05f4\u05f5")
buf.write(u"\7\62\2\2\u05f5\u05f6\7\62\2\2\u05f6\u05f7\7\62\2\2\u05f7")
buf.write(u"\u05f8\7\65\2\2\u05f8\u05f9\7C\2\2\u05f9\u066e\7\63\2")
buf.write(u"\2\u05fa\u05fb\7^\2\2\u05fb\u05fc\7t\2\2\u05fc\u05fd")
buf.write(u"\7j\2\2\u05fd\u066e\7q\2\2\u05fe\u05ff\7^\2\2\u05ff\u0600")
buf.write(u"\7x\2\2\u0600\u0601\7c\2\2\u0601\u0602\7t\2\2\u0602\u0603")
buf.write(u"\7t\2\2\u0603\u0604\7j\2\2\u0604\u066e\7q\2\2\u0605\u0606")
buf.write(u"\7^\2\2\u0606\u0607\7U\2\2\u0607\u0608\7k\2\2\u0608\u0609")
buf.write(u"\7i\2\2\u0609\u060a\7o\2\2\u060a\u066e\7c\2\2\u060b\u060c")
buf.write(u"\7^\2\2\u060c\u060d\7u\2\2\u060d\u060e\7k\2\2\u060e\u060f")
buf.write(u"\7i\2\2\u060f\u0610\7o\2\2\u0610\u066e\7c\2\2\u0611\u0612")
buf.write(u"\7^\2\2\u0612\u0613\7x\2\2\u0613\u0614\7c\2\2\u0614\u0615")
buf.write(u"\7t\2\2\u0615\u0616\7u\2\2\u0616\u0617\7k\2\2\u0617\u0618")
buf.write(u"\7i\2\2\u0618\u0619\7o\2\2\u0619\u066e\7c\2\2\u061a\u061b")
buf.write(u"\7^\2\2\u061b\u061c\7e\2\2\u061c\u061d\7j\2\2\u061d\u061e")
buf.write(u"\7c\2\2\u061e\u061f\7t\2\2\u061f\u0620\7$\2\2\u0620\u0621")
buf.write(u"\7\62\2\2\u0621\u0622\7\62\2\2\u0622\u0623\7\62\2\2\u0623")
buf.write(u"\u0624\7\65\2\2\u0624\u0625\7C\2\2\u0625\u066e\7\66\2")
buf.write(u"\2\u0626\u0627\7^\2\2\u0627\u0628\7v\2\2\u0628\u0629")
buf.write(u"\7c\2\2\u0629\u066e\7w\2\2\u062a\u062b\7^\2\2\u062b\u062c")
buf.write(u"\7W\2\2\u062c\u062d\7r\2\2\u062d\u062e\7u\2\2\u062e\u062f")
buf.write(u"\7k\2\2\u062f\u0630\7n\2\2\u0630\u0631\7q\2\2\u0631\u066e")
buf.write(u"\7p\2\2\u0632\u0633\7^\2\2\u0633\u0634\7w\2\2\u0634\u0635")
buf.write(u"\7r\2\2\u0635\u0636\7u\2\2\u0636\u0637\7k\2\2\u0637\u0638")
buf.write(u"\7n\2\2\u0638\u0639\7q\2\2\u0639\u066e\7p\2\2\u063a\u063b")
buf.write(u"\7^\2\2\u063b\u063c\7R\2\2\u063c\u063d\7j\2\2\u063d\u066e")
buf.write(u"\7k\2\2\u063e\u063f\7^\2\2\u063f\u0640\7r\2\2\u0640\u0641")
buf.write(u"\7j\2\2\u0641\u066e\7k\2\2\u0642\u0643\7^\2\2\u0643\u0644")
buf.write(u"\7x\2\2\u0644\u0645\7c\2\2\u0645\u0646\7t\2\2\u0646\u0647")
buf.write(u"\7r\2\2\u0647\u0648\7j\2\2\u0648\u066e\7k\2\2\u0649\u064a")
buf.write(u"\7^\2\2\u064a\u064b\7e\2\2\u064b\u064c\7j\2\2\u064c\u064d")
buf.write(u"\7c\2\2\u064d\u064e\7t\2\2\u064e\u064f\7$\2\2\u064f\u0650")
buf.write(u"\7\62\2\2\u0650\u0651\7\62\2\2\u0651\u0652\7\62\2\2\u0652")
buf.write(u"\u0653\7\65\2\2\u0653\u0654\7C\2\2\u0654\u066e\79\2\2")
buf.write(u"\u0655\u0656\7^\2\2\u0656\u0657\7e\2\2\u0657\u0658\7")
buf.write(u"j\2\2\u0658\u066e\7k\2\2\u0659\u065a\7^\2\2\u065a\u065b")
buf.write(u"\7R\2\2\u065b\u065c\7u\2\2\u065c\u066e\7k\2\2\u065d\u065e")
buf.write(u"\7^\2\2\u065e\u065f\7r\2\2\u065f\u0660\7u\2\2\u0660\u066e")
buf.write(u"\7k\2\2\u0661\u0662\7^\2\2\u0662\u0663\7Q\2\2\u0663\u0664")
buf.write(u"\7o\2\2\u0664\u0665\7g\2\2\u0665\u0666\7i\2\2\u0666\u066e")
buf.write(u"\7c\2\2\u0667\u0668\7^\2\2\u0668\u0669\7q\2\2\u0669\u066a")
buf.write(u"\7o\2\2\u066a\u066b\7g\2\2\u066b\u066c\7i\2\2\u066c\u066e")
buf.write(u"\7c\2\2\u066d\u04ec\3\2\2\2\u066d\u04f8\3\2\2\2\u066d")
buf.write(u"\u04fe\3\2\2\2\u066d\u050a\3\2\2\2\u066d\u050f\3\2\2")
buf.write(u"\2\u066d\u0515\3\2\2\2\u066d\u051b\3\2\2\2\u066d\u0521")
buf.write(u"\3\2\2\2\u066d\u0527\3\2\2\2\u066d\u0533\3\2\2\2\u066d")
buf.write(u"\u053b\3\2\2\2\u066d\u0546\3\2\2\2\u066d\u0552\3\2\2")
buf.write(u"\2\u066d\u0557\3\2\2\2\u066d\u0563\3\2\2\2\u066d\u0567")
buf.write(u"\3\2\2\2\u066d\u056d\3\2\2\2\u066d\u0573\3\2\2\2\u066d")
buf.write(u"\u057c\3\2\2\2\u066d\u0588\3\2\2\2\u066d\u058d\3\2\2")
buf.write(u"\2\u066d\u0599\3\2\2\2\u066d\u059f\3\2\2\2\u066d\u05a6")
buf.write(u"\3\2\2\2\u066d\u05ad\3\2\2\2\u066d\u05b9\3\2\2\2\u066d")
buf.write(u"\u05bc\3\2\2\2\u066d\u05c8\3\2\2\2\u066d\u05cb\3\2\2")
buf.write(u"\2\u066d\u05ce\3\2\2\2\u066d\u05d1\3\2\2\2\u066d\u05dd")
buf.write(u"\3\2\2\2\u066d\u05e5\3\2\2\2\u066d\u05e8\3\2\2\2\u066d")
buf.write(u"\u05ee\3\2\2\2\u066d\u05fa\3\2\2\2\u066d\u05fe\3\2\2")
buf.write(u"\2\u066d\u0605\3\2\2\2\u066d\u060b\3\2\2\2\u066d\u0611")
buf.write(u"\3\2\2\2\u066d\u061a\3\2\2\2\u066d\u0626\3\2\2\2\u066d")
buf.write(u"\u062a\3\2\2\2\u066d\u0632\3\2\2\2\u066d\u063a\3\2\2")
buf.write(u"\2\u066d\u063e\3\2\2\2\u066d\u0642\3\2\2\2\u066d\u0649")
buf.write(u"\3\2\2\2\u066d\u0655\3\2\2\2\u066d\u0659\3\2\2\2\u066d")
buf.write(u"\u065d\3\2\2\2\u066d\u0661\3\2\2\2\u066d\u0667\3\2\2")
buf.write(u"\2\u066e\u0110\3\2\2\2\u066f\u0671\5\u010f\u0088\2\u0670")
buf.write(u"\u0672\t\7\2\2\u0671\u0670\3\2\2\2\u0671\u0672\3\2\2")
buf.write(u"\2\u0672\u0112\3\2\2\2\u0673\u0674\7^\2\2\u0674\u0675")
buf.write(u"\7r\2\2\u0675\u0676\7k\2\2\u0676\u0114\3\2\2\2\u0677")
buf.write(u"\u0678\7^\2\2\u0678\u0679\7k\2\2\u0679\u067a\7p\2\2\u067a")
buf.write(u"\u067b\7h\2\2\u067b\u067c\7v\2\2\u067c\u067d\7{\2\2\u067d")
buf.write(u"\u0116\3\2\2\2\u067e\u0686\5\u0115\u008b\2\u067f\u0680")
buf.write(u"\5\t\5\2\u0680\u0681\5\u0115\u008b\2\u0681\u0686\3\2")
buf.write(u"\2\2\u0682\u0683\5\u0115\u008b\2\u0683\u0684\5\u010b")
buf.write(u"\u0086\2\u0684\u0686\3\2\2\2\u0685\u067e\3\2\2\2\u0685")
buf.write(u"\u067f\3\2\2\2\u0685\u0682\3\2\2\2\u0686\u0118\3\2\2")
buf.write(u"\2\u0687\u0688\7^\2\2\u0688\u0689\7g\2\2\u0689\u068a")
buf.write(u"\7o\2\2\u068a\u068b\7r\2\2\u068b\u068c\7v\2\2\u068c\u068d")
buf.write(u"\7{\2\2\u068d\u068e\7u\2\2\u068e\u068f\7g\2\2\u068f\u0690")
buf.write(u"\7v\2\2\u0690\u011a\3\2\2\2\u0691\u0695\5\u0113\u008a")
buf.write(u"\2\u0692\u0695\5\u0117\u008c\2\u0693\u0695\5\u0119\u008d")
buf.write(u"\2\u0694\u0691\3\2\2\2\u0694\u0692\3\2\2\2\u0694\u0693")
buf.write(u"\3\2\2\2\u0695\u011c\3\2\2\2\u0696\u0697\7^\2\2\u0697")
buf.write(u"\u0698\7x\2\2\u0698\u0699\7c\2\2\u0699\u069a\7t\2\2\u069a")
buf.write(u"\u069b\7k\2\2\u069b\u069c\7c\2\2\u069c\u069d\7d\2\2\u069d")
buf.write(u"\u069e\7n\2\2\u069e\u069f\7g\2\2\u069f\u011e\3\2\2\2")
buf.write(u"\u06a0\u06a4\5\u0111\u0089\2\u06a1\u06a4\5\u00f3z\2\u06a2")
buf.write(u"\u06a4\5\u00f5{\2\u06a3\u06a0\3\2\2\2\u06a3\u06a1\3\2")
buf.write(u"\2\2\u06a3\u06a2\3\2\2\2\u06a4\u06a5\3\2\2\2\u06a5\u06a3")
buf.write(u"\3\2\2\2\u06a5\u06a6\3\2\2\2\u06a6\u06ba\3\2\2\2\u06a7")
buf.write(u"\u06b8\5\u00ddo\2\u06a8\u06ad\5\33\16\2\u06a9\u06ae\5")
buf.write(u"\u0111\u0089\2\u06aa\u06ae\5\u00f3z\2\u06ab\u06ae\5\u00f5")
buf.write(u"{\2\u06ac\u06ae\5\u00e5s\2\u06ad\u06a9\3\2\2\2\u06ad")
buf.write(u"\u06aa\3\2\2\2\u06ad\u06ab\3\2\2\2\u06ad\u06ac\3\2\2")
buf.write(u"\2\u06ae\u06af\3\2\2\2\u06af\u06ad\3\2\2\2\u06af\u06b0")
buf.write(u"\3\2\2\2\u06b0\u06b1\3\2\2\2\u06b1\u06b2\5\35\17\2\u06b2")
buf.write(u"\u06b9\3\2\2\2\u06b3\u06b7\5\u0111\u0089\2\u06b4\u06b7")
buf.write(u"\5\u00f3z\2\u06b5\u06b7\5\u00f5{\2\u06b6\u06b3\3\2\2")
buf.write(u"\2\u06b6\u06b4\3\2\2\2\u06b6\u06b5\3\2\2\2\u06b7\u06b9")
buf.write(u"\3\2\2\2\u06b8\u06a8\3\2\2\2\u06b8\u06b6\3\2\2\2\u06b9")
buf.write(u"\u06bb\3\2\2\2\u06ba\u06a7\3\2\2\2\u06ba\u06bb\3\2\2")
buf.write(u"\2\u06bb\u0120\3\2\2\2\u06bc\u06bd\5\u011d\u008f\2\u06bd")
buf.write(u"\u06be\5\33\16\2\u06be\u06bf\5\u011f\u0090\2\u06bf\u06c1")
buf.write(u"\5\35\17\2\u06c0\u06c2\5\u010b\u0086\2\u06c1\u06c0\3")
buf.write(u"\2\2\2\u06c1\u06c2\3\2\2\2\u06c2\u0122\3\2\2\2$\2\u012b")
buf.write(u"\u0232\u03b0\u03f7\u041d\u0442\u044a\u044c\u045c\u0469")
buf.write(u"\u0472\u0478\u0482\u0489\u048b\u0491\u0496\u04a2\u04b6")
buf.write(u"\u04ca\u04e2\u066d\u0671\u0685\u0694\u06a3\u06a5\u06ad")
buf.write(u"\u06af\u06b6\u06b8\u06ba\u06c1\3\b\2\2")
return buf.getvalue()
class PSLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
WS = 3
DOLLAR_SIGN = 4
ADD = 5
SUB = 6
MUL = 7
DIV = 8
L_PAREN = 9
R_PAREN = 10
L_GROUP = 11
R_GROUP = 12
L_BRACE = 13
R_BRACE = 14
L_BRACE_VISUAL = 15
R_BRACE_VISUAL = 16
L_BRACE_CMD = 17
R_BRACE_CMD = 18
L_BRACKET = 19
R_BRACKET = 20
L_BRACK = 21
R_BRACK = 22
BAR = 23
L_VERT = 24
R_VERT = 25
VERT = 26
L_FLOOR = 27
R_FLOOR = 28
LL_CORNER = 29
LR_CORNER = 30
L_CEIL = 31
R_CEIL = 32
UL_CORNER = 33
UR_CORNER = 34
L_LEFT = 35
R_RIGHT = 36
ML_LEFT = 37
MR_RIGHT = 38
FUNC_LIM = 39
LIM_APPROACH_SYM = 40
FUNC_INT = 41
FUNC_SUM = 42
FUNC_PROD = 43
FUNC_LOG = 44
FUNC_LN = 45
FUNC_EXP = 46
FUNC_SIN = 47
FUNC_COS = 48
FUNC_TAN = 49
FUNC_CSC = 50
FUNC_SEC = 51
FUNC_COT = 52
FUNC_ARCSIN = 53
FUNC_ARCCOS = 54
FUNC_ARCTAN = 55
FUNC_ARCCSC = 56
FUNC_ARCSEC = 57
FUNC_ARCCOT = 58
FUNC_SINH = 59
FUNC_COSH = 60
FUNC_TANH = 61
FUNC_ARSINH = 62
FUNC_ARCOSH = 63
FUNC_ARTANH = 64
FUNC_ARCSINH = 65
FUNC_ARCCOSH = 66
FUNC_ARCTANH = 67
FUNC_ARSINH_NAME = 68
FUNC_ARCSINH_NAME = 69
FUNC_ARCOSH_NAME = 70
FUNC_ARCCOSH_NAME = 71
FUNC_ARTANH_NAME = 72
FUNC_ARCTANH_NAME = 73
FUNC_GCD_NAME = 74
FUNC_LCM_NAME = 75
FUNC_FLOOR_NAME = 76
FUNC_CEIL_NAME = 77
FUNC_SQRT = 78
FUNC_GCD = 79
FUNC_LCM = 80
FUNC_FLOOR = 81
FUNC_CEIL = 82
FUNC_MAX = 83
FUNC_MIN = 84
CMD_TIMES = 85
CMD_CDOT = 86
CMD_DIV = 87
CMD_FRAC = 88
CMD_BINOM = 89
CMD_CHOOSE = 90
CMD_MOD = 91
CMD_MATHIT = 92
CMD_OPERATORNAME = 93
MATRIX_TYPE_MATRIX = 94
MATRIX_TYPE_PMATRIX = 95
MATRIX_TYPE_BMATRIX = 96
MATRIX_TYPE_DET = 97
MATRIX_TYPES = 98
CMD_MATRIX_START = 99
CMD_MATRIX_END = 100
CMD_DET_START = 101
CMD_DET_END = 102
MATRIX_DEL_COL = 103
MATRIX_DEL_ROW = 104
MATRIX_XRIGHTARROW = 105
TRANSFORM_EXCHANGE = 106
ROW_OR_COL = 107
ACCENT_OVERLINE = 108
ACCENT_BAR = 109
UNDERSCORE = 110
CARET = 111
COLON = 112
SEMICOLON = 113
COMMA = 114
PERIOD = 115
DIFFERENTIAL = 116
EXP_E = 117
E_NOTATION_E = 118
LETTER_NO_E = 119
NUMBER = 120
E_NOTATION = 121
ASSIGNMENT = 122
EQUAL = 123
LT = 124
LTE = 125
GT = 126
GTE = 127
UNEQUAL = 128
BANG = 129
PERCENT_NUMBER = 130
GREEK_CMD = 131
SYMBOL = 132
VARIABLE = 133
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"'^T'", u"'''", u"'\\$'", u"'+'", u"'-'", u"'*'", u"'/'", u"'('",
u"')'", u"'\\lgroup'", u"'\\rgroup'", u"'{'", u"'}'", u"'\\{'",
u"'\\}'", u"'\\lbrace'", u"'\\rbrace'", u"'['", u"']'", u"'\\lbrack'",
u"'\\rbrack'", u"'|'", u"'\\lvert'", u"'\\rvert'", u"'\\vert'",
u"'\\lfloor'", u"'\\rfloor'", u"'\\llcorner'", u"'\\lrcorner'",
u"'\\lceil'", u"'\\rceil'", u"'\\ulcorner'", u"'\\urcorner'",
u"'\\left'", u"'\\right'", u"'\\mleft'", u"'\\mright'", u"'\\lim'",
u"'\\int'", u"'\\sum'", u"'\\prod'", u"'\\log'", u"'\\ln'",
u"'\\exp'", u"'\\sin'", u"'\\cos'", u"'\\tan'", u"'\\csc'",
u"'\\sec'", u"'\\cot'", u"'\\arcsin'", u"'\\arccos'", u"'\\arctan'",
u"'\\arccsc'", u"'\\arcsec'", u"'\\arccot'", u"'\\sinh'", u"'\\cosh'",
u"'\\tanh'", u"'\\arsinh'", u"'\\arcosh'", u"'\\artanh'", u"'\\arcsinh'",
u"'\\arccosh'", u"'\\arctanh'", u"'arsinh'", u"'arcsinh'", u"'arcosh'",
u"'arccosh'", u"'artanh'", u"'arctanh'", u"'gcd'", u"'lcm'",
u"'floor'", u"'ceil'", u"'\\sqrt'", u"'\\gcd'", u"'\\lcm'",
u"'\\floor'", u"'\\ceil'", u"'\\max'", u"'\\min'", u"'\\times'",
u"'\\cdot'", u"'\\div'", u"'\\frac'", u"'\\binom'", u"'\\choose'",
u"'\\mod'", u"'\\mathit'", u"'\\operatorname'", u"'matrix'",
u"'pmatrix'", u"'bmatrix'", u"'vmatrix'", u"'&'", u"'\\\\'",
u"'\\overline'", u"'\\bar'", u"'_'", u"'^'", u"':'", u"';'",
u"','", u"'.'", u"'E'", u"'='", u"'<'", u"'>'", u"'!'" ]
symbolicNames = [ u"<INVALID>",
u"WS", u"DOLLAR_SIGN", u"ADD", u"SUB", u"MUL", u"DIV", u"L_PAREN",
u"R_PAREN", u"L_GROUP", u"R_GROUP", u"L_BRACE", u"R_BRACE",
u"L_BRACE_VISUAL", u"R_BRACE_VISUAL", u"L_BRACE_CMD", u"R_BRACE_CMD",
u"L_BRACKET", u"R_BRACKET", u"L_BRACK", u"R_BRACK", u"BAR",
u"L_VERT", u"R_VERT", u"VERT", u"L_FLOOR", u"R_FLOOR", u"LL_CORNER",
u"LR_CORNER", u"L_CEIL", u"R_CEIL", u"UL_CORNER", u"UR_CORNER",
u"L_LEFT", u"R_RIGHT", u"ML_LEFT", u"MR_RIGHT", u"FUNC_LIM",
u"LIM_APPROACH_SYM", u"FUNC_INT", u"FUNC_SUM", u"FUNC_PROD",
u"FUNC_LOG", u"FUNC_LN", u"FUNC_EXP", u"FUNC_SIN", u"FUNC_COS",
u"FUNC_TAN", u"FUNC_CSC", u"FUNC_SEC", u"FUNC_COT", u"FUNC_ARCSIN",
u"FUNC_ARCCOS", u"FUNC_ARCTAN", u"FUNC_ARCCSC", u"FUNC_ARCSEC",
u"FUNC_ARCCOT", u"FUNC_SINH", u"FUNC_COSH", u"FUNC_TANH", u"FUNC_ARSINH",
u"FUNC_ARCOSH", u"FUNC_ARTANH", u"FUNC_ARCSINH", u"FUNC_ARCCOSH",
u"FUNC_ARCTANH", u"FUNC_ARSINH_NAME", u"FUNC_ARCSINH_NAME",
u"FUNC_ARCOSH_NAME", u"FUNC_ARCCOSH_NAME", u"FUNC_ARTANH_NAME",
u"FUNC_ARCTANH_NAME", u"FUNC_GCD_NAME", u"FUNC_LCM_NAME", u"FUNC_FLOOR_NAME",
u"FUNC_CEIL_NAME", u"FUNC_SQRT", u"FUNC_GCD", u"FUNC_LCM", u"FUNC_FLOOR",
u"FUNC_CEIL", u"FUNC_MAX", u"FUNC_MIN", u"CMD_TIMES", u"CMD_CDOT",
u"CMD_DIV", u"CMD_FRAC", u"CMD_BINOM", u"CMD_CHOOSE", u"CMD_MOD",
u"CMD_MATHIT", u"CMD_OPERATORNAME", u"MATRIX_TYPE_MATRIX", u"MATRIX_TYPE_PMATRIX",
u"MATRIX_TYPE_BMATRIX", u"MATRIX_TYPE_DET", u"MATRIX_TYPES",
u"CMD_MATRIX_START", u"CMD_MATRIX_END", u"CMD_DET_START", u"CMD_DET_END",
u"MATRIX_DEL_COL", u"MATRIX_DEL_ROW", u"MATRIX_XRIGHTARROW",
u"TRANSFORM_EXCHANGE", u"ROW_OR_COL", u"ACCENT_OVERLINE", u"ACCENT_BAR",
u"UNDERSCORE", u"CARET", u"COLON", u"SEMICOLON", u"COMMA", u"PERIOD",
u"DIFFERENTIAL", u"EXP_E", u"E_NOTATION_E", u"LETTER_NO_E",
u"NUMBER", u"E_NOTATION", u"ASSIGNMENT", u"EQUAL", u"LT", u"LTE",
u"GT", u"GTE", u"UNEQUAL", u"BANG", u"PERCENT_NUMBER", u"GREEK_CMD",
u"SYMBOL", u"VARIABLE" ]
ruleNames = [ u"T__0", u"T__1", u"WS", u"DOLLAR_SIGN", u"ADD", u"SUB",
u"MUL", u"DIV", u"L_PAREN", u"R_PAREN", u"L_GROUP", u"R_GROUP",
u"L_BRACE", u"R_BRACE", u"L_BRACE_VISUAL", u"R_BRACE_VISUAL",
u"L_BRACE_CMD", u"R_BRACE_CMD", u"L_BRACKET", u"R_BRACKET",
u"L_BRACK", u"R_BRACK", u"BAR", u"L_VERT", u"R_VERT",
u"VERT", u"L_FLOOR", u"R_FLOOR", u"LL_CORNER", u"LR_CORNER",
u"L_CEIL", u"R_CEIL", u"UL_CORNER", u"UR_CORNER", u"L_LEFT",
u"R_RIGHT", u"ML_LEFT", u"MR_RIGHT", u"FUNC_LIM", u"LIM_APPROACH_SYM",
u"FUNC_INT", u"FUNC_SUM", u"FUNC_PROD", u"FUNC_LOG", u"FUNC_LN",
u"FUNC_EXP", u"FUNC_SIN", u"FUNC_COS", u"FUNC_TAN", u"FUNC_CSC",
u"FUNC_SEC", u"FUNC_COT", u"FUNC_ARCSIN", u"FUNC_ARCCOS",
u"FUNC_ARCTAN", u"FUNC_ARCCSC", u"FUNC_ARCSEC", u"FUNC_ARCCOT",
u"FUNC_SINH", u"FUNC_COSH", u"FUNC_TANH", u"FUNC_ARSINH",
u"FUNC_ARCOSH", u"FUNC_ARTANH", u"FUNC_ARCSINH", u"FUNC_ARCCOSH",
u"FUNC_ARCTANH", u"FUNC_ARSINH_NAME", u"FUNC_ARCSINH_NAME",
u"FUNC_ARCOSH_NAME", u"FUNC_ARCCOSH_NAME", u"FUNC_ARTANH_NAME",
u"FUNC_ARCTANH_NAME", u"FUNC_GCD_NAME", u"FUNC_LCM_NAME",
u"FUNC_FLOOR_NAME", u"FUNC_CEIL_NAME", u"FUNC_SQRT", u"FUNC_GCD",
u"FUNC_LCM", u"FUNC_FLOOR", u"FUNC_CEIL", u"FUNC_MAX",
u"FUNC_MIN", u"CMD_TIMES", u"CMD_CDOT", u"CMD_DIV", u"CMD_FRAC",
u"CMD_BINOM", u"CMD_CHOOSE", u"CMD_MOD", u"CMD_MATHIT",
u"CMD_OPERATORNAME", u"MATRIX_TYPE_MATRIX", u"MATRIX_TYPE_PMATRIX",
u"MATRIX_TYPE_BMATRIX", u"MATRIX_TYPE_DET", u"MATRIX_TYPES",
u"CMD_MATRIX_START", u"CMD_MATRIX_END", u"CMD_DET_START",
u"CMD_DET_END", u"MATRIX_DEL_COL", u"MATRIX_DEL_ROW",
u"MATRIX_XRIGHTARROW", u"TRANSFORM_EXCHANGE", u"ROW_OR_COL",
u"ACCENT_OVERLINE", u"ACCENT_BAR", u"UNDERSCORE", u"CARET",
u"COLON", u"SEMICOLON", u"COMMA", u"PERIOD", u"WS_CHAR",
u"DIFFERENTIAL", u"EXP_E", u"E_NOTATION_E", u"LETTER_NO_E",
u"LETTER", u"DIGIT", u"NUMBER", u"E_NOTATION", u"ASSIGNMENT",
u"EQUAL", u"LT", u"LTE", u"GT", u"GTE", u"UNEQUAL", u"BANG",
u"PERCENT_SIGN", u"PERCENT_NUMBER", u"GREEK_LETTER", u"GREEK_CMD",
u"PI", u"INFTY_CMD", u"INFTY", u"EMPTYSET", u"SYMBOL",
u"VARIABLE_CMD", u"VARIABLE_SYMBOL", u"VARIABLE" ]
grammarFileName = u"PS.g4"
def __init__(self, input=None, output=sys.stdout):
super(PSLexer, self).__init__(input, output=output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
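A minimal usage sketch for the generated lexer above, assuming the matching antlr4 Python runtime (4.7.2, per the checkVersion call) is installed and PSLexer is importable from this module:

from antlr4 import InputStream, CommonTokenStream, Token

def tokenize(latex):
    # Run a LaTeX math string through the generated lexer and collect tokens.
    lexer = PSLexer(InputStream(latex))
    stream = CommonTokenStream(lexer)
    stream.fill()  # drive the lexer over the whole input
    # Drop the implicit EOF token; keep (token type, matched text) pairs.
    return [(tok.type, tok.text) for tok in stream.tokens
            if tok.type != Token.EOF]

# e.g. tokenize(r"\frac{1}{2}") begins with the CMD_FRAC token (type 88),
# followed by L_BRACE, NUMBER, R_BRACE, and so on.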
[per-file stats: avg_line_length 68.37757 | max_line_length 103 | alphanum_fraction 0.600459, followed by unlabeled qsc_code_* quality-signal columns]

hexsha: 48efad98a7d9f8242a7f7682c73ca7e052674cf8
size: 31 | ext: py | lang: Python
max_stars_repo_path: crawler/39drug_crawler.py
max_stars_repo_name: wglassly/Hylobatidae
max_stars_repo_head_hexsha: 8bb998609c510ab3f32c58f59fc3469ef330aebd
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 (stars events: 2021-02-08T07:50:45.000Z, min and max)
max_issues_* / max_forks_*: same path, repo, and hexsha; counts and dates null
content:
from selenium import webdriver
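That one-line import is the entire file in this record. For orientation only, a hypothetical skeleton of the kind of crawler such a file typically grows into; the URL is made up and none of this is in the source file:

from selenium import webdriver

driver = webdriver.Chrome()                  # requires a local chromedriver
try:
    driver.get("https://example.com/drugs")  # placeholder URL
    html = driver.page_source                # page HTML after JS rendering
finally:
    driver.quit()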
[per-file stats: avg_line_length 15.5 | max_line_length 30 | alphanum_fraction 0.870968, followed by unlabeled qsc_code_* quality-signal columns]

hexsha: 48fd6b270ae36b6dc52b188cda52e30974c3c454
size: 19,032 | ext: py | lang: Python
max_stars_repo_path: datadotworld/client/_swagger/apis/uploads_api.py
max_stars_repo_name: DanialBetres/data.world-py
max_stars_repo_head_hexsha: 0e3acf2be9a07c5ab62ecac9289eb662088d54c7
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 99 (2017-01-23T16:24:18.000Z to 2022-03-30T22:51:58.000Z)
max_issues_count: 77 (2017-01-26T04:33:06.000Z to 2022-03-11T09:39:50.000Z)
max_forks_count: 29 (2017-01-25T16:55:23.000Z to 2022-01-31T01:44:15.000Z)
content:
# coding: utf-8
"""
data.world API
data.world is designed for data and the people who work with data. From professional projects to open data, data.world helps you host and share your data, collaborate with your team, and capture context and conclusions as you work. Using this API users are able to easily access data and manage their data projects regardless of language or tool of preference. Check out our [documentation](https://dwapi.apidocs.io) for tips on how to get started, tutorials and to interact with the API right within your browser.
OpenAPI spec version: 0.14.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class UploadsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def upload_file(self, owner, id, name, **kwargs):
"""
Upload file
Upload one file at a time to a dataset. This endpoint expects requests of type `application/octet-stream`. For example, assuming that you want to upload a local file named `file1.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset` and choose its name on data.world to be `better-name.csv`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -X PUT -H \"Content-Type: application/octet-stream\" \\ --data-binary @file1.csv \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files/better-name.csv ``` This method of upload is typically not supported by Swagger clients. Other HTTP clients can be used to supply the contents of the file directly in the body of the request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_file(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param str name: File name and unique identifier within dataset. (required)
:param bool expand_archive: Indicates whether a compressed file should be expanded upon upload.
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.upload_file_with_http_info(owner, id, name, **kwargs)
else:
(data) = self.upload_file_with_http_info(owner, id, name, **kwargs)
return data
def upload_file_with_http_info(self, owner, id, name, **kwargs):
"""
Upload file
Upload one file at a time to a dataset. This endpoint expects requests of type `application/octet-stream`. For example, assuming that you want to upload a local file named `file1.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset` and choose its name on data.world to be `better-name.csv`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -X PUT -H \"Content-Type: application/octet-stream\" \\ --data-binary @file1.csv \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files/better-name.csv ``` This method of upload is typically not supported by Swagger clients. Other HTTP clients can be used to supply the contents of the file directly in the body of the request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_file_with_http_info(owner, id, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param str name: File name and unique identifier within dataset. (required)
:param bool expand_archive: Indicates whether a compressed file should be expanded upon upload.
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'id', 'name', 'expand_archive']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `upload_file`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `upload_file`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `upload_file`")
if 'owner' in params and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', params['owner']):
raise ValueError("Invalid value for parameter `owner` when calling `upload_file`, must conform to the pattern `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
if 'id' in params and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', params['id']):
raise ValueError("Invalid value for parameter `id` when calling `upload_file`, must conform to the pattern `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
if 'name' in params and len(params['name']) > 128:
raise ValueError("Invalid value for parameter `name` when calling `upload_file`, number of items must be less than or equal to `128`")
if 'name' in params and len(params['name']) < 1:
raise ValueError("Invalid value for parameter `name` when calling `upload_file`, number of items must be greater than or equal to `1`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'id' in params:
path_params['id'] = params['id']
if 'name' in params:
path_params['name'] = params['name']
query_params = []
if 'expand_archive' in params:
query_params.append(('expandArchive', params['expand_archive']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/octet-stream', '*/*'])
# Authentication setting
auth_settings = ['token']
return self.api_client.call_api('/uploads/{owner}/{id}/files/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessMessage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upload_files(self, owner, id, file, **kwargs):
"""
Upload files
Upload multiple files at once to a dataset via multipart request. This endpoint expects requests of type `multipart/form-data` and you can include one or more parts named `file`, each containing a different file to be uploaded. For example, assuming that you want to upload two local files named `file1.csv` and `file2.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -F \"file=@file1.csv\" \\ -F \"file=@file2.csv\" \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files ``` Swagger clients will limit this method of upload to one file at a time. Other HTTP clients capable of making multipart/form-data requests can be used to upload multiple files in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_files(owner, id, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param file file: Multipart-encoded file contents (required)
:param bool expand_archives: Indicates whether compressed files should be expanded upon upload.
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.upload_files_with_http_info(owner, id, file, **kwargs)
else:
(data) = self.upload_files_with_http_info(owner, id, file, **kwargs)
return data
def upload_files_with_http_info(self, owner, id, file, **kwargs):
"""
Upload files
Upload multiple files at once to a dataset via multipart request. This endpoint expects requests of type `multipart/form-data` and you can include one or more parts named `file`, each containing a different file to be uploaded. For example, assuming that you want to upload two local files named `file1.csv` and `file2.csv` to a hypothetical dataset `https://data.world/awesome-user/awesome-dataset`, this is what the cURL command would look like. ```bash curl \\ -H \"Authorization: Bearer <YOUR_API_TOKEN>\" \\ -F \"file=@file1.csv\" \\ -F \"file=@file2.csv\" \\ https://api.data.world/v0/uploads/awesome-user/awesome-dataset/files ``` Swagger clients will limit this method of upload to one file at a time. Other HTTP clients capable of making multipart/form-data requests can be used to upload multiple files in a single request.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_files_with_http_info(owner, id, file, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: User name and unique identifier of the creator of a dataset or project. For example, in the URL: [https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), jonloyens is the unique identifier of the owner. (required)
:param str id: Dataset unique identifier. For example, in the URL:[https://data.world/jonloyens/an-intro-to-dataworld-dataset](https://data.world/jonloyens/an-intro-to-dataworld-dataset), an-intro-to-dataworld-dataset is the unique identifier of the dataset. (required)
:param file file: Multipart-encoded file contents (required)
:param bool expand_archives: Indicates whether compressed files should be expanded upon upload.
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'id', 'file', 'expand_archives']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_files" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `upload_files`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `upload_files`")
# verify the required parameter 'file' is set
if ('file' not in params) or (params['file'] is None):
raise ValueError("Missing the required parameter `file` when calling `upload_files`")
if 'owner' in params and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', params['owner']):
raise ValueError("Invalid value for parameter `owner` when calling `upload_files`, must conform to the pattern `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
if 'id' in params and not re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', params['id']):
raise ValueError("Invalid value for parameter `id` when calling `upload_files`, must conform to the pattern `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'expand_archives' in params:
query_params.append(('expandArchives', params['expand_archives']))
header_params = {}
form_params = []
local_var_files = {}
if 'file' in params:
local_var_files['file'] = params['file']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['multipart/form-data'])
# Authentication setting
auth_settings = ['token']
return self.api_client.call_api('/uploads/{owner}/{id}/files', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SuccessMessage',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
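A minimal usage sketch for the upload methods above, assuming `api` is an instance of the (unnamed here) API class that defines them; the file names follow the docstring's example:

# Synchronous call: returns a SuccessMessage.
with open('file1.csv', 'rb') as payload:
    print(api.upload_files('awesome-user', 'awesome-dataset', payload))

# Callback form: the response goes to the callback and the request thread is returned.
def on_uploaded(response):
    print(response)

with open('file2.csv', 'rb') as payload:
    thread = api.upload_files('awesome-user', 'awesome-dataset', payload, callback=on_uploaded)
    thread.join()  # keep the file open until the asynchronous request finishes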
| avg_line_length: 63.019868 | max_line_length: 854 | alphanum_fraction: 0.635771 | [qsc_* quality-signal values elided] |

hexsha: 5b1127a80b0cc97cf385c079bc30958c2c4a1653 | size: 220 | ext: py | lang: Python
path: corehq/apps/export/exceptions.py | repo: dborowiecki/commcare-hq | head_hexsha: f2f4fa67faec09040a98502f5657444075b63f2e | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null (same path/repo/sha across the stars, issues, and forks columns)
class ExportAppException(Exception):
pass
class BadExportConfiguration(ExportAppException):
pass
class ExportFormValidationException(Exception):
pass
class ExportAsyncException(Exception):
pass
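A brief, hypothetical sketch of how a hierarchy like this is used: raise the specific subclass, catch either it or the shared base.

# Hypothetical usage: catching the base class also catches its subclasses.
def load_export_config(config):
    if config is None:
        raise BadExportConfiguration("export config is missing")
    return config

try:
    load_export_config(None)
except ExportAppException as err:
    print("export failed:", err)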
| avg_line_length: 11.578947 | max_line_length: 49 | alphanum_fraction: 0.772727 | [qsc_* quality-signal values elided] |

hexsha: d2ac686e4cd80daee2a49f1281a2ec1d4a4a44ac | size: 44 | ext: py | lang: Python
path: files/DST du 05 02 2020/dsfzfdsfdsf.py | repo: HenraL/NSI_1ereG6_Programme_Python | head_hexsha: 9f46b848fa2331daca57e5e2e11cba41da45a67f | licenses: ["Unlicense"]
stars: 1 (2021-06-15T13:44:47.000Z to 2021-06-15T13:44:47.000Z) | issues: null | forks: null
from math import *


def moyer(L):
    # "moyer" appears to be short for "moyenne" (average); the original file
    # was truncated after the bare name `average`, so this body is a minimal
    # completion that returns the mean of L.
    average = sum(L) / len(L)
    return average
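A quick check of the completed helper:

print(moyer([10, 12, 14]))  # 12.0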
| avg_line_length: 11 | max_line_length: 17 | alphanum_fraction: 0.681818 | [qsc_* quality-signal values elided] |

hexsha: d2b8d15490f0dac9ecc4e35e5758b4c6daf42318 | size: 92 | ext: py | lang: Python
path: test-registry/install/f.py | repo: NathanTP/open-lambda | head_hexsha: 63b2f51bbe1ac121d14af9a5562c547227c4d2ab | licenses: ["Apache-2.0"]
stars: 826 (2016-06-18T04:42:13.000Z to 2022-03-31T13:21:33.000Z) | issues: 62 (2016-07-14T11:10:02.000Z to 2022-02-12T18:33:55.000Z) | forks: 105 (2016-06-20T15:36:22.000Z to 2022-02-01T06:04:58.000Z)
import requests
import urllib3
# ol-install: requests
def f(event):
return 'imported'
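The `# ol-install: requests` line is a directive read by the open-lambda platform rather than executable code; for a local smoke test the handler can simply be called directly (the empty event dict is an arbitrary choice):

assert f({}) == 'imported'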
| avg_line_length: 11.5 | max_line_length: 22 | alphanum_fraction: 0.728261 | [qsc_* quality-signal values elided] |

hexsha: d2c590031ecc997be5e225a578049279cc6848d3 | size: 67 | ext: py | lang: Python
path: src/ketrics_dev_tools/command_line.py | repo: ketrics/ketrics-dev-tools | head_hexsha: e1231615456a6fec8f71a0134ada1ea507984bd1 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from . import KetricsDevTools
def main():
KetricsDevTools()
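A `main()` like this is typically exposed as a console script; a hypothetical packaging fragment (the command name is an assumption, not taken from the project):

# Hypothetical setup.py fragment wiring a shell command to main() above.
from setuptools import setup

setup(
    name='ketrics-dev-tools',
    entry_points={
        'console_scripts': [
            'ketrics-dev-tools = ketrics_dev_tools.command_line:main',
        ],
    },
)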
| avg_line_length: 9.571429 | max_line_length: 29 | alphanum_fraction: 0.701493 | [qsc_* quality-signal values elided] |

hexsha: d2cda599af5afa1f5e55bab4d4b114afd37eab3e | size: 102 | ext: py | lang: Python
path: basicsr/metrics/__init__.py | repo: yuangan/Simple-SR | head_hexsha: 630d2f9441b116620af88ff882eca4673dedc047 | licenses: ["MIT"]
stars: 10 (2021-06-24T12:03:33.000Z to 2022-03-05T03:29:34.000Z) | issues: null | forks: 2 (2021-07-01T09:08:40.000Z to 2022-02-23T15:31:31.000Z)
from .psnr_ssim import calculate_psnr, calculate_ssim
__all__ = ['calculate_psnr', 'calculate_ssim']
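For reference, here is a self-contained NumPy sketch of what a `calculate_psnr`-style metric computes for 8-bit images; it illustrates the formula only and is not the library's implementation:

import numpy as np

def psnr_8bit(img1, img2):
    # PSNR in dB for uint8 images: 10 * log10(MAX^2 / MSE), with MAX = 255.
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10 * np.log10(255.0 ** 2 / mse)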
| avg_line_length: 25.5 | max_line_length: 53 | alphanum_fraction: 0.803922 | [qsc_* quality-signal values elided] |

hexsha: d2d06c18b99981c89ef1b5385e853aa6a731f149 | size: 3,601 | ext: py | lang: Python
path: homework/yaryna/game_hw_1.py | repo: aodarc/LIST-010 | head_hexsha: 4579a047ca1ae0266f368349ea4536c6eb367f97 | licenses: ["MIT"]
stars: null | issues: 4 (2018-12-19T13:41:12.000Z to 2019-01-14T15:11:11.000Z) | forks: null
print("Hello! This game is about emotions. Try to put letters in the correct order and create words")
n=0
word = "happy"
print("ypahp")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "exited"
print("txidee")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "lucky"
print("cyulk")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "angry"
print("ynrga")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "careful"
print("lfarcue")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "suprised"
print("priesdur")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "glad"
print("lgda")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "sleepy"
print("ypeels")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "shocked"
print("edcoshk")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "upset"
print("utsep")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "worried"
print("ordiwer")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "unpleasant"
print("taalnupesn")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "pessimistic"
print("esmiiisstcp")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "unhappy"
print("hyaupnp")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
word = "sorry"
print("rysor")
for i in range(1,3):
word1= input("Enter your answer:")
if word1==word:
print("Great!")
n+=1
break
else:
print("Try again!")
i=i+1
if n >= 13:
print("Congratulations! Yor result is {} !".format(n))
elif n >= 9 and n < 13:
print("It was great! Your result is {} !".format (n))
else:
print("You need be more careful! Your result is {} !".format(n))
| avg_line_length: 18.186869 | max_line_length: 101 | alphanum_fraction: 0.510691 | [qsc_* quality-signal values elided] |

hexsha: d2fd925e580d01dde531681d96e9167fe4c49ae0 | size: 98 | ext: py | lang: Python
path: src/footings_idi_model/outputs/__init__.py | repo: dustindall/idi-model | head_hexsha: 5d026f4756f03f9cb797de5a8f0c3c6d2b349ccb | licenses: ["BSD-3-Clause"]
stars: 2 (2020-10-06T15:52:12.000Z to 2020-11-30T19:07:35.000Z) | issues: 29 (2020-06-28T12:22:59.000Z to 2021-04-21T11:03:07.000Z) | forks: 1 (repo footings/footings-idi-model, 2020-06-24T09:54:46.000Z to 2020-06-24T09:54:46.000Z)
from .active_lives import ActiveLivesValOutput
from .disabled_lives import DisabledLivesValOutput
| avg_line_length: 32.666667 | max_line_length: 50 | alphanum_fraction: 0.897959 | [qsc_* quality-signal values elided] |

hexsha: d2ffa16a49ebd94da73b301b967d348825c96b17 | size: 1,029 | ext: py | lang: Python
path: tests/test_vocab_tsv.py | repo: juhoinkinen/Annif | head_hexsha: 6ac84312ce6f4fbdfbbb681a62fe218d90abde93 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
"""Unit tests for TSV vocabulary functionality in Annif"""
from annif.corpus import SubjectIndex
def test_load_tsv_uri_brackets(tmpdir):
tmpfile = tmpdir.join('subjects.tsv')
tmpfile.write("<http://www.yso.fi/onto/yso/p8993>\thylyt\n" +
"<http://www.yso.fi/onto/yso/p9285>\tneoliittinen kausi")
index = SubjectIndex.load(str(tmpfile))
assert len(index) == 2
assert index[0] == ('http://www.yso.fi/onto/yso/p8993', 'hylyt')
assert index[1] == (
'http://www.yso.fi/onto/yso/p9285',
'neoliittinen kausi')
def test_load_tsv_uri_nobrackets(tmpdir):
tmpfile = tmpdir.join('subjects.tsv')
tmpfile.write("http://www.yso.fi/onto/yso/p8993\thylyt\n" +
"http://www.yso.fi/onto/yso/p9285\tneoliittinen kausi")
index = SubjectIndex.load(str(tmpfile))
assert len(index) == 2
assert index[0] == ('http://www.yso.fi/onto/yso/p8993', 'hylyt')
assert index[1] == (
'http://www.yso.fi/onto/yso/p9285',
'neoliittinen kausi')
| avg_line_length: 33.193548 | max_line_length: 75 | alphanum_fraction: 0.637512 | [qsc_* quality-signal values elided] |

hexsha: 825716fe09d94125ddd537a64cc888c9fa9ae569 | size: 188 | ext: py | lang: Python
path: pypy/module/itertools/test/test_ztranslation.py | repo: olliemath/pypy | head_hexsha: 8b873bd0b8bf76075aba3d915c260789f26f5788 | licenses: ["Apache-2.0", "OpenSSL"]
stars: null | issues: null | forks: 1 (2022-03-30T11:42:37.000Z to 2022-03-30T11:42:37.000Z)
from pypy.objspace.fake.checkmodule import checkmodule
def test_checkmodule():
# itertools.compress.__next__() crashes in backendopt
checkmodule('itertools', ignore=['compress'])
| avg_line_length: 31.333333 | max_line_length: 57 | alphanum_fraction: 0.771277 | [qsc_* quality-signal values elided] |

hexsha: 827fe775c62cfee94c84955030576b61939df637 | size: 16,769 | ext: py | lang: Python
path: tests/test_cookies.py | repo: jsenecal/async-fastapi-jwt-auth | head_hexsha: dd825f51a2e93192d4128c85b0d4a73df1a9c418 | licenses: ["MIT"]
stars: 4 (2022-02-04T08:06:32.000Z to 2022-03-25T23:22:07.000Z) | issues: null | forks: 1 (2022-02-16T16:26:27.000Z to 2022-02-16T16:26:27.000Z)
import pytest
from async_fastapi_jwt_auth import AuthJWT
from async_fastapi_jwt_auth.exceptions import AuthJWTException
from fastapi import FastAPI, Request, Depends
from fastapi.responses import JSONResponse
from fastapi.testclient import TestClient
@pytest.fixture(scope='function')
def client():
app = FastAPI()
@app.exception_handler(AuthJWTException)
def authjwt_exception_handler(request: Request, exc: AuthJWTException):
return JSONResponse(
status_code=exc.status_code,
content={"detail": exc.message}
)
@app.get('/all-token')
async def all_token(Authorize: AuthJWT = Depends()):
access_token = await Authorize.create_access_token(subject=1, fresh=True)
refresh_token = await Authorize.create_refresh_token(subject=1)
await Authorize.set_access_cookies(access_token)
await Authorize.set_refresh_cookies(refresh_token)
return {"msg": "all token"}
@app.get('/all-token-response')
async def all_token_response(Authorize: AuthJWT = Depends()):
access_token = await Authorize.create_access_token(subject=1, fresh=True)
refresh_token = await Authorize.create_refresh_token(subject=1)
response = JSONResponse(content={"msg": "all token"})
await Authorize.set_access_cookies(access_token, response)
await Authorize.set_refresh_cookies(refresh_token, response)
return response
@app.get('/access-token')
async def access_token(Authorize: AuthJWT = Depends()):
access_token = await Authorize.create_access_token(subject=1)
await Authorize.set_access_cookies(access_token)
return {"msg": "access token"}
@app.get('/access-token-response')
async def access_token_response(Authorize: AuthJWT = Depends()):
access_token = await Authorize.create_access_token(subject=1)
response = JSONResponse(content={"msg": "access token"})
await Authorize.set_access_cookies(access_token, response)
return response
@app.get('/refresh-token')
async def refresh_token(Authorize: AuthJWT = Depends()):
refresh_token = await Authorize.create_refresh_token(subject=1)
await Authorize.set_refresh_cookies(refresh_token)
return {"msg": "refresh token"}
@app.get('/refresh-token-response')
async def refresh_token_response(Authorize: AuthJWT = Depends()):
refresh_token = await Authorize.create_refresh_token(subject=1)
response = JSONResponse(content={"msg": "refresh token"})
await Authorize.set_refresh_cookies(refresh_token, response)
return response
@app.get('/unset-all-token')
async def unset_all_token(Authorize: AuthJWT = Depends()):
await Authorize.unset_jwt_cookies()
return {"msg": "unset all token"}
@app.get('/unset-all-token-response')
async def unset_all_token_response(Authorize: AuthJWT = Depends()):
response = JSONResponse(content={"msg": "unset all token"})
await Authorize.unset_jwt_cookies(response)
return response
@app.get('/unset-access-token')
async def unset_access_token(Authorize: AuthJWT = Depends()):
await Authorize.unset_access_cookies()
@app.get('/unset-refresh-token')
async def unset_refresh_token(Authorize: AuthJWT = Depends()):
await Authorize.unset_refresh_cookies()
@app.post('/jwt-optional')
async def jwt_optional(Authorize: AuthJWT = Depends()):
await Authorize.jwt_optional()
return {"hello": await Authorize.get_jwt_subject()}
@app.post('/jwt-required')
async def jwt_required(Authorize: AuthJWT = Depends()):
await Authorize.jwt_required()
return {"hello": await Authorize.get_jwt_subject()}
@app.post('/jwt-refresh')
async def jwt_refresh(Authorize: AuthJWT = Depends()):
await Authorize.jwt_refresh_token_required()
return {"hello": await Authorize.get_jwt_subject()}
@app.post('/jwt-fresh')
async def jwt_fresh(Authorize: AuthJWT = Depends()):
await Authorize.fresh_jwt_required()
return {"hello": await Authorize.get_jwt_subject()}
client = TestClient(app)
return client
@pytest.mark.parametrize(
"url", ["/access-token", "/refresh-token", "/unset-access-token", "/unset-refresh-token"]
)
def test_warning_if_cookies_not_in_token_location(url, client):
@AuthJWT.load_config
def get_secret_key():
return [("authjwt_secret_key", "secret")]
with pytest.raises(RuntimeWarning, match=r"authjwt_token_location"):
client.get(url)
async def test_set_cookie_not_valid_type_max_age(Authorize):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
token = await Authorize.create_access_token(subject=1)
with pytest.raises(TypeError, match=r"max_age"):
await Authorize.set_access_cookies(token, max_age="string")
with pytest.raises(TypeError, match=r"max_age"):
await Authorize.set_refresh_cookies(token, max_age="string")
@pytest.mark.asyncio
async def test_set_unset_cookies_not_valid_type_response(Authorize):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
token = await Authorize.create_access_token(subject=1)
with pytest.raises(TypeError, match=r"response"):
await Authorize.set_access_cookies(token, response={"msg": "hello"})
with pytest.raises(TypeError, match=r"response"):
await Authorize.set_refresh_cookies(token, response={"msg": "hello"})
with pytest.raises(TypeError, match=r"response"):
await Authorize.unset_jwt_cookies({"msg": "hello"})
with pytest.raises(TypeError, match=r"response"):
await Authorize.unset_access_cookies({"msg": "hello"})
with pytest.raises(TypeError, match=r"response"):
await Authorize.unset_refresh_cookies({"msg": "hello"})
@pytest.mark.parametrize("url",
["/access-token", "/refresh-token", "/access-token-response", "/refresh-token-response"])
def test_set_cookie_csrf_protect_false(url, client):
@AuthJWT.load_config
def get_cookie_location():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", False)
]
cookie_key = url.split("-")[0][1:]
response = client.get(url)
assert response.cookies.get("csrf_{}_token".format(cookie_key)) is None
@pytest.mark.parametrize("url",
["/access-token", "/refresh-token", "/access-token-response", "/refresh-token-response"])
def test_set_cookie_csrf_protect_true(url, client):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
cookie_key = url.split("-")[0][1:]
response = client.get(url)
assert response.cookies.get("csrf_{}_token".format(cookie_key)) is not None
def test_unset_all_cookie(client):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
response = client.get('/all-token')
assert response.cookies.get("access_token_cookie") is not None
assert response.cookies.get("csrf_access_token") is not None
assert response.cookies.get("refresh_token_cookie") is not None
assert response.cookies.get("csrf_refresh_token") is not None
response = client.get('/unset-all-token')
assert response.cookies.get("access_token_cookie") is None
assert response.cookies.get("csrf_access_token") is None
assert response.cookies.get("refresh_token_cookie") is None
assert response.cookies.get("csrf_refresh_token") is None
def test_unset_all_cookie_response(client):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
response = client.get('/all-token-response')
assert response.cookies.get("access_token_cookie") is not None
assert response.cookies.get("csrf_access_token") is not None
assert response.cookies.get("refresh_token_cookie") is not None
assert response.cookies.get("csrf_refresh_token") is not None
response = client.get('/unset-all-token-response')
assert response.cookies.get("access_token_cookie") is None
assert response.cookies.get("csrf_access_token") is None
assert response.cookies.get("refresh_token_cookie") is None
assert response.cookies.get("csrf_refresh_token") is None
def test_custom_cookie_key(client):
@AuthJWT.load_config
def get_cookie_location():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_access_cookie_key", "access_cookie"),
("authjwt_refresh_cookie_key", "refresh_cookie"),
("authjwt_access_csrf_cookie_key", "csrf_access"),
("authjwt_refresh_csrf_cookie_key", "csrf_refresh")
]
response = client.get('/all-token')
assert response.cookies.get("access_cookie") is not None
assert response.cookies.get("csrf_access") is not None
assert response.cookies.get("refresh_cookie") is not None
assert response.cookies.get("csrf_refresh") is not None
response = client.get('/unset-all-token')
assert response.cookies.get("access_cookie") is None
assert response.cookies.get("csrf_access") is None
assert response.cookies.get("refresh_cookie") is None
assert response.cookies.get("csrf_refresh") is None
def test_cookie_optional_protected(client):
@AuthJWT.load_config
def get_cookie_location():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
url = '/jwt-optional'
# without token
response = client.post(url)
assert response.status_code == 200
assert response.json() == {'hello': None}
# change request methods and not check csrf token
@AuthJWT.load_config
def change_request_methods():
return [
("authjwt_csrf_methods", {"GET"}),
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret")
]
client.get('/access-token')
response = client.post(url)
assert response.status_code == 200
assert response.json() == {'hello': 1}
# change csrf protect to False not check csrf token
@AuthJWT.load_config
def change_request_csrf_protect_to_false():
return [
("authjwt_csrf_methods", {'POST', 'PUT', 'PATCH', 'DELETE'}),
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", False)
]
client.get('/access-token')
response = client.post(url)
assert response.status_code == 200
assert response.json() == {'hello': 1}
# missing csrf token
@AuthJWT.load_config
def change_csrf_protect_to_true():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", True)
]
res = client.get('/access-token')
csrf_token = res.cookies.get("csrf_access_token")
response = client.post(url)
assert response.status_code == 401
assert response.json() == {'detail': 'Missing CSRF Token'}
# csrf token do not match
response = client.post(url, headers={"X-CSRF-Token": "invalid"})
assert response.status_code == 401
assert response.json() == {'detail': 'CSRF double submit tokens do not match'}
response = client.post(url, headers={"X-CSRF-Token": csrf_token})
assert response.status_code == 200
assert response.json() == {'hello': 1}
# missing claim csrf in token
@AuthJWT.load_config
def change_request_csrf_protect_to_falsee():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", False)
]
client.get('/access-token')
@AuthJWT.load_config
def change_request_csrf_protect_to_truee():
return [("authjwt_token_location", {'cookies'}), ("authjwt_secret_key", "secret")]
response = client.post(url, headers={"X-CSRF-Token": "invalid"})
assert response.status_code == 422
assert response.json() == {'detail': 'Missing claim: csrf'}
# custom csrf header name and cookie key
@AuthJWT.load_config
def custom_header_name_cookie_key():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_access_cookie_key", "access_cookie"),
("authjwt_access_csrf_header_name", "X-CSRF")
]
res = client.get('/access-token')
csrf_token = res.cookies.get("csrf_access_token")
# valid request
response = client.post(url, headers={"X-CSRF": csrf_token})
assert response.status_code == 200
assert response.json() == {'hello': 1}
@pytest.mark.parametrize("url", ["/jwt-required", "/jwt-refresh", "/jwt-fresh"])
def test_cookie_protected(url, client):
# custom csrf header name and cookie key
@AuthJWT.load_config
def custom_header_name_cookie_key():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_access_cookie_key", "access_cookie"),
("authjwt_access_csrf_header_name", "X-CSRF-Access"),
("authjwt_refresh_cookie_key", "refresh_cookie"),
("authjwt_refresh_csrf_header_name", "X-CSRF-Refresh")
]
res = client.get('/all-token')
csrf_access = res.cookies.get("csrf_access_token")
csrf_refresh = res.cookies.get("csrf_refresh_token")
if url != "/jwt-refresh":
response = client.post(url, headers={"X-CSRF-Access": csrf_access})
else:
response = client.post(url, headers={"X-CSRF-Refresh": csrf_refresh})
assert response.status_code == 200
assert response.json() == {'hello': 1}
# missing csrf token
response = client.post(url)
assert response.status_code == 401
assert response.json() == {'detail': 'Missing CSRF Token'}
# missing cookie
client.get('/unset-all-token')
response = client.post(url)
assert response.status_code == 401
if url != "/jwt-refresh":
assert response.json() == {'detail': 'Missing cookie access_cookie'}
else:
assert response.json() == {'detail': 'Missing cookie refresh_cookie'}
# change csrf protect to False not check csrf token
@AuthJWT.load_config
def change_request_csrf_protect_to_false():
return [
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", False)
]
client.get('/all-token')
response = client.post(url)
assert response.status_code == 200
assert response.json() == {'hello': 1}
# change request methods and not check csrf token
@AuthJWT.load_config
def change_request_methods():
return [
("authjwt_csrf_methods", {"GET"}),
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
("authjwt_cookie_csrf_protect", True)
]
response = client.post(url)
assert response.status_code == 200
assert response.json() == {'hello': 1}
# missing claim csrf in token
@AuthJWT.load_config
def change_request_methods_to_default():
return [
("authjwt_csrf_methods", {'POST', 'PUT', 'PATCH', 'DELETE'}),
("authjwt_token_location", {'cookies'}),
("authjwt_secret_key", "secret"),
]
response = client.post(url, headers={"X-CSRF-Token": "invalid"})
assert response.status_code == 422
assert response.json() == {'detail': 'Missing claim: csrf'}
# csrf token do not match
res = client.get('/all-token')
csrf_access = res.cookies.get("csrf_access_token")
csrf_refresh = res.cookies.get("csrf_refresh_token")
response = client.post(url, headers={"X-CSRF-Token": "invalid"})
assert response.status_code == 401
assert response.json() == {'detail': 'CSRF double submit tokens do not match'}
# valid request
if url != "/jwt-refresh":
response = client.post(url, headers={"X-CSRF-Token": csrf_access})
else:
response = client.post(url, headers={"X-CSRF-Token": csrf_refresh})
assert response.status_code == 200
assert response.json() == {'hello': 1}
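The tests above exercise the double-submit CSRF pattern: the token is stored both as a `csrf` claim inside the JWT cookie and in a separate readable cookie, and state-changing requests must echo it in a header. A framework-agnostic sketch of the core check (names are illustrative, not the library's internals):

def check_csrf(decoded_jwt, request_headers, header_name="X-CSRF-Token"):
    # decoded_jwt is the already-verified token payload (a dict).
    sent = request_headers.get(header_name)
    if sent is None:
        raise PermissionError("Missing CSRF Token")
    if "csrf" not in decoded_jwt:
        raise ValueError("Missing claim: csrf")
    if sent != decoded_jwt["csrf"]:
        raise PermissionError("CSRF double submit tokens do not match")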
| avg_line_length: 36.936123 | max_line_length: 114 | alphanum_fraction: 0.671596 | [qsc_* quality-signal values elided] |

hexsha: 829b66a7d3322553c5ceacae30708193ffeee163 | size: 235 | ext: py | lang: Python
path: data/extractors/__init__.py | repo: alanwang93/ATEC2018-NLP-PyTorch | head_hexsha: 8e00c6af1d3e1db7ab4433a0587784e45f830347 | licenses: ["MIT"]
stars: 1 (2021-09-07T01:27:29.000Z to 2021-09-07T01:27:29.000Z) | issues: null | forks: null
from .extractor import Extractor
from .word_embed_extractor import WordEmbedExtractor
from .similarity_extractor import SimilarityExtractor
from .word_bool_extractor import WordBoolExtractor
from .tfidf_extractor import TFIDFExtractor
| avg_line_length: 39.166667 | max_line_length: 53 | alphanum_fraction: 0.893617 | [qsc_* quality-signal values elided] |

hexsha: 82e30e001972e51a758ae09445bf2d54ef50c419 | size: 39 | ext: py | lang: Python
path: cvstudio/view/widgets/switch_button/__init__.py | repo: haruiz/PytorchCvStudio | head_hexsha: ccf79dd0cc0d61f3fd01b1b5d96f7cda7b681eef | licenses: ["MIT"]
stars: 32 (2019-10-31T03:10:52.000Z to 2020-12-23T11:50:53.000Z) | issues: 19 (repo haruiz/CvStudio, 2019-10-31T15:06:05.000Z to 2020-06-15T02:21:55.000Z) | forks: 8 (2019-10-31T03:32:50.000Z to 2020-07-17T20:47:37.000Z)
from .switch_button import SwitchButton
| avg_line_length: 39 | max_line_length: 39 | alphanum_fraction: 0.897436 | [qsc_* quality-signal values elided] |

hexsha: 7d63ca4be42ee1faf162b5fc99e66c4bf4979b6a | size: 15,093 | ext: py | lang: Python
path: tests/test_cbers.py | repo: cogeotiff/rio-tiler-pds | head_hexsha: 52482c80baf7fd26cf06cd2af2961cca396b20e0 | licenses: ["BSD-3-Clause"]
stars: 30 (2020-07-21T23:32:14.000Z to 2022-02-21T23:35:35.000Z) | issues: 36 (2020-07-21T20:48:51.000Z to 2021-10-06T08:15:00.000Z) | forks: 4 (2020-07-23T06:19:30.000Z to 2021-11-18T03:27:04.000Z)
"""tests rio_tiler.sentinel2"""
import os
from unittest.mock import patch
import pytest
import rasterio
from rio_tiler.errors import InvalidBandName, MissingBands, TileOutsideBounds
from rio_tiler_pds.cbers.aws import CBERSReader
from rio_tiler_pds.cbers.utils import sceneid_parser
from rio_tiler_pds.errors import InvalidCBERSSceneId
CBERS_BUCKET = os.path.join(os.path.dirname(__file__), "fixtures", "cbers-pds")
# CBERS4 test scenes
CBERS_MUX_SCENE = "CBERS_4_MUX_20171121_057_094_L2"
CBERS_AWFI_SCENE = "CBERS_4_AWFI_20170420_146_129_L2"
CBERS_PAN10M_SCENE = "CBERS_4_PAN10M_20170427_161_109_L4"
CBERS_PAN5M_SCENE = "CBERS_4_PAN5M_20170425_153_114_L4"
# CBERS4A test scenes
CBERS_4A_MUX_SCENE = "CBERS_4A_MUX_20200808_201_137_L4"
CBERS_4A_WPM_SCENE = "CBERS_4A_WPM_20200730_209_139_L4"
CBERS_4A_WFI_SCENE = "CBERS_4A_WFI_20200801_221_156_L4"
@pytest.fixture(autouse=True)
def testing_env_var(monkeypatch):
"""Set fake env to make sure we don't hit AWS services."""
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "jqt")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "rde")
monkeypatch.delenv("AWS_PROFILE", raising=False)
monkeypatch.setenv("AWS_CONFIG_FILE", "/tmp/noconfigheere")
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/noconfighereeither")
monkeypatch.setenv("GDAL_DISABLE_READDIR_ON_OPEN", "EMPTY_DIR")
def mock_rasterio_open(band):
"""Mock rasterio Open."""
assert band.startswith("s3://cbers-pds")
band = band.replace("s3://cbers-pds", CBERS_BUCKET)
return rasterio.open(band)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4_MUX(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
scene = "CBERS_4_MUX_20171121_057_094"
with pytest.raises(InvalidCBERSSceneId):
with CBERSReader(scene):
pass
with CBERSReader(CBERS_MUX_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_MUX_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B5", "B6", "B7", "B8")
with pytest.raises(MissingBands):
cbers.info()
with pytest.raises(InvalidBandName):
cbers.info(bands="BAND5")
metadata = cbers.info(bands="B5")
assert len(metadata["band_metadata"]) == 1
assert metadata["band_descriptions"] == [("B5", "")]
metadata = cbers.info(bands=cbers.bands)
assert len(metadata["band_metadata"]) == 4
assert metadata["band_descriptions"] == [
("B5", ""),
("B6", ""),
("B7", ""),
("B8", ""),
]
with pytest.raises(MissingBands):
cbers.stats()
stats = cbers.stats(bands="B5")
assert len(stats.items()) == 1
assert stats["B5"]["percentiles"] == [28, 98]
stats = cbers.stats(bands=cbers.bands, hist_options={"bins": 20})
assert len(stats["B5"]["histogram"][0]) == 20
with pytest.raises(MissingBands):
cbers.metadata()
metadata = cbers.metadata(bands="B5")
assert metadata["statistics"]["B5"]["percentiles"] == [28, 98]
metadata = cbers.metadata(bands=cbers.bands)
assert metadata["statistics"]["B5"]["percentiles"] == [28, 98]
assert len(metadata["band_metadata"]) == 4
assert metadata["band_descriptions"] == [
("B5", ""),
("B6", ""),
("B7", ""),
("B8", ""),
]
tile_z = 10
tile_x = 664
tile_y = 495
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
tile_z = 10
tile_x = 694
tile_y = 495
with pytest.raises(TileOutsideBounds):
cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
tile_z = 10
tile_x = 664
tile_y = 495
data, mask = cbers.tile(
tile_x, tile_y, tile_z, expression="B8*0.8, B7*1.1, B6*0.8"
)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4_AWFI(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_AWFI_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_AWFI_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B13", "B14", "B15", "B16")
tile_z = 10
tile_x = 401
tile_y = 585
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4_PAN10M(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_PAN10M_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_PAN10M_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B2", "B3", "B4")
tile_z = 10
tile_x = 370
tile_y = 535
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4_PAN5M(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_PAN5M_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_PAN5M_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B1",)
tile_z = 10
tile_x = 390
tile_y = 547
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4A_MUX(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_4A_MUX_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_4A_MUX_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B5", "B6", "B7", "B8")
with pytest.raises(MissingBands):
cbers.info()
with pytest.raises(InvalidBandName):
cbers.info(bands="BAND5")
metadata = cbers.info(bands="B5")
assert len(metadata["band_metadata"]) == 1
assert metadata["band_descriptions"] == [("B5", "")]
metadata = cbers.info(bands=cbers.bands)
assert len(metadata["band_metadata"]) == 4
assert metadata["band_descriptions"] == [
("B5", ""),
("B6", ""),
("B7", ""),
("B8", ""),
]
with pytest.raises(MissingBands):
cbers.stats()
stats = cbers.stats(bands="B5")
assert len(stats.items()) == 1
assert stats["B5"]["percentiles"] == [30, 52]
stats = cbers.stats(bands=cbers.bands, hist_options=dict(bins=20))
assert len(stats["B5"]["histogram"][0]) == 20
with pytest.raises(MissingBands):
cbers.metadata()
metadata = cbers.metadata(bands="B5")
assert metadata["statistics"]["B5"]["percentiles"] == [30, 52]
metadata = cbers.metadata(bands=cbers.bands)
assert metadata["statistics"]["B5"]["percentiles"] == [30, 52]
assert len(metadata["band_metadata"]) == 4
assert metadata["band_descriptions"] == [
("B5", ""),
("B6", ""),
("B7", ""),
("B8", ""),
]
tile_z = 10
tile_x = 385
tile_y = 567
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
tile_z = 10
tile_x = 694
tile_y = 495
with pytest.raises(TileOutsideBounds):
cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
tile_z = 10
tile_x = 385
tile_y = 567
data, mask = cbers.tile(
tile_x, tile_y, tile_z, expression="B8*0.8, B7*1.1, B6*0.8"
)
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4A_WPM(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_4A_WPM_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_4A_WPM_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B0", "B1", "B2", "B3", "B4")
tile_z = 10
tile_x = 366
tile_y = 572
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
@patch("rio_tiler.io.cogeo.rasterio")
def test_AWSPDS_CBERSReader_CB4A_WFI(rio):
"""Should work as expected (get bounds)"""
rio.open = mock_rasterio_open
with CBERSReader(CBERS_4A_WFI_SCENE) as cbers:
bounds = cbers.bounds
assert cbers.scene_params.get("scene") == CBERS_4A_WFI_SCENE
assert len(bounds) == 4
assert cbers.minzoom
assert cbers.maxzoom
assert cbers.bands == ("B13", "B14", "B15", "B16")
tile_z = 10
tile_x = 316
tile_y = 614
data, mask = cbers.tile(tile_x, tile_y, tile_z, bands=cbers.scene_params["rgb"])
assert data.shape == (3, 256, 256)
assert mask.shape == (256, 256)
def test_cbers_id_valid():
"""Parse valid CBERS sceneids and return metadata."""
scene = "CBERS_4_MUX_20171121_057_094_L2"
expected_content = {
"satellite": "CBERS",
"mission": "4",
"instrument": "MUX",
"acquisitionYear": "2017",
"acquisitionMonth": "11",
"acquisitionDay": "21",
"path": "057",
"row": "094",
"processingCorrectionLevel": "L2",
"scene": "CBERS_4_MUX_20171121_057_094_L2",
"date": "2017-11-21",
"reference_band": "B6",
"bands": ("B5", "B6", "B7", "B8"),
"rgb": ("B7", "B6", "B5"),
}
assert sceneid_parser(scene) == expected_content
scene = "CBERS_4_AWFI_20171121_057_094_L2"
expected_content = {
"satellite": "CBERS",
"mission": "4",
"instrument": "AWFI",
"acquisitionYear": "2017",
"acquisitionMonth": "11",
"acquisitionDay": "21",
"path": "057",
"row": "094",
"processingCorrectionLevel": "L2",
"scene": "CBERS_4_AWFI_20171121_057_094_L2",
"date": "2017-11-21",
"reference_band": "B14",
"bands": ("B13", "B14", "B15", "B16"),
"rgb": ("B15", "B14", "B13"),
}
assert sceneid_parser(scene) == expected_content
scene = "CBERS_4_PAN10M_20171121_057_094_L2"
expected_content = {
"satellite": "CBERS",
"mission": "4",
"instrument": "PAN10M",
"acquisitionYear": "2017",
"acquisitionMonth": "11",
"acquisitionDay": "21",
"path": "057",
"row": "094",
"processingCorrectionLevel": "L2",
"scene": "CBERS_4_PAN10M_20171121_057_094_L2",
"date": "2017-11-21",
"reference_band": "B4",
"bands": ("B2", "B3", "B4"),
"rgb": ("B3", "B4", "B2"),
}
assert sceneid_parser(scene) == expected_content
scene = "CBERS_4_PAN5M_20171121_057_094_L2"
expected_content = {
"satellite": "CBERS",
"mission": "4",
"instrument": "PAN5M",
"acquisitionYear": "2017",
"acquisitionMonth": "11",
"acquisitionDay": "21",
"path": "057",
"row": "094",
"processingCorrectionLevel": "L2",
"scene": "CBERS_4_PAN5M_20171121_057_094_L2",
"date": "2017-11-21",
"reference_band": "B1",
"bands": ("B1",),
"rgb": ("B1", "B1", "B1"),
    }
    assert sceneid_parser(scene) == expected_content
    scene = "CBERS_4A_MUX_20200808_201_137_L4"
expected_content = {
"satellite": "CBERS",
"mission": "4A",
"instrument": "MUX",
"acquisitionYear": "2020",
"acquisitionMonth": "08",
"acquisitionDay": "08",
"path": "201",
"row": "137",
"processingCorrectionLevel": "L4",
"scene": "CBERS_4A_MUX_20200808_201_137_L4",
"date": "2020-08-08",
"reference_band": "B6",
"bands": ("B5", "B6", "B7", "B8"),
"rgb": ("B7", "B6", "B5"),
    }
    assert sceneid_parser(scene) == expected_content
    # Same as above, testing the L2A (and L2B) processing levels
    scene = "CBERS_4A_MUX_20200808_201_137_L2A"
expected_content = {
"satellite": "CBERS",
"mission": "4A",
"instrument": "MUX",
"acquisitionYear": "2020",
"acquisitionMonth": "08",
"acquisitionDay": "08",
"path": "201",
"row": "137",
"processingCorrectionLevel": "L2A",
"scene": "CBERS_4A_MUX_20200808_201_137_L2A",
"date": "2020-08-08",
"reference_band": "B6",
"bands": ("B5", "B6", "B7", "B8"),
"rgb": ("B7", "B6", "B5"),
}
assert sceneid_parser(scene) == expected_content
scene = "CBERS_4A_WFI_20200801_221_156_L4"
expected_content = {
"satellite": "CBERS",
"mission": "4A",
"instrument": "WFI",
"acquisitionYear": "2020",
"acquisitionMonth": "08",
"acquisitionDay": "01",
"path": "221",
"row": "156",
"processingCorrectionLevel": "L4",
"scene": "CBERS_4A_WFI_20200801_221_156_L4",
"date": "2020-08-01",
"reference_band": "B14",
"bands": ("B13", "B14", "B15", "B16"),
"rgb": ("B15", "B14", "B13"),
}
assert sceneid_parser(scene) == expected_content
scene = "CBERS_4A_WPM_20200730_209_139_L4"
expected_content = {
"satellite": "CBERS",
"mission": "4A",
"instrument": "WPM",
"acquisitionYear": "2020",
"acquisitionMonth": "07",
"acquisitionDay": "30",
"path": "209",
"row": "139",
"processingCorrectionLevel": "L4",
"scene": "CBERS_4A_WPM_20200730_209_139_L4",
"date": "2020-07-30",
"reference_band": "B2",
"bands": ("B0", "B1", "B2", "B3", "B4"),
"rgb": ("B3", "B2", "B1"),
}
assert sceneid_parser(scene) == expected_content
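As the assertions above demonstrate, `sceneid_parser` maps a CBERS scene ID string to its metadata fields; a quick check using values taken from the test:

meta = sceneid_parser("CBERS_4_MUX_20171121_057_094_L2")
print(meta["instrument"], meta["date"], meta["rgb"])  # MUX 2017-11-21 ('B7', 'B6', 'B5')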
| avg_line_length: 31.841772 | max_line_length: 88 | alphanum_fraction: 0.580932 | [qsc_* quality-signal values elided] |

hexsha: 7d64e6fe9fe84d9cb4e07635e3cfe69955e089b4 | size: 121 | ext: py | lang: Python
path: sphinx/python-intro/source/code/oneoffcoder/function/basicfunction.py | repo: oneoffcoder/books | head_hexsha: 84619477294a3e37e0d7538adf819113c9e8dcb8 | licenses: ["CC-BY-4.0"]
stars: 26 (2020-05-05T08:07:43.000Z to 2022-02-12T03:28:15.000Z) | issues: 19 (2021-03-10T00:33:51.000Z to 2022-03-02T13:04:32.000Z) | forks: 2 (2022-01-09T16:48:21.000Z to 2022-02-19T17:06:50.000Z)
def add_one(num):
    return num + 1

def minus_one(num):
    return num - 1

x = 10
y = add_one(x)
z = minus_one(x)
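A quick check of the two functions above, including their composition:

print(y, z)  # 11 9
print(add_one(minus_one(x)))  # 10: the two operations cancel out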
| avg_line_length: 9.307692 | max_line_length: 19 | alphanum_fraction: 0.586777 | [qsc_* quality-signal values elided] |

hexsha: 7d7603b7de6e89c930d304c79e53475b096865df | size: 279 | ext: py | lang: Python
path: Python3/Lists/nested_comprehension.py | repo: norbertosanchezdichi/TIL | head_hexsha: 2e9719ddd288022f53b094a42679e849bdbcc625 | licenses: ["MIT"]
stars: null | issues: null | forks: null
nested_list = [list(range(1,4)), list(range(4,7)), list(range(8,10))]
print(f'{nested_list =}')
print(f'{[[single_list for single_list in nested_list] for single_list in nested_list] =}')
print(f'{[["X" if num % 2 == 0 else "O" for num in range(1,4)] for num in range(1,4)] =}')
| avg_line_length: 55.8 | max_line_length: 91 | alphanum_fraction: 0.65233 | [qsc_* quality-signal values elided] |

hexsha: 7d9ba1a3c9d79d17f96a7fafcef817d6c876b020 | size: 152 | ext: py | lang: Python
path: Python_Birds/01_Programacao_Procedural/03_Modularizacao/debug.py | repo: Miyake-Diogo/Python_Pro | head_hexsha: 01366d34bd1d659c4f0b11356c8981c8353c63b6 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
# Modularization
# Debug
# place the breakpoints where you want execution to stop
def soma(parcela1, parcela2):
    return parcela1 + parcela2

print(soma(1, 2))
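Since Python 3.7, a breakpoint can also be set in the code itself with the built-in breakpoint(), instead of through the IDE; a sketch against soma above:

def soma_debug(parcela1, parcela2):
    breakpoint()  # drops into pdb when this function is called
    return parcela1 + parcela2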
| avg_line_length: 16.888889 | max_line_length: 46 | alphanum_fraction: 0.736842 | [qsc_* quality-signal values elided] |

hexsha: 7dc81c12dd434f50f8359179dd61f78b4b3f3483 | size: 63 | ext: py | lang: Python
path: cgivar2gvcf/__main__.py | repo: madprime/cgivar2gvcf | head_hexsha: 13b4cd8da08669f7e4b0ceed77a7a17082f91037 | licenses: ["MIT"]
stars: 4 (2016-04-28T16:46:40.000Z to 2021-08-14T18:55:50.000Z) | issues: 5 (2015-12-31T21:26:38.000Z to 2016-01-26T20:23:24.000Z) | forks: 2 (repo madprime/cgivar2vcf, 2016-05-25T16:52:30.000Z to 2017-09-12T19:35:33.000Z)
from cgivar2gvcf import from_command_line
from_command_line()
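Because this file is the package's __main__ module, the same entry point also runs via the interpreter's -m switch; the runpy call below is the in-process equivalent of `python -m cgivar2gvcf`:

import runpy
runpy.run_module('cgivar2gvcf', run_name='__main__')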
| avg_line_length: 15.75 | max_line_length: 41 | alphanum_fraction: 0.873016 | [qsc_* quality-signal values elided] |

hexsha: 7df26880277fa213ec3c04e43eafe875e209a2a5 | size: 66 | ext: py | lang: Python
path: marge/tests/__init__.py | repo: FirstDraftGIS/marge | head_hexsha: eeee44f2c8f7956d8dc9d3c47b23a9497e9b75b3 | licenses: ["Apache-2.0"]
stars: 2 (2018-09-30T07:02:57.000Z to 2019-02-15T15:16:39.000Z) | issues: null | forks: null
from .cleaner import *
from .models import *
from .utils import *
| avg_line_length: 16.5 | max_line_length: 22 | alphanum_fraction: 0.727273 | [qsc_* quality-signal values elided] |

hexsha: 81622fc4967dd019bbed90d40f1ed583089e21be | size: 241 | ext: py | lang: Python
path: urlrouter/models.py | repo: mikespub-archive/wkornewald-allbuttonspressed | head_hexsha: 57adb0de9a61b8abec80e678b6589f6a5a3131b5 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
from . import api
from django.db import models
class URLRoute(models.Model):
url = models.CharField(primary_key=True, max_length=256)
handler = models.CharField(max_length=64)
target = models.CharField(max_length=64, null=True)
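A minimal sketch of using the model with Django's ORM (the route values are illustrative; assumes a configured Django project with migrations applied):

route = URLRoute.objects.create(url='/about/', handler='pages.views.show', target='about')
match = URLRoute.objects.filter(url='/about/').first()
if match is not None:
    print(match.handler, match.target)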
| avg_line_length: 30.125 | max_line_length: 60 | alphanum_fraction: 0.755187 | [qsc_* quality-signal values elided] |

hexsha: 8191dcd3c1845015ada5c94394ee5bce70f505bc | size: 9,477 | ext: py | lang: Python
path: code/3_1_4.py | repo: yuanyaaa/StimulateRandomProcess | head_hexsha: 8a6d531e275068650835d3fcabf4204184bc1e4b | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class MatchTurn:
def __init__(self):
self.epochs = [100, 1000, 10000]
self.nrange = 51
self.E_Rn_x = [[], [], []]
self.E_Rn_y = [[], [], []]
# print(self.E_Rn_x)
def Q1(self):
for n in range(2, self.nrange):
print(n)
for e_index, epoch in enumerate(self.epochs):
print(e_index, epoch)
average_Rn = 0
for i in range(epoch):
init = [i for i in range(0, n)]
# print(init)
iter_nums = 0
# print(len(init))
while len(init) != 0:
old_init = init.copy()
# print('old', old_init)
random.shuffle(init)
# print(init)
init = [old_init[i] for i in range(len(init)) if init[i] != old_init[i]]
iter_nums += 1
# print(init)
average_Rn += iter_nums / epoch
self.E_Rn_x[e_index].append(n)
self.E_Rn_y[e_index].append(average_Rn)
x = range(2, self.nrange)
y = [x_ for x_ in x]
fig, ax = plt.subplots(1, 1)
handle_1, = plt.plot(x, y, lw=6, color='navajowhite')
handle_2, = plt.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
handle_3, = plt.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
handle_4, = plt.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
ax.legend(handles=[handle_1, handle_2, handle_3, handle_4],
labels=[' Theoretical value ', 'simulate: epoch=100', 'simulate: epoch=1000',
'simulate: epoch=10000'], loc='best')
# plt.plot(self.E_Rn_x, self.E_Rn_y)
        # Embed an inset axes for the zoomed-in view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.5, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.plot(x, y, lw=6, color='navajowhite')
axins.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
axins.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
axins.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
        # Set the zoomed-in interval
zone_left = 45
zone_right = 47
        # Axis expansion ratios (tune to the actual data)
        x_ratio = 0.5  # expansion ratio of the x-axis display range
        y_ratio = 1  # expansion ratio of the y-axis display range
        # Display range of the x-axis
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
        # Display range of the y-axis
y = np.hstack((self.E_Rn_y[2][zone_left:zone_right], self.E_Rn_y[2][zone_left:zone_right]))
ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
# 调整子坐标系的显示范围
axins.set_xlim(xlim0, xlim1)
axins.set_ylim(ylim0, ylim1)
# 建立父坐标系与子坐标系的连接线
# loc1 loc2: 坐标系的四个角
# 1 (右上) 2 (左上) 3(左下) 4(右下)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec='k', lw=1)
self.plot_config(True, 'Num of People', 'E(Rn)', 'E(Rn)', '3_14_1_epoch10000.pdf')
plt.savefig('3_14_1_epoch.pdf')
def Q2(self):
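# Estimate E(Sn): accumulate len(old_init) draws per round before each
# shuffle; the theoretical reference curve is y = n + n^2/2.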
for n in range(2, self.nrange):
print(n)
for e_index, epoch in enumerate(self.epochs):
print(e_index, epoch)
average_Sn = 0
for i in range(epoch):
init = [i for i in range(0, n)]
iter_nums = 0
while len(init) != 0:
old_init = init.copy()
iter_nums += len(old_init)
random.shuffle(init)
init = [old_init[i] for i in range(len(init)) if init[i] != old_init[i]]
average_Sn += iter_nums / epoch
self.E_Rn_x[e_index].append(n)
self.E_Rn_y[e_index].append(average_Sn)
fig, ax = plt.subplots(1, 1)
x = range(2, self.nrange)
y = [x_ + x_ * x_ / 2 for x_ in x]
handle_1, = plt.plot(x, y, lw=6, color='navajowhite')
handle_2, = plt.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
handle_3, = plt.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
handle_4, = plt.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
ax.legend(handles=[handle_1, handle_2, handle_3, handle_4],
labels=[' Theoretical value ', 'simulate: epoch=100', 'simulate: epoch=1000',
'simulate: epoch=10000'], loc='best')
self.plot_config(True, 'Num of People', 'E(Sn)', 'E(Sn)', '3_14_2_epoch.pdf')
# plt.plot(self.E_Rn_x, self.E_Rn_y)
# Embed an inset axes for the zoomed-in view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.5, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.plot(x, y, lw=6, color='navajowhite')
axins.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
axins.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
axins.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
# Set the zoomed-in interval
zone_left = 45
zone_right = 47
# Axis expansion ratios (adjust to the actual data)
x_ratio = 0.5 # expansion ratio of the x-axis display range
y_ratio = 1 # expansion ratio of the y-axis display range
# Display range of the x-axis
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
# Display range of the y-axis
y = np.hstack((self.E_Rn_y[2][zone_left:zone_right], self.E_Rn_y[2][zone_left:zone_right]))
ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
# Adjust the display range of the inset axes
axins.set_xlim(xlim0, xlim1)
axins.set_ylim(ylim0, ylim1)
# Connector lines between the parent axes and the inset axes
# loc1, loc2: corners of the axes
# 1 (upper right), 2 (upper left), 3 (lower left), 4 (lower right)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec='k', lw=1)
# plt.gca().add_artist(l1)
self.plot_config(True, 'Num of People', 'E(Sn)', 'E(Sn)', '3_14_2_epoch100.pdf')
plt.savefig('3_14_2_epoch.pdf')
def Q3(self):
# Estimate E(Cn) against the n/2 reference line. This experiment keeps a
# single curve, so use the largest trial count and flat coordinate lists
# (Q1/Q2 keep one nested list per epoch setting instead).
self.epoch = self.epochs[-1]
self.E_Rn_x, self.E_Rn_y = [], []
for n in range(2, self.nrange):
average_Cn = 0
print(n)
for i in range(self.epoch):
init = [i for i in range(0, n)]
iter_nums = 0
while len(init) != 0:
old_init = init.copy()
random.shuffle(init)
init = [old_init[i] for i in range(len(init)) if init[i] != old_init[i]]
iter_nums += len(init)
average_Cn += iter_nums / self.epoch / n
self.E_Rn_x.append(n)
self.E_Rn_y.append(average_Cn)
fig, ax = plt.subplots(1, 1)
x = range(2, self.nrange)
y = [x_ / 2 for x_ in x]
handle_1, = ax.plot(x, y, lw=6, color='thistle')
handle_2, = ax.plot(self.E_Rn_x, self.E_Rn_y, color='darkslategray')
self.plot_config(True, 'Num of People', 'E(Cn)', 'E(Cn)', '3_14_3_epoch' + str(self.epoch) + '.pdf')
# plt.show()
plt.legend(handles=[handle_1, handle_2], labels=['n/2', 'E(Cn)'], loc='best')
# plt.savefig('test.pdf')
# plt.plot(self.E_Rn_x, self.E_Rn_y)
# Embed an inset axes for the zoomed-in view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.5, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.plot(x, y, lw=6, color='thistle')
axins.plot(self.E_Rn_x, self.E_Rn_y, color='darkslategray')
# Set the zoomed-in interval
zone_left = 45
zone_right = 47
# Axis expansion ratios (adjust to the actual data)
x_ratio = 0.5 # expansion ratio of the x-axis display range
y_ratio = 1 # expansion ratio of the y-axis display range
# Display range of the x-axis
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
# Display range of the y-axis
y = np.hstack((self.E_Rn_y[zone_left:zone_right], self.E_Rn_y[zone_left:zone_right]))
ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
# Adjust the display range of the inset axes
axins.set_xlim(xlim0, xlim1)
axins.set_ylim(ylim0, ylim1)
# Connector lines between the parent axes and the inset axes
# loc1, loc2: corners of the axes
# 1 (upper right), 2 (upper left), 3 (lower left), 4 (lower right)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec='k', lw=1)
# plt.gca().add_artist(l1)
self.plot_config(True, 'Num of People', 'E(Cn)', 'E(Cn)', '3_14_3.png')
plt.savefig('3_14_3_epoch' + str(self.epoch) + '.pdf')
def plot_config(self, grid: bool, xlabel: str, ylabel: str, title: str, fig: str):
if grid:
plt.grid(linestyle='-.')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
# plt.savefig(fig)
# plt.show()
if __name__ == '__main__':
match_turn = MatchTurn()
match_turn.Q2()
| 41.384279
| 108
| 0.526221
| 1,381
| 9,477
| 3.400434
| 0.115134
| 0.032581
| 0.073041
| 0.045997
| 0.866269
| 0.849872
| 0.836031
| 0.826022
| 0.767036
| 0.767036
| 0
| 0.04293
| 0.321621
| 9,477
| 228
| 109
| 41.565789
| 0.68751
| 0.081355
| 0
| 0.658228
| 0
| 0
| 0.077163
| 0.002426
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031646
| false
| 0
| 0.031646
| 0
| 0.06962
| 0.031646
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4d54df565b046e7b2fdc20d348d5002bc59e5bb
| 17,391
|
py
|
Python
|
watertap/unit_models/zero_order/tests/test_uv_aop_zo.py
|
dangunter/watertap
|
5fe94e4c27dc1ae9e2872960e4183dccadd42d8e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/unit_models/zero_order/tests/test_uv_aop_zo.py
|
dangunter/watertap
|
5fe94e4c27dc1ae9e2872960e4183dccadd42d8e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
watertap/unit_models/zero_order/tests/test_uv_aop_zo.py
|
dangunter/watertap
|
5fe94e4c27dc1ae9e2872960e4183dccadd42d8e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
Tests for zero-order UV-AOP model
"""
import pytest
from io import StringIO
from pyomo.environ import (
check_optimal_termination, ConcreteModel, Constraint, value, Var, Block)
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.util import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.testing import initialization_tester
from idaes.generic_models.costing import UnitModelCostingBlock
from watertap.unit_models.zero_order import UVAOPZO
from watertap.core.wt_database import Database
from watertap.core.zero_order_properties import WaterParameterBlock
from watertap.core.zero_order_costing import ZeroOrderCosting
solver = get_solver()
class TestUVAOPZO_with_default_removal:
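"""UV-AOP unit tests that load database parameters with the default removal fractions."""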
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"solute_list": ["viruses_enteric",
"tss",
"toc",
"cryptosporidium",
"total_coliforms_fecal_ecoli"]})
m.fs.unit = UVAOPZO(default={
"property_package": m.fs.params,
"database": m.db})
m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(10000)
m.fs.unit.inlet.flow_mass_comp[0, "viruses_enteric"].fix(1)
m.fs.unit.inlet.flow_mass_comp[0, "toc"].fix(2)
m.fs.unit.inlet.flow_mass_comp[0, "tss"].fix(3)
m.fs.unit.inlet.flow_mass_comp[0, "cryptosporidium"].fix(5)
m.fs.unit.inlet.flow_mass_comp[0, "total_coliforms_fecal_ecoli"].fix(3)
return m
@pytest.mark.unit
def test_build(self, model):
assert model.fs.unit.config.database == model.db
assert model.fs.unit._tech_type == 'uv_aop'
assert isinstance(model.fs.unit.electricity, Var)
assert isinstance(model.fs.unit.energy_electric_flow_vol_inlet, Var)
assert isinstance(model.fs.unit.electricity_consumption, Constraint)
assert isinstance(model.fs.unit.uv_reduced_equivalent_dose, Var)
assert isinstance(model.fs.unit.uv_transmittance_in, Var)
assert isinstance(model.fs.unit.oxidant_dose, Var)
assert isinstance(model.fs.unit.chemical_flow_mass, Var)
assert isinstance(model.fs.unit.chemical_flow_mass_constraint, Constraint)
@pytest.mark.component
def test_load_parameters(self, model):
data = model.db.get_unit_operation_parameters("uv_aop")
assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
assert model.fs.unit.recovery_frac_mass_H2O[0].value == 1
model.fs.unit.load_parameters_from_database(use_default_removal=True)
assert model.fs.unit.recovery_frac_mass_H2O[0].value == 1
for (t, j), v in model.fs.unit.removal_frac_mass_solute.items():
assert v.fixed
if j not in data["removal_frac_mass_solute"]:
assert v.value == data["default_removal_frac_mass_solute"]["value"]
else:
assert v.value == data["removal_frac_mass_solute"][j]["value"]
assert model.fs.unit.energy_electric_flow_vol_inlet.fixed
assert model.fs.unit.energy_electric_flow_vol_inlet.value == data[
"energy_electric_flow_vol_inlet"]["value"]
assert model.fs.unit.uv_reduced_equivalent_dose[0].fixed
assert model.fs.unit.uv_reduced_equivalent_dose[0].value == data[
"uv_reduced_equivalent_dose"]["value"]
assert model.fs.unit.uv_transmittance_in[0].fixed
assert model.fs.unit.uv_transmittance_in[0].value == data[
"uv_transmittance_in"]["value"]
@pytest.mark.component
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model.fs.unit) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model.fs.unit)
@pytest.mark.component
def test_initialize(self, model):
initialization_tester(model)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(10.004685, rel=1e-5) ==
value(model.fs.unit.properties_treated[0].flow_vol))
assert (pytest.approx(0.1650012, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["toc"]))
assert (pytest.approx(0.299860, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["tss"]))
assert (pytest.approx(5.4974e-6, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["cryptosporidium"]))
assert (pytest.approx(1.79916e-6, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["total_coliforms_fecal_ecoli"]))
assert (pytest.approx(3605.04, rel=1e-5) ==
value(model.fs.unit.electricity[0]))
@pytest.mark.component
def test_report(self, model):
stream = StringIO()
model.fs.unit.report(ostream=stream)
output = """
====================================================================================
Unit : fs.unit Time: 0.0
------------------------------------------------------------------------------------
Unit Performance
Variables:
Key : Value : Fixed : Bounds
Electricity Demand : 3605.0 : False : (0, None)
Electricity Intensity : 0.10000 : True : (None, None)
Oxidant Dosage (mg/L) : 5.0000 : True : (None, None)
Oxidant Flow (kg/s) : 0.050070 : False : (0, None)
Solute Removal [cryptosporidium] : 0.99999 : True : (0, None)
Solute Removal [toc] : 0.17461 : True : (0, None)
Solute Removal [total_coliforms_fecal_ecoli] : 0.99999 : True : (0, None)
Solute Removal [tss] : 0.0000 : True : (0, None)
Solute Removal [viruses_enteric] : 0.96540 : True : (0, None)
UV Reduced Equivalent Dosage (mJ/cm^2) : 100.00 : True : (None, None)
UV Transmittance of Feed : 0.90000 : True : (None, None)
------------------------------------------------------------------------------------
Stream Table
Inlet Treated
Volumetric Flowrate 10.014 10.005
Mass Concentration H2O 998.60 999.53
Mass Concentration viruses_enteric 0.099860 0.0034581
Mass Concentration tss 0.29958 0.29986
Mass Concentration toc 0.19972 0.16500
Mass Concentration cryptosporidium 0.49930 5.4974e-06
Mass Concentration total_coliforms_fecal_ecoli 0.29958 1.7992e-06
====================================================================================
"""
assert output in stream.getvalue()
class TestUVAOPZO_subtype_no_default_removal:
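"""UV-AOP unit tests for the hydrogen_peroxide process subtype, without default removal."""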
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"solute_list": ["viruses_enteric",
"toc",
"cryptosporidium",
"total_coliforms_fecal_ecoli"]})
m.fs.unit = UVAOPZO(default={
"property_package": m.fs.params,
"database": m.db,
"process_subtype": "hydrogen_peroxide"})
m.fs.unit.inlet.flow_mass_comp[0, "H2O"].fix(10000)
m.fs.unit.inlet.flow_mass_comp[0, "viruses_enteric"].fix(1)
m.fs.unit.inlet.flow_mass_comp[0, "toc"].fix(2)
m.fs.unit.inlet.flow_mass_comp[0, "cryptosporidium"].fix(5)
m.fs.unit.inlet.flow_mass_comp[0, "total_coliforms_fecal_ecoli"].fix(3)
return m
@pytest.mark.unit
def test_build(self, model):
assert model.fs.unit.config.database == model.db
assert model.fs.unit._tech_type == 'uv_aop'
assert model.fs.unit.config.process_subtype == "hydrogen_peroxide"
assert isinstance(model.fs.unit.electricity, Var)
assert isinstance(model.fs.unit.energy_electric_flow_vol_inlet, Var)
assert isinstance(model.fs.unit.electricity_consumption, Constraint)
assert isinstance(model.fs.unit.uv_reduced_equivalent_dose, Var)
assert isinstance(model.fs.unit.uv_transmittance_in, Var)
assert isinstance(model.fs.unit.oxidant_dose, Var)
assert isinstance(model.fs.unit.chemical_flow_mass, Var)
assert isinstance(model.fs.unit.chemical_flow_mass_constraint, Constraint)
@pytest.mark.component
def test_load_parameters(self, model):
data = model.db.get_unit_operation_parameters("uv_aop",
subtype=model.fs.unit.config.process_subtype)
assert model.fs.unit.recovery_frac_mass_H2O[0].fixed
assert model.fs.unit.recovery_frac_mass_H2O[0].value == 1
model.fs.unit.load_parameters_from_database()
assert model.fs.unit.recovery_frac_mass_H2O[0].value == 1
for (t, j), v in model.fs.unit.removal_frac_mass_solute.items():
assert v.fixed
if j not in data["removal_frac_mass_solute"]:
assert v.value == data["default_removal_frac_mass_solute"]["value"]
else:
assert v.value == data["removal_frac_mass_solute"][j]["value"]
assert model.fs.unit.energy_electric_flow_vol_inlet.fixed
assert model.fs.unit.energy_electric_flow_vol_inlet.value == data[
"energy_electric_flow_vol_inlet"]["value"]
assert model.fs.unit.uv_reduced_equivalent_dose[0].fixed
assert model.fs.unit.uv_reduced_equivalent_dose[0].value == data[
"uv_reduced_equivalent_dose"]["value"]
assert model.fs.unit.uv_transmittance_in[0].fixed
assert model.fs.unit.uv_transmittance_in[0].value == data[
"uv_transmittance_in"]["value"]
assert model.fs.unit.oxidant_dose[0].fixed
assert model.fs.unit.oxidant_dose[0].value == data[
"oxidant_dose"]["value"]
@pytest.mark.component
def test_degrees_of_freedom(self, model):
assert degrees_of_freedom(model.fs.unit) == 0
@pytest.mark.component
def test_unit_consistency(self, model):
assert_units_consistent(model.fs.unit)
@pytest.mark.component
def test_initialize(self, model):
initialization_tester(model)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, model):
results = solver.solve(model)
# Check for optimal solution
assert check_optimal_termination(results)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, model):
assert (pytest.approx(10.001685, rel=1e-5) ==
value(model.fs.unit.properties_treated[0].flow_vol))
assert (pytest.approx(0.165051, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["toc"]))
assert (pytest.approx(5.49907e-6, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["cryptosporidium"]))
assert (pytest.approx(1.79970e-6, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["total_coliforms_fecal_ecoli"]))
assert (pytest.approx(0.0034591, rel=1e-5) == value(
model.fs.unit.properties_treated[0].conc_mass_comp["viruses_enteric"]))
assert (pytest.approx(3603.96, rel=1e-5) ==
value(model.fs.unit.electricity[0]))
assert (pytest.approx(0.050055, rel=1e-5) ==
value(model.fs.unit.chemical_flow_mass[0]))
@pytest.mark.component
def test_report(self, model):
stream = StringIO()
model.fs.unit.report(ostream=stream)
output = """
====================================================================================
Unit : fs.unit Time: 0.0
------------------------------------------------------------------------------------
Unit Performance
Variables:
Key : Value : Fixed : Bounds
Electricity Demand : 3604.0 : False : (0, None)
Electricity Intensity : 0.10000 : True : (None, None)
Oxidant Dosage (mg/L) : 5.0000 : True : (None, None)
Oxidant Flow (kg/s) : 0.050055 : False : (0, None)
Solute Removal [cryptosporidium] : 0.99999 : True : (0, None)
Solute Removal [toc] : 0.17461 : True : (0, None)
Solute Removal [total_coliforms_fecal_ecoli] : 0.99999 : True : (0, None)
Solute Removal [viruses_enteric] : 0.96540 : True : (0, None)
UV Reduced Equivalent Dosage (mJ/cm^2) : 100.00 : True : (None, None)
UV Transmittance of Feed : 0.90000 : True : (None, None)
------------------------------------------------------------------------------------
Stream Table
Inlet Treated
Volumetric Flowrate 10.011 10.002
Mass Concentration H2O 998.90 999.83
Mass Concentration viruses_enteric 0.099890 0.0034591
Mass Concentration toc 0.19978 0.16505
Mass Concentration cryptosporidium 0.49945 5.4991e-06
Mass Concentration total_coliforms_fecal_ecoli 0.29967 1.7997e-06
====================================================================================
"""
assert output in stream.getvalue()
def test_costing():
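# Build a minimal flowsheet and check that costing the unit creates the
# expected capital-cost variables/constraints and registers the electricity
# and hydrogen peroxide flows with the flowsheet costing block.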
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.params = WaterParameterBlock(
default={"solute_list": ["viruses_enteric",
"toc",
"cryptosporidium"]})
m.fs.costing = ZeroOrderCosting()
m.fs.unit1 = UVAOPZO(default={
"property_package": m.fs.params,
"database": m.db})
m.fs.unit1.inlet.flow_mass_comp[0, "H2O"].fix(10000)
m.fs.unit1.inlet.flow_mass_comp[0, "viruses_enteric"].fix(1)
m.fs.unit1.inlet.flow_mass_comp[0, "toc"].fix(2)
m.fs.unit1.inlet.flow_mass_comp[0, "cryptosporidium"].fix(3)
m.fs.unit1.load_parameters_from_database(use_default_removal=True)
assert degrees_of_freedom(m.fs.unit1) == 0
m.fs.unit1.costing = UnitModelCostingBlock(default={
"flowsheet_costing_block": m.fs.costing})
assert isinstance(m.fs.unit1.chemical_flow_mass, Var)
assert isinstance(m.fs.costing.uv_aop, Block)
assert isinstance(m.fs.costing.uv_aop.uv_capital_a_parameter, Var)
assert isinstance(m.fs.costing.uv_aop.uv_capital_b_parameter, Var)
assert isinstance(m.fs.costing.uv_aop.uv_capital_c_parameter, Var)
assert isinstance(m.fs.costing.uv_aop.uv_capital_d_parameter, Var)
assert isinstance(m.fs.costing.uv_aop.aop_capital_a_parameter, Var)
assert isinstance(m.fs.costing.uv_aop.aop_capital_b_parameter, Var)
assert isinstance(m.fs.unit1.costing.capital_cost, Var)
assert isinstance(m.fs.unit1.costing.capital_cost_constraint,
Constraint)
assert_units_consistent(m.fs)
assert degrees_of_freedom(m.fs.unit1) == 0
assert m.fs.unit1.electricity[0] in \
m.fs.costing._registered_flows["electricity"]
assert str(m.fs.costing._registered_flows["hydrogen_peroxide"][0]) == str(
m.fs.unit1.chemical_flow_mass[0])
| 45.407311
| 100
| 0.600196
| 2,087
| 17,391
| 4.8184
| 0.134164
| 0.047733
| 0.071102
| 0.042263
| 0.812649
| 0.794749
| 0.776253
| 0.76601
| 0.743437
| 0.717184
| 0
| 0.040355
| 0.249094
| 17,391
| 382
| 101
| 45.526178
| 0.729688
| 0.035593
| 0
| 0.696667
| 0
| 0.006667
| 0.303405
| 0.074782
| 0
| 0
| 0
| 0
| 0.28
| 1
| 0.063333
| false
| 0
| 0.043333
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4dcbd9e0164eaae599ac67bb5ac0e53caacccd6
| 941
|
py
|
Python
|
api/auth/apikeys.py
|
Retrylife/apiv2
|
a3c6cd4e556db9126a0e8aebbace7c3307b6a277
|
[
"MIT"
] | null | null | null |
api/auth/apikeys.py
|
Retrylife/apiv2
|
a3c6cd4e556db9126a0e8aebbace7c3307b6a277
|
[
"MIT"
] | null | null | null |
api/auth/apikeys.py
|
Retrylife/apiv2
|
a3c6cd4e556db9126a0e8aebbace7c3307b6a277
|
[
"MIT"
] | null | null | null |
all_scopes = ["tba", "frc", "devrant", "github", "rss", "rtl", "rtl-access", "monitor"]
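# Maps hashed API keys (128-character hex digests) to the scopes each key may read.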
read = {
# debugging key
"070f42125103c1b1edd465a102c052c245d1eb455a35c370614c037c83e2c65c9646c699e8fa27184bc8dcf74f14e23c8a01cf9160b7718b82a8645261b6458d": ["tba", "rtl-access", "devrant", "monitor"],
# devrant status key retrylife.ca
"cac386b5ca59ab4d6256f9ce548cd96af41bda212bc688c9ca710a5cec77fd078bbc3a5b0efb6232b7e705af4bb2bf2f412441c6610e4d2ca8b2af8451d5e69d": ["devrant"],
# 5k24 live data key
"8107b599263a5466f28fa6dc4d29a93feff43edcb2aa54761a7e3055fb57cb582b85a1cd403b5769f62fe38c593286994e9f121785ab36cc304db0f1255220a3": ["tba"],
# SnappyFRC
"32fb46261eec1a42cc641cc287274f1a21a179c45e03ea929659f2246570ccbf19bff93a118645fdcc4928272ae2bef2c169c326a4f0ceea05de16c5de0bc99d": ["tba"]
}
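# Tokens the API itself uses per scope (None means no token is needed).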
internal_keys = {
"tba": "QPI1VLcQrowB0Oq8G0NdTjk30HpSaJ4fJuO4GV29ATKJkJIS6GNVZ1qnlLg0O6Ql",
"monitor": None
}
| 44.809524
| 180
| 0.807651
| 42
| 941
| 18.047619
| 0.666667
| 0.023747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.397406
| 0.098831
| 941
| 21
| 181
| 44.809524
| 0.496462
| 0.07864
| 0
| 0
| 0
| 0
| 0.774942
| 0.668213
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c4dd4802ca59c3df2a76ab98d481dc761a936ce9
| 26
|
py
|
Python
|
test2.py
|
MPogoda/compilers_python
|
48aed1cbe715d9121f3031e400d579d241754304
|
[
"MIT"
] | 1
|
2022-03-27T06:47:36.000Z
|
2022-03-27T06:47:36.000Z
|
test2.py
|
MPogoda/compilers_python
|
48aed1cbe715d9121f3031e400d579d241754304
|
[
"MIT"
] | null | null | null |
test2.py
|
MPogoda/compilers_python
|
48aed1cbe715d9121f3031e400d579d241754304
|
[
"MIT"
] | null | null | null |
class class class class:
| 8.666667
| 24
| 0.769231
| 4
| 26
| 5
| 0.25
| 1.5
| 1.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 26
| 2
| 25
| 13
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f20f2c2658d8575c91afa0016cae29e6691a3018
| 115
|
py
|
Python
|
src/flasktex/views.py
|
hosiet/flasktex
|
a1fe0dd84fdeed5e5d99125af0d789f33895e396
|
[
"BSD-3-Clause"
] | 1
|
2020-03-22T10:58:24.000Z
|
2020-03-22T10:58:24.000Z
|
src/flasktex/views.py
|
hosiet/flasktex
|
a1fe0dd84fdeed5e5d99125af0d789f33895e396
|
[
"BSD-3-Clause"
] | null | null | null |
src/flasktex/views.py
|
hosiet/flasktex
|
a1fe0dd84fdeed5e5d99125af0d789f33895e396
|
[
"BSD-3-Clause"
] | null | null | null |
from flasktex import app
import flasktex.api_1_0
@app.route("/")
def ft_route_root():
return 'this is root.'
| 14.375
| 26
| 0.713043
| 19
| 115
| 4.105263
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.165217
| 115
| 7
| 27
| 16.428571
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0.121739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
f21c573bfb74d312a7e25b3f0805322696a71a53
| 66
|
py
|
Python
|
GEOAkaze/__init__.py
|
ahsouri/GEOAkaze
|
63072998cb1a7e51fdd4a9c04102fd983ad6a2b6
|
[
"MIT"
] | null | null | null |
GEOAkaze/__init__.py
|
ahsouri/GEOAkaze
|
63072998cb1a7e51fdd4a9c04102fd983ad6a2b6
|
[
"MIT"
] | null | null | null |
GEOAkaze/__init__.py
|
ahsouri/GEOAkaze
|
63072998cb1a7e51fdd4a9c04102fd983ad6a2b6
|
[
"MIT"
] | null | null | null |
from .GEOAkaze_mod import GEOAkaze
from .make_kml import make_kml
| 22
| 34
| 0.848485
| 11
| 66
| 4.818182
| 0.545455
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 35
| 33
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f21d490fcdce47dd3ce94ca1890a9617d6bbec14
| 25
|
py
|
Python
|
LociAnalysis/refdb/__init__.py
|
bnbowman/LociAnalysis
|
c0f11c2a2b80c7cde61b9991283a17f97062118e
|
[
"BSD-3-Clause"
] | 3
|
2017-09-22T15:17:42.000Z
|
2020-05-12T04:59:07.000Z
|
LociAnalysis/refdb/__init__.py
|
bnbowman/LociAnalysis
|
c0f11c2a2b80c7cde61b9991283a17f97062118e
|
[
"BSD-3-Clause"
] | null | null | null |
LociAnalysis/refdb/__init__.py
|
bnbowman/LociAnalysis
|
c0f11c2a2b80c7cde61b9991283a17f97062118e
|
[
"BSD-3-Clause"
] | null | null | null |
from .refdb import RefDb
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f2214abd577ec5293700ad33a303f96e92b0cb2c
| 77
|
py
|
Python
|
Python/Math/Power Mod Power.py
|
jaswal72/hacker-rank
|
95aaa71b4636928664341dc9c6f75d69af5f26ac
|
[
"MIT"
] | 1
|
2017-03-27T18:21:38.000Z
|
2017-03-27T18:21:38.000Z
|
Python/Math/Power Mod Power.py
|
jaswal72/hacker-rank
|
95aaa71b4636928664341dc9c6f75d69af5f26ac
|
[
"MIT"
] | null | null | null |
Python/Math/Power Mod Power.py
|
jaswal72/hacker-rank
|
95aaa71b4636928664341dc9c6f75d69af5f26ac
|
[
"MIT"
] | null | null | null |
# Read base, exponent and modulus, then print a**b and a**b mod c.
a = int(input())
b = int(input())
c = int(input())
print(pow(a, b))
print(pow(a, b, c))
| 12.833333
| 16
| 0.623377
| 18
| 77
| 2.666667
| 0.388889
| 0.5
| 0.375
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 77
| 5
| 17
| 15.4
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
482d8de3b1f47ab2d0d7c2d39c36cf57fd806f9a
| 70
|
py
|
Python
|
bools/datetime/timedelta.py
|
lotcher/bools
|
bdd5c056d7bb6f8c304b56869c49966a9b6af1a1
|
[
"MIT"
] | 11
|
2021-06-18T11:11:36.000Z
|
2022-02-10T05:59:28.000Z
|
bools/datetime/timedelta.py
|
lotcher/bools
|
bdd5c056d7bb6f8c304b56869c49966a9b6af1a1
|
[
"MIT"
] | null | null | null |
bools/datetime/timedelta.py
|
lotcher/bools
|
bdd5c056d7bb6f8c304b56869c49966a9b6af1a1
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
class Timedelta(timedelta):
pass
| 11.666667
| 30
| 0.771429
| 8
| 70
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185714
| 70
| 5
| 31
| 14
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
48537630316dad32b2dcddf3786a98ab34c9bac3
| 38,936
|
py
|
Python
|
tests/invalid_models_tests/test_relative_fields.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1
|
2019-02-10T19:33:27.000Z
|
2019-02-10T19:33:27.000Z
|
tests/invalid_models_tests/test_relative_fields.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
tests/invalid_models_tests/test_relative_fields.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import models
from django.test.utils import override_settings
from django.test.testcases import skipIfDBFeature
from .base import IsolatedModelsTestCase
class RelativeFieldTests(IsolatedModelsTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
('The field has a relation with model Rel1, '
'which has either not been installed or is abstract.'),
hint=('Ensure that you did not misspell the model name and '
'the model is not abstract. Does your INSTALLED_APPS '
'setting contain the app where Rel1 is defined?'),
obj=field,
id='E030',
),
]
self.assertEqual(errors, expected)
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
('The field has a relation with model Rel2, '
'which has either not been installed or is abstract.'),
hint=('Ensure that you did not misspell the model name and '
'the model is not abstract. Does your INSTALLED_APPS '
'setting contain the app where Rel2 is defined?'),
obj=field,
id='E030',
),
]
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person',
through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
# Too many foreign keys to Person.
first_person = models.ForeignKey(Person, related_name="first")
second_person = models.ForeignKey(Person, related_name="second")
second_model = models.ForeignKey(Group)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
('The model is used as an intermediary model by '
'invalid_models_tests.Group.field, but it has more than one '
'foreign key to Person, '
'which is ambiguous and is not permitted.'),
hint=('If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, '
'through="AmbiguousRelationship").'),
obj=field,
id='E027',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person)
wrong_foreign_key = models.ForeignKey(WrongModel)
# The last foreign key should point to the Group model.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
('The model is used as an intermediary model by '
'invalid_models_tests.Group.members, but it misses '
'a foreign key to Group or Person.'),
hint=None,
obj=InvalidRelationship,
id='E028',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
('The model is used as an intermediary model by '
'invalid_models_tests.Group.members, but it misses '
'a foreign key to Group or Person.'),
hint=None,
obj=InvalidRelationship,
id='E028',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
('The field specifies a many-to-many relation through model '
'MissingM2MModel, which has not been installed.'),
hint=('Ensure that you did not misspell the model name and '
'the model is not abstract. Does your INSTALLED_APPS '
'setting contain the app where MissingM2MModel is defined?'),
obj=field,
id='E023',
),
]
self.assertEqual(errors, expected)
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
# Implicit symmetrical=False.
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='E024',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self',
through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set_2")
second = models.ForeignKey(Person, related_name="rel_to_set_2")
third = models.ForeignKey(Person, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
('The model is used as an intermediary model by '
'invalid_models_tests.Person.friends, but it has more than two '
'foreign keys to Person, which is ambiguous and '
'is not permitted.'),
hint=None,
obj=InvalidRelationship,
id='E025',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, related_name="rel_from_set")
second = models.ForeignKey(Person, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='E024',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class Model(models.Model):
foreign_key = models.ForeignKey('AbstractModel')
class AbstractModel(models.Model):
class Meta:
abstract = True
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
('The field has a relation with model AbstractModel, '
'which has either not been installed or is abstract.'),
hint=('Ensure that you did not misspell the model name and '
'the model is not abstract. Does your INSTALLED_APPS '
'setting contain the app where AbstractModel is defined?'),
obj=field,
id='E030',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
m2m = models.ManyToManyField('AbstractModel')
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
('The field has a relation with model AbstractModel, '
'which has either not been installed or is abstract.'),
hint=('Ensure that you did not misspell the model name and '
'the model is not abstract. Does your INSTALLED_APPS '
'setting contain the app where AbstractModel is defined?'),
obj=field,
id='E030',
),
]
self.assertEqual(errors, expected)
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields must not be unique.',
hint=None,
obj=field,
id='E022',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Target.bad must have unique=True because it is referenced by a foreign key.',
hint=None,
obj=field,
id='E019',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Target.bad must have unique=True because it is referenced by a foreign key.',
hint=None,
obj=field,
id='E019',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
# Note that neither field is unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(Person,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'])
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
('No unique=True constraint on field combination '
'"country_id,city_id" under model Person.'),
hint=('Set unique=True argument on any of the fields '
'"country_id,city_id" under model Person.'),
obj=field,
id='E018',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'The field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field.',
obj=field,
id='E020',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person',
on_delete=models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'The field specifies on_delete=SET_DEFAULT, but has no default value.',
hint=None,
obj=field,
id='E021',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field or remove primary_key=True argument.',
obj=field,
id='E036',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappableModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappableModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m')
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappedModel,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel',
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappedModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m')
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
('The field defines a relation with the model '
'invalid_models_tests.SwappedModel, which has been swapped out.'),
hint='Update the relation to point at settings.TEST_SWAPPED_MODEL',
id='E029',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
class AccessorClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
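# Shared helper: declare Target.model_set and Model.rel with the supplied
# field types, then expect the E014 accessor-clash error.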
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
'Accessor for field Model.rel clashes with field Target.model_set.',
hint=('Rename field Target.model_set or add/change '
'a related_name argument to the definition '
'for field Model.rel.'),
obj=Model._meta.get_field('rel'),
id='E014',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
'Clash between accessors for Model.foreign and Model.m2m.',
hint=('Add or change a related_name argument to the definition '
'for Model.foreign or Model.m2m.'),
obj=Model._meta.get_field('foreign'),
id='E016',
),
Error(
'Clash between accessors for Model.m2m and Model.foreign.',
hint=('Add or change a related_name argument to the definition '
'for Model.m2m or Model.foreign.'),
obj=Model._meta.get_field('m2m'),
id='E016',
),
]
self.assertEqual(errors, expected)
class ReverseQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target'))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target'))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target'))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
'Reverse query name for field Model.rel clashes with field Target.model.',
hint=('Rename field Target.model or add/change '
'a related_name argument to the definition '
'for field Model.rel.'),
obj=Model._meta.get_field('rel'),
id='E015',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
'Accessor for field Model.rel clashes with field Target.clash.',
hint=('Rename field Target.clash or add/change '
'a related_name argument to the definition '
'for field Model.rel.'),
obj=Model._meta.get_field('rel'),
id='E014',
),
Error(
'Reverse query name for field Model.rel clashes with field Target.clash.',
hint=('Rename field Target.clash or add/change '
'a related_name argument to the definition '
'for field Model.rel.'),
obj=Model._meta.get_field('rel'),
id='E015',
),
]
self.assertEqual(errors, expected)
class ExplicitRelatedQueryNameClashTests(IsolatedModelsTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target',
related_query_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
'Reverse query name for field Model.rel clashes with field Target.clash.',
hint=('Rename field Target.clash or add/change a related_name '
'argument to the definition for field Model.rel.'),
obj=Model._meta.get_field('rel'),
id='E015',
),
]
self.assertEqual(errors, expected)
class SelfReferentialM2MClashTests(IsolatedModelsTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
'Clash between accessors for Model.first_m2m and Model.second_m2m.',
hint=('Add or change a related_name argument to the definition '
'for Model.first_m2m or Model.second_m2m.'),
obj=Model._meta.get_field('first_m2m'),
id='E016',
),
Error(
'Clash between accessors for Model.second_m2m and Model.first_m2m.',
hint=('Add or change a related_name argument to the definition '
'for Model.second_m2m or Model.first_m2m.'),
obj=Model._meta.get_field('second_m2m'),
id='E016',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
'Accessor for field Model.model_set clashes with field Model.model_set.',
hint=('Rename field Model.model_set or add/change '
'a related_name argument to the definition '
'for field Model.model_set.'),
obj=Model._meta.get_field('model_set'),
id='E014',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
'Reverse query name for field Model.model clashes with field Model.model.',
hint=('Rename field Model.model or add/change a related_name '
'argument to the definition for field Model.model.'),
obj=Model._meta.get_field('model'),
id='E015',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self",
symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
'Accessor for field Model.m2m clashes with field Model.clash.',
hint=('Rename field Model.clash or add/change a related_name '
'argument to the definition for field Model.m2m.'),
obj=Model._meta.get_field('m2m'),
id='E014',
),
Error(
'Reverse query name for field Model.m2m clashes with field Model.clash.',
hint=('Rename field Model.clash or add/change a related_name '
'argument to the definition for field Model.m2m.'),
obj=Model._meta.get_field('m2m'),
id='E015',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self",
symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self",
symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
class SelfReferentialFKClashTests(IsolatedModelsTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
'Accessor for field Model.model_set clashes with field Model.model_set.',
hint=('Rename field Model.model_set or add/change '
'a related_name argument to the definition '
'for field Model.model_set.'),
obj=Model._meta.get_field('model_set'),
id='E014',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model")
errors = Model.check()
expected = [
Error(
'Reverse query name for field Model.model clashes with field Model.model.',
hint=('Rename field Model.model or add/change '
'a related_name argument to the definition '
'for field Model.model.'),
obj=Model._meta.get_field('model'),
id='E015',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", related_name='clash')
errors = Model.check()
expected = [
Error(
'Accessor for field Model.foreign clashes with field Model.clash.',
hint=('Rename field Model.clash or add/change '
'a related_name argument to the definition '
'for field Model.foreign.'),
obj=Model._meta.get_field('foreign'),
id='E014',
),
Error(
'Reverse query name for field Model.foreign clashes with field Model.clash.',
hint=('Rename field Model.clash or add/change '
'a related_name argument to the definition '
'for field Model.foreign.'),
obj=Model._meta.get_field('foreign'),
id='E015',
),
]
self.assertEqual(errors, expected)
class ComplexClashTests(IsolatedModelsTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
'Accessor for field Model.foreign_1 clashes with field Target.id.',
hint=('Rename field Target.id or add/change a related_name '
'argument to the definition for field Model.foreign_1.'),
obj=Model._meta.get_field('foreign_1'),
id='E014',
),
Error(
'Reverse query name for field Model.foreign_1 clashes with field Target.id.',
hint=('Rename field Target.id or add/change a related_name '
'argument to the definition for field Model.foreign_1.'),
obj=Model._meta.get_field('foreign_1'),
id='E015',
),
Error(
'Clash between accessors for Model.foreign_1 and Model.m2m_1.',
hint=('Add or change a related_name argument to '
'the definition for Model.foreign_1 or Model.m2m_1.'),
obj=Model._meta.get_field('foreign_1'),
id='E016',
),
Error(
'Clash between reverse query names for Model.foreign_1 and Model.m2m_1.',
hint=('Add or change a related_name argument to '
'the definition for Model.foreign_1 or Model.m2m_1.'),
obj=Model._meta.get_field('foreign_1'),
id='E017',
),
Error(
'Clash between accessors for Model.foreign_2 and Model.m2m_2.',
hint=('Add or change a related_name argument '
'to the definition for Model.foreign_2 or Model.m2m_2.'),
obj=Model._meta.get_field('foreign_2'),
id='E016',
),
Error(
'Clash between reverse query names for Model.foreign_2 and Model.m2m_2.',
hint=('Add or change a related_name argument to '
'the definition for Model.foreign_2 or Model.m2m_2.'),
obj=Model._meta.get_field('foreign_2'),
id='E017',
),
Error(
'Accessor for field Model.m2m_1 clashes with field Target.id.',
hint=('Rename field Target.id or add/change a related_name '
'argument to the definition for field Model.m2m_1.'),
obj=Model._meta.get_field('m2m_1'),
id='E014',
),
Error(
'Reverse query name for field Model.m2m_1 clashes with field Target.id.',
hint=('Rename field Target.id or add/change a related_name '
'argument to the definition for field Model.m2m_1.'),
obj=Model._meta.get_field('m2m_1'),
id='E015',
),
Error(
'Clash between accessors for Model.m2m_1 and Model.foreign_1.',
hint=('Add or change a related_name argument to the definition '
'for Model.m2m_1 or Model.foreign_1.'),
obj=Model._meta.get_field('m2m_1'),
id='E016',
),
Error(
'Clash between reverse query names for Model.m2m_1 and Model.foreign_1.',
hint=('Add or change a related_name argument to '
'the definition for Model.m2m_1 or Model.foreign_1.'),
obj=Model._meta.get_field('m2m_1'),
id='E017',
),
Error(
'Clash between accessors for Model.m2m_2 and Model.foreign_2.',
hint=('Add or change a related_name argument to the definition '
'for Model.m2m_2 or Model.foreign_2.'),
obj=Model._meta.get_field('m2m_2'),
id='E016',
),
Error(
'Clash between reverse query names for Model.m2m_2 and Model.foreign_2.',
hint=('Add or change a related_name argument to the definition '
'for Model.m2m_2 or Model.foreign_2.'),
obj=Model._meta.get_field('m2m_2'),
id='E017',
),
]
self.assertEqual(errors, expected)
| 37.510597
| 94
| 0.572683
| 4,109
| 38,936
| 5.235824
| 0.065223
| 0.036302
| 0.031235
| 0.037139
| 0.817514
| 0.781119
| 0.739379
| 0.702705
| 0.667333
| 0.644464
| 0
| 0.012444
| 0.335422
| 38,936
| 1,037
| 95
| 37.54677
| 0.818983
| 0.017259
| 0
| 0.709491
| 0
| 0
| 0.246189
| 0.012472
| 0
| 0
| 0
| 0
| 0.043981
| 1
| 0.068287
| false
| 0.015046
| 0.006944
| 0
| 0.167824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f997a6a6ce02b0840b0af18b4849233bc37f985
| 30
|
py
|
Python
|
draalcore/models/__init__.py
|
jojanper/draalcore
|
3d3f5a53efe32c721c34d7e48267328a4e9e8402
|
[
"MIT"
] | 1
|
2017-04-25T10:54:55.000Z
|
2017-04-25T10:54:55.000Z
|
draalcore/models/__init__.py
|
jojanper/draalcore
|
3d3f5a53efe32c721c34d7e48267328a4e9e8402
|
[
"MIT"
] | 1
|
2022-02-10T06:48:36.000Z
|
2022-02-10T06:48:36.000Z
|
draalcore/models/__init__.py
|
jojanper/draalcore
|
3d3f5a53efe32c721c34d7e48267328a4e9e8402
|
[
"MIT"
] | null | null | null |
from .fields import * # noqa
| 15
| 29
| 0.666667
| 4
| 30
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 1
| 30
| 30
| 0.869565
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b5005f5b90507ed2579315c816fdec7d94e88283
| 237
|
py
|
Python
|
providers/base_provider.py
|
tophertimzen/sephiroth
|
3c0e106197fba506da44b697796297c28f274955
|
[
"WTFPL"
] | 1
|
2020-05-19T08:23:36.000Z
|
2020-05-19T08:23:36.000Z
|
providers/base_provider.py
|
tophertimzen/sephiroth
|
3c0e106197fba506da44b697796297c28f274955
|
[
"WTFPL"
] | null | null | null |
providers/base_provider.py
|
tophertimzen/sephiroth
|
3c0e106197fba506da44b697796297c28f274955
|
[
"WTFPL"
] | 1
|
2020-05-19T08:23:41.000Z
|
2020-05-19T08:23:41.000Z
|
class BaseProvider():
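# Abstract base class: subclasses must implement the two hooks below and set
# self.processed_ranges before get_processed_ranges() is called.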
def _get_ranges(self):
raise NotImplementedError
def _process_ranges(self):
raise NotImplementedError
def get_processed_ranges(self):
return self.processed_ranges
| 19.75
| 36
| 0.666667
| 23
| 237
| 6.565217
| 0.478261
| 0.198676
| 0.198676
| 0.450331
| 0.490066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278481
| 237
| 12
| 37
| 19.75
| 0.883041
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|