hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
b21bfec88e0dfd45846324420361a10ba1865cb9
193
py
Python
kleeneup/__init__.py
caiopo/kleeneup
0050054853ac7a3a2e40d492cc5fe741ef737191
[ "MIT" ]
null
null
null
kleeneup/__init__.py
caiopo/kleeneup
0050054853ac7a3a2e40d492cc5fe741ef737191
[ "MIT" ]
null
null
null
kleeneup/__init__.py
caiopo/kleeneup
0050054853ac7a3a2e40d492cc5fe741ef737191
[ "MIT" ]
1
2018-10-10T00:59:54.000Z
2018-10-10T00:59:54.000Z
from .regular_grammar import RegularGrammar from .finite_automaton import FiniteAutomaton, State, Symbol, Sentence from .regular_expression import RegularExpression, StitchedBinaryTree, Lambda
48.25
77
0.870466
20
193
8.25
0.75
0.133333
0
0
0
0
0
0
0
0
0
0
0.088083
193
3
78
64.333333
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b22a4ac4d8d41f1f54853d90f7a7aa435b4d6a78
41
py
Python
test/python/echo_hi_then_error.py
WrkMetric/Python--NodeJS
502bb3d81152ef9a16fb618f71f9e9fc43777349
[ "MIT", "Unlicense" ]
1,869
2015-01-07T18:06:52.000Z
2022-03-30T08:35:39.000Z
test/python/echo_hi_then_error.py
PavanAnanthSharma/python-shell
502bb3d81152ef9a16fb618f71f9e9fc43777349
[ "MIT", "Unlicense" ]
252
2015-01-08T17:33:58.000Z
2022-03-31T09:04:38.000Z
test/python/echo_hi_then_error.py
PavanAnanthSharma/python-shell
502bb3d81152ef9a16fb618f71f9e9fc43777349
[ "MIT", "Unlicense" ]
238
2015-03-22T11:22:30.000Z
2022-03-15T22:01:44.000Z
print('hi') raise Exception('fibble-fah')
20.5
29
0.731707
6
41
5
1
0
0
0
0
0
0
0
0
0
0
0
0.04878
41
2
29
20.5
0.769231
0
0
0
0
0
0.285714
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
b24b76ff37f2289a78c64dcda02fb884eb113dbd
227
py
Python
examples/scannet_normals/data.py
goodok/fastai_sparse
802ede772c19ccca7449eb13d0a107bc0c10ab0f
[ "MIT" ]
49
2019-03-31T21:20:27.000Z
2021-06-30T18:46:58.000Z
examples/scannet_normals/data.py
goodok/fastai_sparse
802ede772c19ccca7449eb13d0a107bc0c10ab0f
[ "MIT" ]
6
2019-04-17T16:01:05.000Z
2020-11-10T09:22:10.000Z
examples/scannet_normals/data.py
goodok/fastai_sparse
802ede772c19ccca7449eb13d0a107bc0c10ab0f
[ "MIT" ]
5
2019-04-01T10:46:29.000Z
2021-01-03T05:18:08.000Z
# -*- coding: utf-8 -*- from functools import partial from fastai_sparse.data import SparseDataBunch merge_fn = partial(SparseDataBunch.merge_fn, keys_lists=['id', 'labels_raw', 'filtred_mask', 'random_seed', 'num_points'])
28.375
122
0.753304
30
227
5.433333
0.8
0.245399
0.269939
0
0
0
0
0
0
0
0
0.004926
0.105727
227
7
123
32.428571
0.79803
0.092511
0
0
0
0
0.220588
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
b26fb5509497d72210ea4f3275edb63a6b2bc440
85
py
Python
tests/__init__.py
doublechiang/qsmcmd
63e31390de020472c6ff4284cbe2d2c5147cb13d
[ "MIT" ]
1
2021-05-07T09:57:01.000Z
2021-05-07T09:57:01.000Z
tests/__init__.py
doublechiang/qsmcmd
63e31390de020472c6ff4284cbe2d2c5147cb13d
[ "MIT" ]
30
2017-08-24T21:21:03.000Z
2021-01-21T19:32:36.000Z
tests/__init__.py
doublechiang/qsmcmd
63e31390de020472c6ff4284cbe2d2c5147cb13d
[ "MIT" ]
null
null
null
import os, sys sys.path.insert(0, os.path.join(os.path.dirname(__file__),'../src'))
21.25
68
0.694118
15
85
3.666667
0.666667
0.218182
0
0
0
0
0
0
0
0
0
0.012658
0.070588
85
3
69
28.333333
0.683544
0
0
0
0
0
0.070588
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b293c4e951eab343a95232f50c197cd3ae253ad6
126
py
Python
database_email_backend/__init__.py
enderlabs/django-database-email-backend
aad6bade66d076b5425f772430adc7e77e60f5ce
[ "MIT" ]
1
2016-01-15T18:54:59.000Z
2016-01-15T18:54:59.000Z
database_email_backend/__init__.py
enderlabs/django-database-email-backend
aad6bade66d076b5425f772430adc7e77e60f5ce
[ "MIT" ]
1
2015-11-04T22:19:21.000Z
2015-11-04T22:19:21.000Z
database_email_backend/__init__.py
enderlabs/django-database-email-backend
aad6bade66d076b5425f772430adc7e77e60f5ce
[ "MIT" ]
4
2015-11-04T20:45:16.000Z
2021-03-03T06:28:20.000Z
# -*- coding: utf-8 -*- VERSION = (1, 0, 4) __version__ = "1.0.4" __authors__ = ["Stefan Foulis <[email protected]>", ]
25.2
59
0.611111
18
126
3.833333
0.666667
0.231884
0.26087
0.289855
0
0
0
0
0
0
0
0.065421
0.150794
126
4
60
31.5
0.579439
0.166667
0
0
0
0
0.427184
0.242718
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a25bec9b2e01804b38b6f619f80dd7f9ad6e8b87
44
py
Python
test/py.py
PhilipDeegan/mkn
399dd01990e130c4deeb0c2800204836d3875ae9
[ "BSD-3-Clause" ]
61
2015-02-05T07:43:13.000Z
2020-05-19T13:26:50.000Z
test/py.py
mkn/mkn
a05b542497270def02200df6620804b89429259b
[ "BSD-3-Clause" ]
29
2016-11-21T03:37:42.000Z
2020-10-18T12:04:53.000Z
test/py.py
mkn/mkn
a05b542497270def02200df6620804b89429259b
[ "BSD-3-Clause" ]
12
2016-01-05T05:35:29.000Z
2020-03-15T11:03:37.000Z
#! /usr/bin/python3 print("HELLO PYTHON")
8.8
21
0.659091
6
44
4.833333
1
0
0
0
0
0
0
0
0
0
0
0.026316
0.136364
44
4
22
11
0.736842
0.409091
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
a25d09e67ac4aff5540ba2b0f11ec21250507d36
121
py
Python
ToDoApp/admin.py
aishabazylzhanova/ToDo
a787e57bf8ace5719d847d8fc4949d05a5d117c5
[ "MIT" ]
null
null
null
ToDoApp/admin.py
aishabazylzhanova/ToDo
a787e57bf8ace5719d847d8fc4949d05a5d117c5
[ "MIT" ]
null
null
null
ToDoApp/admin.py
aishabazylzhanova/ToDo
a787e57bf8ace5719d847d8fc4949d05a5d117c5
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Tasks admin.site.register(Tasks) # Register your models here.
20.166667
33
0.768595
17
121
5.470588
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.165289
121
5
34
24.2
0.920792
0.214876
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
a261c4073b37f990b45a6d0c9e5cc17d54ee8a8f
24,440
py
Python
data_attributes.py
prise-3d/Thesis-NoiseDetection-metrics
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
[ "MIT" ]
null
null
null
data_attributes.py
prise-3d/Thesis-NoiseDetection-metrics
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
[ "MIT" ]
null
null
null
data_attributes.py
prise-3d/Thesis-NoiseDetection-metrics
b37b2a3e0601e8a879df12c9d88289b1ea43bbb1
[ "MIT" ]
null
null
null
# main imports import numpy as np import sys # image transform imports from PIL import Image from skimage import color from sklearn.decomposition import FastICA from sklearn.decomposition import IncrementalPCA from sklearn.decomposition import TruncatedSVD from numpy.linalg import svd as lin_svd from scipy.signal import medfilt2d, wiener, cwt import pywt import cv2 from ipfml.processing import transform, compression, segmentation from ipfml.filters import convolution, kernels from ipfml import utils # modules and config imports sys.path.insert(0, '') # trick to enable import of main folder module import custom_config as cfg from modules.utils import data as dt def get_image_features(data_type, block): """ Method which returns the data type expected """ if data_type == 'lab': block_file_path = '/tmp/lab_img.png' block.save(block_file_path) data = transform.get_LAB_L_SVD_s(Image.open(block_file_path)) if data_type == 'mscn': img_mscn_revisited = transform.rgb_to_mscn(block) # save tmp as img img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L') mscn_revisited_file_path = '/tmp/mscn_revisited_img.png' img_output.save(mscn_revisited_file_path) img_block = Image.open(mscn_revisited_file_path) # extract from temp image data = compression.get_SVD_s(img_block) """if data_type == 'mscn': img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8') img_mscn = transform.calculate_mscn_coefficients(img_gray, 7) img_mscn_norm = transform.normalize_2D_arr(img_mscn) img_mscn_gray = np.array(img_mscn_norm*255, 'uint8') data = compression.get_SVD_s(img_mscn_gray) """ if data_type == 'low_bits_6': low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6) data = compression.get_SVD_s(low_bits_6) if data_type == 'low_bits_5': low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5) data = compression.get_SVD_s(low_bits_5) if data_type == 'low_bits_4': low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4) data = compression.get_SVD_s(low_bits_4) if data_type == 
'low_bits_3': low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3) data = compression.get_SVD_s(low_bits_3) if data_type == 'low_bits_2': low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2) data = compression.get_SVD_s(low_bits_2) if data_type == 'low_bits_4_shifted_2': data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(block, (3, 6))) if data_type == 'sub_blocks_stats': block = np.asarray(block) width, height, _= block.shape sub_width, sub_height = int(width / 4), int(height / 4) sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height)) data = [] for sub_b in sub_blocks: # by default use the whole lab L canal l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b)) # get information we want from svd data.append(np.mean(l_svd_data)) data.append(np.median(l_svd_data)) data.append(np.percentile(l_svd_data, 25)) data.append(np.percentile(l_svd_data, 75)) data.append(np.var(l_svd_data)) area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100) data.append(area_under_curve) # convert into numpy array after computing all stats data = np.asarray(data) if data_type == 'sub_blocks_stats_reduced': block = np.asarray(block) width, height, _= block.shape sub_width, sub_height = int(width / 4), int(height / 4) sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height)) data = [] for sub_b in sub_blocks: # by default use the whole lab L canal l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b)) # get information we want from svd data.append(np.mean(l_svd_data)) data.append(np.median(l_svd_data)) data.append(np.percentile(l_svd_data, 25)) data.append(np.percentile(l_svd_data, 75)) data.append(np.var(l_svd_data)) # convert into numpy array after computing all stats data = np.asarray(data) if data_type == 'sub_blocks_area': block = np.asarray(block) width, height, _= block.shape sub_width, sub_height = int(width / 8), int(height / 8) sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height)) data = [] for sub_b in 
sub_blocks: # by default use the whole lab L canal l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b)) area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50) data.append(area_under_curve) # convert into numpy array after computing all stats data = np.asarray(data) if data_type == 'sub_blocks_area_normed': block = np.asarray(block) width, height, _= block.shape sub_width, sub_height = int(width / 8), int(height / 8) sub_blocks = segmentation.divide_in_blocks(block, (sub_width, sub_height)) data = [] for sub_b in sub_blocks: # by default use the whole lab L canal l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b)) l_svd_data = utils.normalize_arr(l_svd_data) area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50) data.append(area_under_curve) # convert into numpy array after computing all stats data = np.asarray(data) if data_type == 'mscn_var_4': data = _get_mscn_variance(block, (100, 100)) if data_type == 'mscn_var_16': data = _get_mscn_variance(block, (50, 50)) if data_type == 'mscn_var_64': data = _get_mscn_variance(block, (25, 25)) if data_type == 'mscn_var_16_max': data = _get_mscn_variance(block, (50, 50)) data = np.asarray(data) size = int(len(data) / 4) indices = data.argsort()[-size:][::-1] data = data[indices] if data_type == 'mscn_var_64_max': data = _get_mscn_variance(block, (25, 25)) data = np.asarray(data) size = int(len(data) / 4) indices = data.argsort()[-size:][::-1] data = data[indices] if data_type == 'ica_diff': current_image = transform.get_LAB_L(block) ica = FastICA(n_components=50) ica.fit(current_image) image_ica = ica.fit_transform(current_image) image_restored = ica.inverse_transform(image_ica) final_image = utils.normalize_2D_arr(image_restored) final_image = np.array(final_image * 255, 'uint8') sv_values = utils.normalize_arr(compression.get_SVD_s(current_image)) ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image)) data = abs(np.array(sv_values) - np.array(ica_sv_values)) if data_type == 
'svd_trunc_diff': current_image = transform.get_LAB_L(block) svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42) transformed_image = svd.fit_transform(current_image) restored_image = svd.inverse_transform(transformed_image) reduced_image = (current_image - restored_image) U, s, V = compression.get_SVD(reduced_image) data = s if data_type == 'ipca_diff': current_image = transform.get_LAB_L(block) transformer = IncrementalPCA(n_components=20, batch_size=25) transformed_image = transformer.fit_transform(current_image) restored_image = transformer.inverse_transform(transformed_image) reduced_image = (current_image - restored_image) U, s, V = compression.get_SVD(reduced_image) data = s if data_type == 'svd_reconstruct': reconstructed_interval = (90, 200) begin, end = reconstructed_interval lab_img = transform.get_LAB_L(block) lab_img = np.array(lab_img, 'uint8') U, s, V = lin_svd(lab_img, full_matrices=True) smat = np.zeros((end-begin, end-begin), dtype=complex) smat[:, :] = np.diag(s[begin:end]) output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :])) output_img = np.array(output_img, 'uint8') data = compression.get_SVD_s(output_img) if 'sv_std_filters' in data_type: # convert into lab by default to apply filters lab_img = transform.get_LAB_L(block) arr = np.array(lab_img) images = [] # Apply list of filter on arr images.append(medfilt2d(arr, [3, 3])) images.append(medfilt2d(arr, [5, 5])) images.append(wiener(arr, [3, 3])) images.append(wiener(arr, [5, 5])) # By default computation of current block image s_arr = compression.get_SVD_s(arr) sv_vector = [s_arr] # for each new image apply SVD and get SV for img in images: s = compression.get_SVD_s(img) sv_vector.append(s) sv_array = np.array(sv_vector) _, length = sv_array.shape sv_std = [] # normalize each SV vectors and compute standard deviation for each sub vectors for i in range(length): sv_array[:, i] = utils.normalize_arr(sv_array[:, i]) sv_std.append(np.std(sv_array[:, i])) indices = [] if 
'lowest' in data_type: indices = utils.get_indices_of_lowest_values(sv_std, 200) if 'highest' in data_type: indices = utils.get_indices_of_highest_values(sv_std, 200) # data are arranged following std trend computed data = s_arr[indices] # with the use of wavelet if 'wave_sv_std_filters' in data_type: # convert into lab by default to apply filters lab_img = transform.get_LAB_L(block) arr = np.array(lab_img) images = [] # Apply list of filter on arr images.append(medfilt2d(arr, [3, 3])) # By default computation of current block image s_arr = compression.get_SVD_s(arr) sv_vector = [s_arr] # for each new image apply SVD and get SV for img in images: s = compression.get_SVD_s(img) sv_vector.append(s) sv_array = np.array(sv_vector) _, length = sv_array.shape sv_std = [] # normalize each SV vectors and compute standard deviation for each sub vectors for i in range(length): sv_array[:, i] = utils.normalize_arr(sv_array[:, i]) sv_std.append(np.std(sv_array[:, i])) indices = [] if 'lowest' in data_type: indices = utils.get_indices_of_lowest_values(sv_std, 200) if 'highest' in data_type: indices = utils.get_indices_of_highest_values(sv_std, 200) # data are arranged following std trend computed data = s_arr[indices] # with the use of wavelet if 'sv_std_filters_full' in data_type: # convert into lab by default to apply filters lab_img = transform.get_LAB_L(block) arr = np.array(lab_img) images = [] # Apply list of filter on arr kernel = np.ones((3,3),np.float32)/9 images.append(cv2.filter2D(arr,-1,kernel)) kernel = np.ones((5,5),np.float32)/25 images.append(cv2.filter2D(arr,-1,kernel)) images.append(cv2.GaussianBlur(arr, (3, 3), 0.5)) images.append(cv2.GaussianBlur(arr, (3, 3), 1)) images.append(cv2.GaussianBlur(arr, (3, 3), 1.5)) images.append(cv2.GaussianBlur(arr, (5, 5), 0.5)) images.append(cv2.GaussianBlur(arr, (5, 5), 1)) images.append(cv2.GaussianBlur(arr, (5, 5), 1.5)) images.append(medfilt2d(arr, [3, 3])) images.append(medfilt2d(arr, [5, 5])) images.append(wiener(arr, 
[3, 3])) images.append(wiener(arr, [5, 5])) wave = w2d(arr, 'db1', 2) images.append(np.array(wave, 'float64')) # By default computation of current block image s_arr = compression.get_SVD_s(arr) sv_vector = [s_arr] # for each new image apply SVD and get SV for img in images: s = compression.get_SVD_s(img) sv_vector.append(s) sv_array = np.array(sv_vector) _, length = sv_array.shape sv_std = [] # normalize each SV vectors and compute standard deviation for each sub vectors for i in range(length): sv_array[:, i] = utils.normalize_arr(sv_array[:, i]) sv_std.append(np.std(sv_array[:, i])) indices = [] if 'lowest' in data_type: indices = utils.get_indices_of_lowest_values(sv_std, 200) if 'highest' in data_type: indices = utils.get_indices_of_highest_values(sv_std, 200) # data are arranged following std trend computed data = s_arr[indices] if 'sv_entropy_std_filters' in data_type: lab_img = transform.get_LAB_L(block) arr = np.array(lab_img) images = [] kernel = np.ones((3,3),np.float32)/9 images.append(cv2.filter2D(arr,-1,kernel)) kernel = np.ones((5,5),np.float32)/25 images.append(cv2.filter2D(arr,-1,kernel)) images.append(cv2.GaussianBlur(arr, (3, 3), 0.5)) images.append(cv2.GaussianBlur(arr, (3, 3), 1)) images.append(cv2.GaussianBlur(arr, (3, 3), 1.5)) images.append(cv2.GaussianBlur(arr, (5, 5), 0.5)) images.append(cv2.GaussianBlur(arr, (5, 5), 1)) images.append(cv2.GaussianBlur(arr, (5, 5), 1.5)) images.append(medfilt2d(arr, [3, 3])) images.append(medfilt2d(arr, [5, 5])) images.append(wiener(arr, [3, 3])) images.append(wiener(arr, [5, 5])) wave = w2d(arr, 'db1', 2) images.append(np.array(wave, 'float64')) sv_vector = [] sv_entropy_list = [] # for each new image apply SVD and get SV for img in images: s = compression.get_SVD_s(img) sv_vector.append(s) sv_entropy = [utils.get_entropy_contribution_of_i(s, id_sv) for id_sv, sv in enumerate(s)] sv_entropy_list.append(sv_entropy) sv_std = [] sv_array = np.array(sv_vector) _, length = sv_array.shape # normalize each SV 
vectors and compute standard deviation for each sub vectors for i in range(length): sv_array[:, i] = utils.normalize_arr(sv_array[:, i]) sv_std.append(np.std(sv_array[:, i])) indices = [] if 'lowest' in data_type: indices = utils.get_indices_of_lowest_values(sv_std, 200) if 'highest' in data_type: indices = utils.get_indices_of_highest_values(sv_std, 200) # data are arranged following std trend computed s_arr = compression.get_SVD_s(arr) data = s_arr[indices] if 'convolutional_kernels' in data_type: sub_zones = segmentation.divide_in_blocks(block, (20, 20)) data = [] diff_std_list_3 = [] diff_std_list_5 = [] diff_mean_list_3 = [] diff_mean_list_5 = [] plane_std_list_3 = [] plane_std_list_5 = [] plane_mean_list_3 = [] plane_mean_list_5 = [] plane_max_std_list_3 = [] plane_max_std_list_5 = [] plane_max_mean_list_3 = [] plane_max_mean_list_5 = [] for sub_zone in sub_zones: l_img = transform.get_LAB_L(sub_zone) normed_l_img = utils.normalize_2D_arr(l_img) # bilateral with window of size (3, 3) normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (3, 3)) std_diff = np.std(normed_diff) mean_diff = np.mean(normed_diff) diff_std_list_3.append(std_diff) diff_mean_list_3.append(mean_diff) # bilateral with window of size (5, 5) normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5)) std_diff = np.std(normed_diff) mean_diff = np.mean(normed_diff) diff_std_list_5.append(std_diff) diff_mean_list_5.append(mean_diff) # plane mean with window of size (3, 3) normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (3, 3)) std_plane_mean = np.std(normed_plane_mean) mean_plane_mean = np.mean(normed_plane_mean) plane_std_list_3.append(std_plane_mean) plane_mean_list_3.append(mean_plane_mean) # plane mean with window of size (5, 5) normed_plane_mean = convolution.convolution2D(normed_l_img, kernels.plane_mean, (5, 5)) std_plane_mean = np.std(normed_plane_mean) mean_plane_mean = 
np.mean(normed_plane_mean) plane_std_list_5.append(std_plane_mean) plane_mean_list_5.append(mean_plane_mean) # plane max error with window of size (3, 3) normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (3, 3)) std_plane_max = np.std(normed_plane_max) mean_plane_max = np.mean(normed_plane_max) plane_max_std_list_3.append(std_plane_max) plane_max_mean_list_3.append(mean_plane_max) # plane max error with window of size (5, 5) normed_plane_max = convolution.convolution2D(normed_l_img, kernels.plane_max_error, (5, 5)) std_plane_max = np.std(normed_plane_max) mean_plane_max = np.mean(normed_plane_max) plane_max_std_list_5.append(std_plane_max) plane_max_mean_list_5.append(mean_plane_max) diff_std_list_3 = np.array(diff_std_list_3) diff_std_list_5 = np.array(diff_std_list_5) diff_mean_list_3 = np.array(diff_mean_list_3) diff_mean_list_5 = np.array(diff_mean_list_5) plane_std_list_3 = np.array(plane_std_list_3) plane_std_list_5 = np.array(plane_std_list_5) plane_mean_list_3 = np.array(plane_mean_list_3) plane_mean_list_5 = np.array(plane_mean_list_5) plane_max_std_list_3 = np.array(plane_max_std_list_3) plane_max_std_list_5 = np.array(plane_max_std_list_5) plane_max_mean_list_3 = np.array(plane_max_mean_list_3) plane_max_mean_list_5 = np.array(plane_max_mean_list_5) if 'std_max_blocks' in data_type: data.append(np.std(diff_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.std(diff_mean_list_3[0:int(len(sub_zones)/5)])) data.append(np.std(diff_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.std(diff_mean_list_5[0:int(len(sub_zones)/5)])) data.append(np.std(plane_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.std(plane_mean_list_3[0:int(len(sub_zones)/5)])) data.append(np.std(plane_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.std(plane_mean_list_5[0:int(len(sub_zones)/5)])) data.append(np.std(plane_max_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.std(plane_max_mean_list_3[0:int(len(sub_zones)/5)])) 
data.append(np.std(plane_max_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.std(plane_max_mean_list_5[0:int(len(sub_zones)/5)])) if 'mean_max_blocks' in data_type: data.append(np.mean(diff_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(diff_mean_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(diff_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.mean(diff_mean_list_5[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_mean_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_mean_list_5[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_max_std_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_max_mean_list_3[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_max_std_list_5[0:int(len(sub_zones)/5)])) data.append(np.mean(plane_max_mean_list_5[0:int(len(sub_zones)/5)])) if 'std_normed' in data_type: data.append(np.std(diff_std_list_3)) data.append(np.std(diff_mean_list_3)) data.append(np.std(diff_std_list_5)) data.append(np.std(diff_mean_list_5)) data.append(np.std(plane_std_list_3)) data.append(np.std(plane_mean_list_3)) data.append(np.std(plane_std_list_5)) data.append(np.std(plane_mean_list_5)) data.append(np.std(plane_max_std_list_3)) data.append(np.std(plane_max_mean_list_3)) data.append(np.std(plane_max_std_list_5)) data.append(np.std(plane_max_mean_list_5)) if 'mean_normed' in data_type: data.append(np.mean(diff_std_list_3)) data.append(np.mean(diff_mean_list_3)) data.append(np.mean(diff_std_list_5)) data.append(np.mean(diff_mean_list_5)) data.append(np.mean(plane_std_list_3)) data.append(np.mean(plane_mean_list_3)) data.append(np.mean(plane_std_list_5)) data.append(np.mean(plane_mean_list_5)) data.append(np.mean(plane_max_std_list_3)) data.append(np.mean(plane_max_mean_list_3)) data.append(np.mean(plane_max_std_list_5)) data.append(np.mean(plane_max_mean_list_5)) data = np.array(data) 
if data_type == 'convolutional_kernel_stats_svd': l_img = transform.get_LAB_L(block) normed_l_img = utils.normalize_2D_arr(l_img) # bilateral with window of size (5, 5) normed_diff = convolution.convolution2D(normed_l_img, kernels.min_bilateral_diff, (5, 5)) # getting sigma vector from SVD compression s = compression.get_SVD_s(normed_diff) data = s if data_type == 'svd_entropy': l_img = transform.get_LAB_L(block) blocks = segmentation.divide_in_blocks(l_img, (20, 20)) values = [] for b in blocks: sv = compression.get_SVD_s(b) values.append(utils.get_entropy(sv)) data = np.array(values) if data_type == 'svd_entropy_20': l_img = transform.get_LAB_L(block) blocks = segmentation.divide_in_blocks(l_img, (20, 20)) values = [] for b in blocks: sv = compression.get_SVD_s(b) values.append(utils.get_entropy(sv)) data = np.array(values) if data_type == 'svd_entropy_noise_20': l_img = transform.get_LAB_L(block) blocks = segmentation.divide_in_blocks(l_img, (20, 20)) values = [] for b in blocks: sv = compression.get_SVD_s(b) sv_size = len(sv) values.append(utils.get_entropy(sv[int(sv_size / 4):])) data = np.array(values) return data def w2d(arr, mode='haar', level=1): #convert to float imArray = arr np.divide(imArray, 255) # compute coefficients coeffs=pywt.wavedec2(imArray, mode, level=level) #Process Coefficients coeffs_H=list(coeffs) coeffs_H[0] *= 0 # reconstruction imArray_H = pywt.waverec2(coeffs_H, mode) imArray_H *= 255 imArray_H = np.uint8(imArray_H) return imArray_H def _get_mscn_variance(block, sub_block_size=(50, 50)): blocks = segmentation.divide_in_blocks(block, sub_block_size) data = [] for block in blocks: mscn_coefficients = transform.get_mscn_coefficients(block) flat_coeff = mscn_coefficients.flatten() data.append(np.var(flat_coeff)) return np.sort(data)
32.849462
103
0.627169
3,557
24,440
3.999438
0.073376
0.036553
0.049768
0.029242
0.807114
0.780964
0.737382
0.703922
0.639744
0.609447
0
0.02781
0.267308
24,440
743
104
32.893674
0.766628
0.084738
0
0.50905
0
0
0.028344
0.006653
0
0
0
0
0
1
0.006787
false
0
0.036199
0
0.049774
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a2adbf90bc22cca044acdd78bea2c9355ce557e4
2,848
py
Python
desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py
kokosing/hue
2307f5379a35aae9be871e836432e6f45138b3d9
[ "Apache-2.0" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py
zks888/hue
93a8c370713e70b216c428caa2f75185ef809deb
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
from __future__ import with_statement from contextlib import contextmanager from test import TemplateTest, eq_, raises, template_base, mock import os from mako.cmd import cmdline class CmdTest(TemplateTest): @contextmanager def _capture_output_fixture(self, stream="stdout"): with mock.patch("sys.%s" % stream) as stdout: yield stdout def test_stdin_success(self): with self._capture_output_fixture() as stdout: with mock.patch("sys.stdin", mock.Mock( read=mock.Mock(return_value="hello world ${x}"))): cmdline(["--var", "x=5", "-"]) eq_(stdout.write.mock_calls[0][1][0], "hello world 5") def test_stdin_syntax_err(self): with mock.patch("sys.stdin", mock.Mock( read=mock.Mock(return_value="${x"))): with self._capture_output_fixture("stderr") as stderr: with raises(SystemExit): cmdline(["--var", "x=5", "-"]) assert "SyntaxException: Expected" in \ stderr.write.mock_calls[0][1][0] assert "Traceback" in stderr.write.mock_calls[0][1][0] def test_stdin_rt_err(self): with mock.patch("sys.stdin", mock.Mock( read=mock.Mock(return_value="${q}"))): with self._capture_output_fixture("stderr") as stderr: with raises(SystemExit): cmdline(["--var", "x=5", "-"]) assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0] assert "Traceback" in stderr.write.mock_calls[0][1][0] def test_file_success(self): with self._capture_output_fixture() as stdout: cmdline(["--var", "x=5", os.path.join(template_base, "cmd_good.mako")]) eq_(stdout.write.mock_calls[0][1][0], "hello world 5") def test_file_syntax_err(self): with self._capture_output_fixture("stderr") as stderr: with raises(SystemExit): cmdline(["--var", "x=5", os.path.join(template_base, "cmd_syntax.mako")]) assert "SyntaxException: Expected" in stderr.write.mock_calls[0][1][0] assert "Traceback" in stderr.write.mock_calls[0][1][0] def test_file_rt_err(self): with self._capture_output_fixture("stderr") as stderr: with raises(SystemExit): cmdline(["--var", "x=5", os.path.join(template_base, "cmd_runtime.mako")]) assert "NameError: 
Undefined" in stderr.write.mock_calls[0][1][0] assert "Traceback" in stderr.write.mock_calls[0][1][0] def test_file_notfound(self): with raises(SystemExit, "error: can't find fake.lalala"): cmdline(["--var", "x=5", "fake.lalala"])
39.013699
78
0.581812
353
2,848
4.504249
0.201133
0.056604
0.08805
0.09434
0.738994
0.725157
0.725157
0.725157
0.725157
0.666038
0
0.018969
0.27809
2,848
72
79
39.555556
0.754377
0
0
0.490909
0
0
0.133825
0
0
0
0
0
0.145455
1
0.145455
false
0
0.090909
0
0.254545
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a2d10542879056ad7800cdebe98204d350251551
346
py
Python
diffir/__init__.py
capreolus-ir/diffir
90906ce4b7d5f23d6190eea26020f9e4096cb0cd
[ "Apache-2.0" ]
12
2021-03-10T17:04:05.000Z
2022-01-13T15:44:34.000Z
diffir/__init__.py
capreolus-ir/diffir
90906ce4b7d5f23d6190eea26020f9e4096cb0cd
[ "Apache-2.0" ]
7
2021-05-19T21:28:52.000Z
2021-12-16T16:01:40.000Z
diffir/__init__.py
capreolus-ir/diffir
90906ce4b7d5f23d6190eea26020f9e4096cb0cd
[ "Apache-2.0" ]
null
null
null
__version__ = "0.2.0" from diffir.weight import Weight from diffir.weight.custom import CustomWeight from diffir.weight.unsupervised import ExactMatchWeight from diffir.measure import Measure from diffir.measure.qrels import QrelMeasure from diffir.measure.unsupervised import TopkMeasure from diffir.weight.weights_builder import WeightBuilder
34.6
55
0.858382
45
346
6.488889
0.4
0.239726
0.219178
0
0
0
0
0
0
0
0
0.009554
0.092486
346
9
56
38.444444
0.920382
0
0
0
0
0
0.014451
0
0
0
0
0
0
1
0
false
0
0.875
0
0.875
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
0c0e6124651142c0387644ad144cc2392388c0c5
33
py
Python
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
1
2019-01-27T20:44:53.000Z
2019-01-27T20:44:53.000Z
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
null
null
null
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
null
null
null
import saludos saludos.saludar()
11
17
0.818182
4
33
6.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.090909
33
3
17
11
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0c25a90b221d6137090c0e77b536a592e4921a3d
337
py
Python
api/data_explorer/models/__init__.py
karamalhotra/data-explorer
317f4d7330887969ab6bfe2ca23ec24163472c55
[ "BSD-3-Clause" ]
null
null
null
api/data_explorer/models/__init__.py
karamalhotra/data-explorer
317f4d7330887969ab6bfe2ca23ec24163472c55
[ "BSD-3-Clause" ]
null
null
null
api/data_explorer/models/__init__.py
karamalhotra/data-explorer
317f4d7330887969ab6bfe2ca23ec24163472c55
[ "BSD-3-Clause" ]
null
null
null
# coding: utf-8 # flake8: noqa from __future__ import absolute_import # import models into model package from data_explorer.models.dataset_response import DatasetResponse from data_explorer.models.facet import Facet from data_explorer.models.facet_value import FacetValue from data_explorer.models.facets_response import FacetsResponse
33.7
65
0.860534
46
337
6.043478
0.5
0.115108
0.230216
0.316547
0.194245
0
0
0
0
0
0
0.006579
0.097923
337
9
66
37.444444
0.907895
0.175074
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a743058f6e943a66d50447c9ef87971c35895cc0
169
py
Python
taxcalc/tbi/__init__.py
ClarePan/Tax-Calculator
d2d6cb4b551f34017db7166d91d982b5c4670816
[ "CC0-1.0" ]
1
2021-02-23T21:03:43.000Z
2021-02-23T21:03:43.000Z
taxcalc/tbi/__init__.py
ClarePan/Tax-Calculator
d2d6cb4b551f34017db7166d91d982b5c4670816
[ "CC0-1.0" ]
null
null
null
taxcalc/tbi/__init__.py
ClarePan/Tax-Calculator
d2d6cb4b551f34017db7166d91d982b5c4670816
[ "CC0-1.0" ]
null
null
null
from taxcalc.tbi.tbi import (run_nth_year_taxcalc_model, run_nth_year_gdp_elast_model, reform_warnings_errors)
42.25
58
0.585799
19
169
4.631579
0.684211
0.136364
0.227273
0
0
0
0
0
0
0
0
0
0.378698
169
3
59
56.333333
0.838095
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a778a8a2d428dbcafebc55dfd31568980b3b8abe
375
py
Python
app/env/lib/python3.7/site-packages/twilio/http/response.py
siyaochen/Tier1Health
536591a7534bbb3fb27fe889bfed9de152ec1864
[ "MIT" ]
30
2018-06-12T12:00:53.000Z
2021-05-02T01:27:16.000Z
app/env/lib/python3.7/site-packages/twilio/http/response.py
siyaochen/Tier1Health
536591a7534bbb3fb27fe889bfed9de152ec1864
[ "MIT" ]
11
2019-12-26T17:21:03.000Z
2022-03-21T22:17:07.000Z
bot/lib/python3.7/site-packages/twilio/http/response.py
carlosrh18/DavinciBot
d73a6b7f68d7bab25d134d3f85c6b63a86c206c5
[ "MIT" ]
4
2019-03-28T18:20:48.000Z
2019-11-18T18:52:04.000Z
class Response(object): """ """ def __init__(self, status_code, text): self.content = text self.cached = False self.status_code = status_code self.ok = self.status_code < 400 @property def text(self): return self.content def __repr__(self): return 'HTTP {} {}'.format(self.status_code, self.content)
22.058824
66
0.592
44
375
4.75
0.431818
0.239234
0.267943
0
0
0
0
0
0
0
0
0.011278
0.290667
375
16
67
23.4375
0.774436
0
0
0
0
0
0.027548
0
0
0
0
0
0
1
0.272727
false
0
0
0.181818
0.545455
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
a7b2d4bcebc84c01285d54f2bcd39c69c67e7a6d
249
py
Python
adminapp/admin.py
gabyxbinnaeah/Bus-Booking
51d2a521f890986e4e7e17775708cec3cd71d2b4
[ "MIT" ]
null
null
null
adminapp/admin.py
gabyxbinnaeah/Bus-Booking
51d2a521f890986e4e7e17775708cec3cd71d2b4
[ "MIT" ]
null
null
null
adminapp/admin.py
gabyxbinnaeah/Bus-Booking
51d2a521f890986e4e7e17775708cec3cd71d2b4
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Admin, Profile # from userapp.models import Book # from driverapp.models import Bus admin.site.register(Admin) admin.site.register(Profile) # admin.site.register(Bus) # admin.site.register(Book)
27.666667
35
0.795181
36
249
5.5
0.361111
0.181818
0.343434
0.20202
0
0
0
0
0
0
0
0
0.104418
249
9
36
27.666667
0.887892
0.461847
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a7b4359004682dcf25fa55ff619454c6c53df969
47
py
Python
numba/cuda/simulator/cudadrv/error.py
auderson/numba
3d67c9850ab56457f418cf40af6245fd9c337705
[ "BSD-2-Clause" ]
6,620
2015-01-04T08:51:04.000Z
2022-03-31T12:52:18.000Z
numba/cuda/simulator/cudadrv/error.py
auderson/numba
3d67c9850ab56457f418cf40af6245fd9c337705
[ "BSD-2-Clause" ]
6,457
2015-01-04T03:18:41.000Z
2022-03-31T17:38:42.000Z
numba/cuda/simulator/cudadrv/error.py
auderson/numba
3d67c9850ab56457f418cf40af6245fd9c337705
[ "BSD-2-Clause" ]
930
2015-01-25T02:33:03.000Z
2022-03-30T14:10:32.000Z
class CudaSupportError(RuntimeError): pass
15.666667
37
0.787234
4
47
9.25
1
0
0
0
0
0
0
0
0
0
0
0
0.148936
47
2
38
23.5
0.925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
a7dac603aaf8f14d5ec261bf9ee335b205d9767b
201
py
Python
backend/app/bucket.py
thanet-s/subme-selected-topics-project
fac1630839c580bbd66b93f2dc9004c8637a7b15
[ "MIT" ]
null
null
null
backend/app/bucket.py
thanet-s/subme-selected-topics-project
fac1630839c580bbd66b93f2dc9004c8637a7b15
[ "MIT" ]
null
null
null
backend/app/bucket.py
thanet-s/subme-selected-topics-project
fac1630839c580bbd66b93f2dc9004c8637a7b15
[ "MIT" ]
null
null
null
from minio import Minio import os minio_client = Minio( os.environ['MINIO_HOST'], access_key=os.environ['MINIO_ROOT_USER'], secret_key=os.environ['MINIO_ROOT_PASSWORD'], secure=False )
22.333333
49
0.731343
29
201
4.793103
0.517241
0.194245
0.302158
0.244604
0.302158
0
0
0
0
0
0
0
0.149254
201
9
50
22.333333
0.812866
0
0
0
0
0
0.217822
0
0
0
0
0
0
1
0
false
0.125
0.25
0
0.25
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
38fa54c4a5025900fd457356bdca81cf5e7db815
92
py
Python
datamux/src/datamux/simulate_mode.py
nirdslab/streaminghub
a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf
[ "MIT" ]
null
null
null
datamux/src/datamux/simulate_mode.py
nirdslab/streaminghub
a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf
[ "MIT" ]
null
null
null
datamux/src/datamux/simulate_mode.py
nirdslab/streaminghub
a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf
[ "MIT" ]
1
2020-01-22T15:35:29.000Z
2020-01-22T15:35:29.000Z
class SimulateMode: @staticmethod def start_simulation(device, guide=None): return
15.333333
43
0.75
10
92
6.8
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
92
5
44
18.4
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
ac4f1637d2da63115e2a93b02c3d3a4bb30ba74a
56
py
Python
src/import_hook/__init__.py
zthxxx/sniputils
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
[ "MIT" ]
null
null
null
src/import_hook/__init__.py
zthxxx/sniputils
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
[ "MIT" ]
null
null
null
src/import_hook/__init__.py
zthxxx/sniputils
e67f55dfa0689f1dde6b6e78d76f04022b4d4585
[ "MIT" ]
null
null
null
from .import_track import * from .reimportable import *
18.666667
27
0.785714
7
56
6.142857
0.571429
0
0
0
0
0
0
0
0
0
0
0
0.142857
56
2
28
28
0.895833
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3ba3da7e08c14ac9df758e9e45e7cb972fc56eb2
139
py
Python
compiled/construct/repeat_eos_u4.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
4
2017-04-08T12:55:11.000Z
2020-12-05T21:09:31.000Z
compiled/construct/repeat_eos_u4.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
7
2018-04-23T01:30:33.000Z
2020-10-30T23:56:14.000Z
compiled/construct/repeat_eos_u4.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
6
2017-04-08T11:41:14.000Z
2020-10-30T22:47:31.000Z
from construct import * from construct.lib import * repeat_eos_u4 = Struct( 'numbers' / GreedyRange(Int32ul), ) _schema = repeat_eos_u4
15.444444
34
0.755396
18
139
5.555556
0.666667
0.26
0.22
0
0
0
0
0
0
0
0
0.033898
0.151079
139
8
35
17.375
0.813559
0
0
0
0
0
0.05036
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
3bb5cf6df03cde1b36d438f6ec362fdce3a55254
101
py
Python
submissions/abc085/a.py
m-star18/atcoder
08e475810516602fa088f87daf1eba590b4e07cc
[ "Unlicense" ]
1
2021-05-10T01:16:28.000Z
2021-05-10T01:16:28.000Z
submissions/abc085/a.py
m-star18/atcoder
08e475810516602fa088f87daf1eba590b4e07cc
[ "Unlicense" ]
3
2021-05-11T06:14:15.000Z
2021-06-19T08:18:36.000Z
submissions/abc085/a.py
m-star18/atcoder
08e475810516602fa088f87daf1eba590b4e07cc
[ "Unlicense" ]
null
null
null
# sys.stdin.readline() import sys input = sys.stdin.readline print(input().replace('2017', '2018'))
16.833333
38
0.70297
14
101
5.071429
0.642857
0.225352
0.450704
0
0
0
0
0
0
0
0
0.087912
0.09901
101
5
39
20.2
0.692308
0.19802
0
0
0
0
0.101266
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0.333333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
3bda0b8e560a339719620c78e288885bd05aa2f6
689
py
Python
lib/abridger/exc.py
willangenent/abridger
6daa80f7360339376b38544ce60694c5addaa30f
[ "MIT" ]
8
2016-10-19T14:15:34.000Z
2020-06-23T09:37:02.000Z
lib/abridger/exc.py
freewilll/abridger
6daa80f7360339376b38544ce60694c5addaa30f
[ "MIT" ]
null
null
null
lib/abridger/exc.py
freewilll/abridger
6daa80f7360339376b38544ce60694c5addaa30f
[ "MIT" ]
null
null
null
class AbridgerError(Exception): pass class ConfigFileLoaderError(AbridgerError): pass class IncludeError(ConfigFileLoaderError): pass class DataError(ConfigFileLoaderError): pass class FileNotFoundError(ConfigFileLoaderError): pass class DatabaseUrlError(AbridgerError): pass class ExtractionModelError(AbridgerError): pass class UnknownTableError(AbridgerError): pass class UnknownColumnError(AbridgerError): pass class InvalidConfigError(ExtractionModelError): pass class RelationIntegrityError(ExtractionModelError): pass class GeneratorError(Exception): pass class CyclicDependencyError(GeneratorError): pass
13.509804
51
0.776488
52
689
10.288462
0.307692
0.201869
0.205607
0
0
0
0
0
0
0
0
0
0.166909
689
50
52
13.78
0.932056
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
0
0
1
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
3bef530282cd351acc8d5d5fce296f7123e0bfe8
56
py
Python
node/views/__init__.py
mohamedmansor/path-detector
14954795ea47109d404b54f74575337f86d6134f
[ "MIT" ]
null
null
null
node/views/__init__.py
mohamedmansor/path-detector
14954795ea47109d404b54f74575337f86d6134f
[ "MIT" ]
null
null
null
node/views/__init__.py
mohamedmansor/path-detector
14954795ea47109d404b54f74575337f86d6134f
[ "MIT" ]
null
null
null
from .node_view import ConnectNodesViewSet, PathViewSet
28
55
0.875
6
56
8
1
0
0
0
0
0
0
0
0
0
0
0
0.089286
56
1
56
56
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ce07948f6f31a33c9447bac9ba7da84e0cc0cfdb
25
py
Python
write_grok/__init__.py
namedyangfan/Python_practice
7f7394d82bb5afc13b039eec286b9485a775ae39
[ "MIT" ]
null
null
null
write_grok/__init__.py
namedyangfan/Python_practice
7f7394d82bb5afc13b039eec286b9485a775ae39
[ "MIT" ]
null
null
null
write_grok/__init__.py
namedyangfan/Python_practice
7f7394d82bb5afc13b039eec286b9485a775ae39
[ "MIT" ]
null
null
null
from .write_grok import *
25
25
0.8
4
25
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.12
25
1
25
25
0.863636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cbff48d02931d3f7dcc779f4f74d3a26a84b6bb5
1,043
py
Python
FlaskApp/app.py
Dec22gln/FlaskBlog
114ca9fc39f039cbdf0f1ff613fb66e364cea171
[ "MIT" ]
null
null
null
FlaskApp/app.py
Dec22gln/FlaskBlog
114ca9fc39f039cbdf0f1ff613fb66e364cea171
[ "MIT" ]
null
null
null
FlaskApp/app.py
Dec22gln/FlaskBlog
114ca9fc39f039cbdf0f1ff613fb66e364cea171
[ "MIT" ]
null
null
null
from flask import Flask from flask import render_template app = Flask(__name__) @app.route('/') def hello_world(): return render_template('index.html') @app.route('/index') def index(): return render_template('index.html') @app.route('/contact') def contact(): return render_template('contact.html') @app.route('/cv') def cv(): return render_template('cv.html') @app.route('/hire-me') def hireMe(): return render_template('hire-me.html') @app.route('/project-page') def projectPage(): return render_template('project-page.html') @app.route('/projects-compact-grid') def projects1(): return render_template('projects-compact-grid.html') @app.route('/projects-no-images') def projects2(): return render_template('projects-no-images.html') @app.route('/projects-with-sidebar') def projects3(): return render_template('projects-with-sidebar.html') @app.route('/projects-grid-cards') def projects4(): return render_template('projects-with-sidebar.html') if __name__ == '__main__': app.run()
21.729167
56
0.708533
137
1,043
5.218978
0.270073
0.215385
0.27972
0.111888
0.223776
0.223776
0.223776
0
0
0
0
0.004348
0.117929
1,043
47
57
22.191489
0.772826
0
0
0.114286
0
0
0.286673
0.139022
0
0
0
0
0
1
0.285714
false
0
0.057143
0.285714
0.628571
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
0255255ddce0aede915e8004ff48e8619c540430
126
py
Python
src/timber_clay_hybrid/assembly/__init__.py
augmentedfabricationlab/Timber_Clay_Hybrid
243efddac77970c989b551697a0e188932064849
[ "MIT" ]
1
2020-12-16T01:25:07.000Z
2020-12-16T01:25:07.000Z
src/timber_clay_hybrid/assembly/__init__.py
augmentedfabricationlab/timber_clay_hybrid
243efddac77970c989b551697a0e188932064849
[ "MIT" ]
null
null
null
src/timber_clay_hybrid/assembly/__init__.py
augmentedfabricationlab/timber_clay_hybrid
243efddac77970c989b551697a0e188932064849
[ "MIT" ]
null
null
null
from .assembly import HRCAssembly from .element import HRCElement from .artist import AssemblyArtist from .utilities import *
25.2
34
0.833333
15
126
7
0.6
0
0
0
0
0
0
0
0
0
0
0
0.126984
126
4
35
31.5
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
025ca2353166896f2415d32f2b2cf83266307837
19
py
Python
dbt/adapters/athena/__version__.py
sacundim/dbt-athena
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
[ "Apache-2.0" ]
92
2019-03-23T07:23:55.000Z
2021-06-15T18:18:32.000Z
dbt/adapters/athena/__version__.py
sacundim/dbt-athena
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
[ "Apache-2.0" ]
156
2019-03-21T03:26:58.000Z
2021-06-29T15:30:51.000Z
dbt/adapters/athena/__version__.py
sacundim/dbt-athena
120c9d3c88da98ec11ddfcf0a0a3fda49538f197
[ "Apache-2.0" ]
58
2019-04-12T09:09:43.000Z
2021-06-24T15:25:11.000Z
version = "0.21.0"
9.5
18
0.578947
4
19
2.75
0.75
0
0
0
0
0
0
0
0
0
0
0.25
0.157895
19
1
19
19
0.4375
0
0
0
0
0
0.315789
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5a12d4be2ea76f2966c05949af40280a754ab4f5
3,641
py
Python
tests/test_gru.py
nsuke/hyrnn
b3efcc7b004d8402344467bf319f1d86324d11e5
[ "Apache-2.0" ]
73
2019-04-08T08:17:39.000Z
2022-03-29T03:48:07.000Z
tests/test_gru.py
nsuke/hyrnn
b3efcc7b004d8402344467bf319f1d86324d11e5
[ "Apache-2.0" ]
10
2019-03-19T04:24:07.000Z
2021-02-25T00:19:24.000Z
tests/test_gru.py
nsuke/hyrnn
b3efcc7b004d8402344467bf319f1d86324d11e5
[ "Apache-2.0" ]
14
2019-05-06T09:42:37.000Z
2021-07-17T17:18:05.000Z
import hyrnn import torch.nn def test_MobiusGRU_no_packed_just_works(): input_size = 4 hidden_size = 3 batch_size = 5 gru = hyrnn.nets.MobiusGRU(input_size, hidden_size, hyperbolic_input=False) timestops = 10 sequence = torch.randn(timestops, batch_size, input_size) out, ht = gru(sequence) # out: (seq_len, batch, num_directions * hidden_size) # ht: (num_layers * num_directions, batch, hidden_size) assert out.shape[0] == timestops assert out.shape[1] == batch_size assert out.shape[2] == hidden_size assert ht.shape[0] == 1 assert ht.shape[1] == batch_size assert ht.shape[2] == hidden_size def test_MobiusGRU_2_layers_no_packed_just_works(): input_size = 4 hidden_size = 3 batch_size = 5 num_layers = 2 gru = hyrnn.nets.MobiusGRU( input_size, hidden_size, num_layers=num_layers, hyperbolic_input=False ) timestops = 10 sequence = torch.randn(timestops, batch_size, input_size) out, ht = gru(sequence) # out: (seq_len, batch, num_directions * hidden_size) # ht: (num_layers * num_directions, batch, hidden_size) assert out.shape[0] == timestops assert out.shape[1] == batch_size assert out.shape[2] == hidden_size assert ht.shape[0] == num_layers assert ht.shape[1] == batch_size assert ht.shape[2] == hidden_size def test_mobius_gru_loop_just_works(): input_size = 4 hidden_size = 3 num_sequences = 3 seqs = torch.nn.utils.rnn.pack_sequence( [ torch.zeros(10, input_size), torch.zeros(5, input_size), torch.zeros(1, input_size), ] ) loop_params = dict() loop_params["h0"] = torch.zeros(num_sequences, hidden_size, requires_grad=False) loop_params["input"] = seqs.data loop_params["weight_ih"] = torch.nn.Parameter( torch.randn(3 * hidden_size, input_size) ) loop_params["weight_hh"] = torch.nn.Parameter( torch.randn(3 * hidden_size, hidden_size) ) loop_params["bias"] = torch.randn(3, hidden_size) loop_params["c"] = 1.0 loop_params["nonlin"] = None loop_params["hyperbolic_input"] = True loop_params["hyperbolic_hidden_state0"] = True loop_params["batch_sizes"] = seqs.batch_sizes 
hyrnn.nets.mobius_gru_loop(**loop_params) def test_MobiusGRU_with_packed_just_works(): input_size = 4 hidden_size = 3 gru = hyrnn.nets.MobiusGRU(input_size, hidden_size, hyperbolic_input=False) seqs = torch.nn.utils.rnn.pack_sequence( [ torch.zeros(10, input_size), torch.zeros(5, input_size), torch.zeros(1, input_size), ] ) h, ht = gru(seqs) assert h.data.size(0) == 16 # sum of times assert h.data.size(1) == hidden_size # ht: (num_layers * num_directions, batch, hidden_size) assert ht.size(2) == hidden_size assert ht.size(1) == 3 # batch size assert ht.size(0) == 1 # num layers def test_MobiusGRU_2_layers_with_packed_just_works(): input_size = 4 hidden_size = 3 gru = hyrnn.nets.MobiusGRU( input_size, hidden_size, num_layers=2, hyperbolic_input=False) seqs = torch.nn.utils.rnn.pack_sequence([ torch.zeros(10, input_size), torch.zeros(5, input_size), torch.zeros(1, input_size) ]) h, ht = gru(seqs) assert h.data.size(0) == 16 # sum of times assert h.data.size(1) == hidden_size # ht: (num_layers * num_directions, batch, hidden_size) assert ht.size(2) == hidden_size assert ht.size(1) == 3 # batch size assert ht.size(0) == 2 # num layers
31.938596
84
0.651744
518
3,641
4.322394
0.133205
0.125056
0.053595
0.048236
0.793211
0.766414
0.766414
0.766414
0.719964
0.719964
0
0.02543
0.233178
3,641
113
85
32.221239
0.776504
0.106839
0
0.541667
0
0
0.026868
0.007412
0
0
0
0
0.229167
1
0.052083
false
0
0.020833
0
0.072917
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5a13d8b3614f878639aab1f5c25f37f50a754ad3
17
py
Python
tests/errors/semantic/ex4.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
tests/errors/semantic/ex4.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
tests/errors/semantic/ex4.py
toddrme2178/pyccel
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
[ "MIT" ]
null
null
null
x is 1 y is None
5.666667
9
0.647059
6
17
1.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0.090909
0.352941
17
2
10
8.5
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
5a6831d8ec7d93dd05d620a6d41fce88e4531158
138
py
Python
FB2/__init__.py
Ae-Mc/FB2
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
[ "MIT" ]
3
2020-11-15T10:55:22.000Z
2022-02-09T19:45:52.000Z
FB2/__init__.py
Ae-Mc/FB2
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
[ "MIT" ]
1
2020-11-15T11:04:59.000Z
2020-11-19T22:12:52.000Z
FB2/__init__.py
Ae-Mc/FB2
2c29f774ab08bdad5bd6144b1be71b93146ce8fe
[ "MIT" ]
null
null
null
from .FictionBook2 import FictionBook2 from .Author import Author from .TitleInfo import TitleInfo from .DocumentInfo import DocumentInfo
27.6
38
0.855072
16
138
7.375
0.375
0
0
0
0
0
0
0
0
0
0
0.016393
0.115942
138
4
39
34.5
0.95082
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ce6cccfac6a948d40441d5b2f5121b05efacb62f
295
py
Python
forecast_lab/metrics.py
gsimbr/forecast-lab
a26234f3e11b4b8268d6cbe33bb84d79da45ecdd
[ "MIT" ]
5
2019-06-04T11:04:06.000Z
2022-03-29T23:05:25.000Z
forecast_lab/metrics.py
gsimbr/forecast-lab
a26234f3e11b4b8268d6cbe33bb84d79da45ecdd
[ "MIT" ]
1
2022-02-14T13:22:47.000Z
2022-02-14T13:22:47.000Z
forecast_lab/metrics.py
gsimbr/forecast-lab
a26234f3e11b4b8268d6cbe33bb84d79da45ecdd
[ "MIT" ]
2
2020-02-17T11:54:18.000Z
2020-10-06T12:49:15.000Z
import numpy import math from sklearn.metrics import mean_squared_error def root_mean_squared_error(y_true, y_pred): return math.sqrt(mean_squared_error(y_true, y_pred)) def mean_absolute_percentage_error(y_true, y_pred): return numpy.mean(numpy.abs((y_true - y_pred) / y_true)) * 100
29.5
66
0.79322
51
295
4.215686
0.392157
0.116279
0.111628
0.186047
0.367442
0.367442
0.24186
0
0
0
0
0.011538
0.118644
295
9
67
32.777778
0.815385
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.428571
0.285714
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
ce9f408bce05df804432eb2ae09a3fb7b0a734bb
67
py
Python
strategy/__init__.py
mmmaaaggg/QABAT
d6f20d926de047af6857e466cf28084d0ba69993
[ "MIT" ]
3
2019-08-31T18:01:10.000Z
2021-04-04T09:51:17.000Z
strategy/__init__.py
mmmaaaggg/QABAT
d6f20d926de047af6857e466cf28084d0ba69993
[ "MIT" ]
null
null
null
strategy/__init__.py
mmmaaaggg/QABAT
d6f20d926de047af6857e466cf28084d0ba69993
[ "MIT" ]
1
2020-08-15T17:04:14.000Z
2020-08-15T17:04:14.000Z
# -*- coding: utf-8 -*- """ Created on 2017/11/18 @author: MG """
9.571429
23
0.522388
10
67
3.5
1
0
0
0
0
0
0
0
0
0
0
0.166667
0.19403
67
6
24
11.166667
0.481481
0.835821
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
cea36ec679e178237a093bbac8b36da547e73bca
46
py
Python
cloudbackup/tests/__init__.py
nloadholtes/python-cloudbackup-sdk
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
[ "Apache-2.0" ]
4
2015-02-10T14:28:12.000Z
2016-12-26T22:52:07.000Z
cloudbackup/tests/__init__.py
nloadholtes/python-cloudbackup-sdk
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
[ "Apache-2.0" ]
17
2015-01-22T21:58:36.000Z
2018-01-25T19:47:43.000Z
cloudbackup/tests/__init__.py
nloadholtes/python-cloudbackup-sdk
1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6
[ "Apache-2.0" ]
9
2015-01-26T19:25:45.000Z
2018-11-01T20:14:12.000Z
""" Rackspace Cloud Backup API Test Suite """
9.2
26
0.695652
6
46
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.173913
46
4
27
11.5
0.842105
0.804348
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
cebc72a58b425fb7f7cd7143c3625b862489e1f9
178
py
Python
desafios/desafio 021.py
juaoantonio/curso_video_python
7520223d8647929530a1cd96f7c7d8c8f264ba1e
[ "MIT" ]
null
null
null
desafios/desafio 021.py
juaoantonio/curso_video_python
7520223d8647929530a1cd96f7c7d8c8f264ba1e
[ "MIT" ]
null
null
null
desafios/desafio 021.py
juaoantonio/curso_video_python
7520223d8647929530a1cd96f7c7d8c8f264ba1e
[ "MIT" ]
null
null
null
import pygame pygame.mixer.init() pygame.init() pygame.mixer.music.load('/home/jaab/Música/bach_1.wav') pygame.mixer.music.play() input() pygame.event.wait() pygame.mixer.stop()
19.777778
55
0.758427
28
178
4.785714
0.607143
0.328358
0.238806
0
0
0
0
0
0
0
0
0.005917
0.050562
178
8
56
22.25
0.786982
0
0
0
0
0
0.157303
0.157303
0
0
0
0
0
1
0
true
0
0.125
0
0.125
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
0c6c60baa3e34ba265cfea8fd4ef73ba5f9cccb2
383
py
Python
tests/perf/test-prop-write.py
wenq1/duktape
5ed3eee19b291f3b3de0b212cc62c0aba0ab4ecb
[ "MIT" ]
4,268
2015-01-01T17:33:40.000Z
2022-03-31T17:53:31.000Z
tests/perf/test-prop-write.py
KiraanRK/esp32-duktape
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
[ "MIT" ]
1,667
2015-01-01T22:43:03.000Z
2022-02-23T22:27:19.000Z
tests/perf/test-prop-write.py
KiraanRK/esp32-duktape
1b7fbcb8bd6bfc346d92df30ec099df7f13b03aa
[ "MIT" ]
565
2015-01-08T14:15:28.000Z
2022-03-31T16:29:31.000Z
def test(): obj = { 'xxx1': 1, 'xxx2': 2, 'xxx3': 4, 'xxx4': 4, 'foo': 123 } i = 0 while i < 1e7: obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 obj['foo'] = 234 i += 1 test()
21.277778
68
0.373368
51
383
2.803922
0.333333
0.41958
0.629371
0.755245
0.629371
0.629371
0.629371
0.629371
0.629371
0.629371
0
0.2
0.412533
383
17
69
22.529412
0.435556
0
0
0.625
0
0
0.127937
0
0
0
0
0
0
1
0.0625
false
0
0
0
0.0625
0
0
0
0
null
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0caedcb03495a9332700a86dd6b9b7674d0e59ac
32
py
Python
gaia-sdk-python/conftest.py
leftshiftone/gaia-sdk
7e0d1ce054fada8ae154da70b71e8a90347c9f97
[ "MIT" ]
null
null
null
gaia-sdk-python/conftest.py
leftshiftone/gaia-sdk
7e0d1ce054fada8ae154da70b71e8a90347c9f97
[ "MIT" ]
10
2019-11-14T07:55:47.000Z
2022-02-26T19:36:45.000Z
gaia-sdk-python/conftest.py
leftshiftone/gaia-sdk
7e0d1ce054fada8ae154da70b71e8a90347c9f97
[ "MIT" ]
2
2020-05-12T11:09:53.000Z
2020-12-25T14:03:04.000Z
# enabled testing relative paths
32
32
0.84375
4
32
6.75
1
0
0
0
0
0
0
0
0
0
0
0
0.125
32
1
32
32
0.964286
0.9375
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
0cb88d9738f070179ad3791e8725e49dddde3cbd
45
py
Python
Weltantschauung/__init__.py
area42/Weltanschauung-
85694740f149aa741f69a67bf234b447ba11fb22
[ "MIT" ]
null
null
null
Weltantschauung/__init__.py
area42/Weltanschauung-
85694740f149aa741f69a67bf234b447ba11fb22
[ "MIT" ]
null
null
null
Weltantschauung/__init__.py
area42/Weltanschauung-
85694740f149aa741f69a67bf234b447ba11fb22
[ "MIT" ]
null
null
null
from .Weltantschauung import Weltantschauung
22.5
44
0.888889
4
45
10
0.75
0
0
0
0
0
0
0
0
0
0
0
0.088889
45
1
45
45
0.97561
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0cc6f68c50e68c364cd5514c50d107da2d606391
122
py
Python
api/crawller/admin.py
MahsaSeifikar/tweetphus
01b687f38365023cfaaa34739c50b0da79f0b510
[ "MIT" ]
null
null
null
api/crawller/admin.py
MahsaSeifikar/tweetphus
01b687f38365023cfaaa34739c50b0da79f0b510
[ "MIT" ]
1
2021-12-26T16:35:36.000Z
2021-12-29T15:07:01.000Z
api/crawller/admin.py
MahsaSeifikar/tweetphus
01b687f38365023cfaaa34739c50b0da79f0b510
[ "MIT" ]
null
null
null
from django.contrib import admin from crawller.models import User # Register your models here. admin.site.register(User)
20.333333
32
0.811475
18
122
5.5
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.122951
122
6
33
20.333333
0.925234
0.213115
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0b6e713eceaaae29df8407fca294483723c28e41
17,811
py
Python
models/misc/modules.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
models/misc/modules.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
models/misc/modules.py
zgjslc/Film-Recovery-master1
4497a9930398c9e826ac364056a79e5bcbf6c953
[ "Apache-2.0" ]
null
null
null
""" Name: modules.py Desc: This script defines some base module for building networks. """ from typing import Any import torch import torch.nn as nn import torch.nn.functional as F class UNet_down_block(nn.Module): def __init__(self, input_channel, output_channel, down_size=True): super(UNet_down_block, self).__init__() self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1) self.bn1 = nn.GroupNorm(8, output_channel) self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn2 = nn.GroupNorm(8, output_channel) self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn3 = nn.GroupNorm(8, output_channel) self.max_pool = nn.MaxPool2d(2, 2) self.relu = nn.ReLU() self.down_size = down_size def forward(self, x): x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) if self.down_size: x = self.max_pool(x) return x class UNet_up_block(nn.Module): def __init__(self, prev_channel, input_channel, output_channel, up_sample=True): super(UNet_up_block, self).__init__() self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1) self.bn1 = nn.GroupNorm(8, output_channel) self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn2 = nn.GroupNorm(8, output_channel) self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1) self.bn3 = nn.GroupNorm(8, output_channel) self.relu = torch.nn.ReLU() self.up_sample = up_sample def forward(self, prev_feature_map, x): if self.up_sample: x = self.up_sampling(x) x = torch.cat((x, prev_feature_map), dim=1) x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) return x class UNet(nn.Module): def __init__(self, downsample=6, in_channels=3, out_channels=3): super(UNet, self).__init__() self.in_channels, self.out_channels, self.downsample = 
in_channels, out_channels, downsample self.down1 = UNet_down_block(in_channels, 16, False) self.down_blocks = nn.ModuleList( [UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)] ) bottleneck = 2**(4 + downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.up_blocks = nn.ModuleList( [UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)] ) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.down1(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.relu(self.last_conv2(x)) #x = self.last_conv2(x) return x ''' class UNetDepth(nn.Module): def __init__(self): super(UNetDepth, self).__init__() self.down_block1 = UNet_down_block(3, 16, False) self.down_block2 = UNet_down_block(16, 32, True) self.down_block3 = UNet_down_block(32, 64, True) self.down_block4 = UNet_down_block(64, 128, True) self.down_block5 = UNet_down_block(128, 256, True) self.down_block6 = UNet_down_block(256, 512, True) self.down_block7 = UNet_down_block(512, 1024, False) self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn1 = nn.GroupNorm(8, 1024) self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn2 = nn.GroupNorm(8, 1024) self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn3 = torch.nn.GroupNorm(8, 1024) 
self.up_block1 = UNet_up_block(512, 1024, 512, False) self.up_block2 = UNet_up_block(256, 512, 256, True) self.up_block3 = UNet_up_block(128, 256, 128, True) self.up_block4 = UNet_up_block(64, 128, 64, True) self.up_block5 = UNet_up_block(32, 64, 32, True) self.up_block6 = UNet_up_block(16, 32, 16, True) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.x1 = self.down_block1(x) x = self.x2 = self.down_block2(self.x1) x = self.x3 = self.down_block3(self.x2) x = self.x4 = self.down_block4(self.x3) x = self.x5 = self.down_block5(self.x4) x = self.x6 = self.down_block6(self.x5) x = self.x7 = self.down_block7(self.x6) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) x = self.up_block1(self.x6, x) x = self.up_block2(self.x5, x) x = self.up_block3(self.x4, x) x = self.up_block4(self.x3, x) x = self.up_block5(self.x2, x) x = self.up_block6(self.x1, x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.last_conv2(x) return x ''' class UNetDepth(nn.Module): def __init__(self): super(UNetDepth, self).__init__() self.down_block1 = UNet_down_block(3, 16, False) self.down_block2 = UNet_down_block(16, 32, True) self.down_block3 = UNet_down_block(32, 64, True) self.down_block4 = UNet_down_block(64, 128, True) self.down_block5 = UNet_down_block(128, 256, True) self.down_block6 = UNet_down_block(256, 512, False) self.mid_conv1 = nn.Conv2d(512, 512, 3, padding=1) self.bn1 = nn.GroupNorm(8, 512) self.mid_conv2 = nn.Conv2d(512, 512, 3, padding=1) self.bn2 = nn.GroupNorm(8, 512) self.mid_conv3 = torch.nn.Conv2d(512, 512, 3, padding=1) self.bn3 = torch.nn.GroupNorm(8, 512) self.up_block1 = UNet_up_block(256, 512, 256, False) self.up_block2 = UNet_up_block(128, 256, 128, True) self.up_block3 = UNet_up_block(64, 128, 64, True) self.up_block4 = UNet_up_block(32, 64, 32, 
True) self.up_block5 = UNet_up_block(16, 32, 16, True) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.x1 = self.down_block1(x) x = self.x2 = self.down_block2(self.x1) x = self.x3 = self.down_block3(self.x2) x = self.x4 = self.down_block4(self.x3) x = self.x5 = self.down_block5(self.x4) x = self.x6 = self.down_block6(self.x5) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) x = self.up_block1(self.x5, x) x = self.up_block2(self.x4, x) x = self.up_block3(self.x3, x) x = self.up_block4(self.x2, x) x = self.up_block5(self.x1, x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.last_conv2(x) return x class UNet_sim(nn.Module): def __init__(self, downsample=4, in_channels=3, out_channels=3): super(UNet_sim, self).__init__() self.downsample, self.in_channels, self.out_channels = downsample, in_channels, out_channels self.conv = ConvBlock(in_channels, 64) self.down_blocks = nn.ModuleList( [UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True) for i in range(0, downsample)] ) bottleneck = 2 ** (6 + downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.up_blocks = nn.ModuleList( [UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i)) for i in range(0, downsample)] ) self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1) self.last_bn = nn.GroupNorm(8, 64) self.last_conv2 = nn.Conv2d(64, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.conv(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = 
self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) x = self.last_bn(self.last_conv1(x)) x = self.last_conv2(x) return x class Encoder(nn.Module): def __init__(self, downsample=6, in_channels=3): """:downsample the number of down blocks :in_channels the channel of input tensor """ super(Encoder, self).__init__() self.in_channels, self.downsample = in_channels, downsample self.down1 = UNet_down_block(in_channels, 16, False) self.down_blocks = nn.ModuleList( [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)] ) bottleneck = 2 ** (4 + downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.relu = nn.ReLU() def forward(self, x): x = self.down1(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) return xvals, x class Decoder(nn.Module): def __init__(self, downsample, out_channels, combine_num=0): super(Decoder, self).__init__() self.out_channels, self.downsample = out_channels, downsample self.combine_num = combine_num self.up_blocks = nn.ModuleList( [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, self.downsample)]) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, self.out_channels, 1, padding=0) self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.relu = nn.ReLU() def forward(self, xvals, x): devals = [] for i in range(0, 
self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) if i < self.combine_num: devals.append(x) y = self.last_bn(self.last_conv1(x)) y = self.last_conv2(x) if len(devals) > 0: for j, decode in enumerate(devals): for _ in range(len(devals) - 1 - j): decode = self.up_sampling(decode) devals[j] = decode combine_x = torch.cat(devals[::-1], dim=1) return y, combine_x else: return y, x class Encoder_sim(nn.Module): def __init__(self, downsample=4, in_channels=3): super(Encoder_sim, self).__init__() self.downsample = downsample self.conv = ConvBlock(in_channels, 64) self.down_blocks = nn.ModuleList( [UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True) for i in range(0, downsample)] ) bottleneck = 2 ** (6 + self.downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.relu = nn.ReLU() def forward(self, x): x = self.conv(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) return xvals, x class Decoder_sim(nn.Module): def __init__(self, downsample, out_channels): super(Decoder_sim, self).__init__() self.downsample, self.out_channels = downsample, out_channels self.up_blocks = nn.ModuleList( [UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i)) for i in range(0, self.downsample)] ) self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1) self.last_bn = nn.GroupNorm(8, 64) self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, xvals, x): for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) y = self.last_bn(self.last_conv1(x)) y = self.last_conv2(y) return y, x 
class ThreeD2NorDepth(nn.Module): def __init__(self, downsample=3, use_simple=True): super(ThreeD2NorDepth, self).__init__() if use_simple: self.threeD_encoder = Encoder_sim(downsample=downsample, in_channels=3) self.normal_decoder = Decoder_sim(downsample=downsample, out_channels=3) self.depth_decoder = Decoder_sim(downsample=downsample, out_channels=1) else: self.threeD_encoder = Encoder(downsample=downsample, in_channels=3) self.normal_decoder = Decoder(downsample=downsample, out_channels=3, combine_num=0) self.depth_decoder = Decoder(downsample=downsample, out_channels=1, combine_num=0) def forward(self, x): xvals, x = self.threeD_encoder(x) nor, _ = self.normal_decoder(xvals, x) dep, _ = self.depth_decoder(xvals, x) return nor, dep class AlbedoDecoder_sim(nn.Module): def __init__(self, downsample=6, out_channels=1): super(AlbedoDecoder_sim, self).__init__() self.out_channels, self.downsample = out_channels, downsample self.up_blocks = nn.ModuleList( [UNet_up_block(2 ** (7 + i), 2 ** (8 + i), 2 ** (7 + i)) for i in range(0, self.downsample)]) self.last_conv1 = nn.Conv2d(128, 64, 3, padding=1) self.last_bn = nn.GroupNorm(8, 64) self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, xvals, x): for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) y = self.last_bn(self.last_conv1(x)) y = self.last_conv2(y) return y, x class AlbedoDecoder(nn.Module): def __init__(self, downsample=6, out_channels=1): super(AlbedoDecoder, self).__init__() self.out_channels, self.downsample = out_channels, downsample self.up_blocks = nn.ModuleList( [UNet_up_block(2 ** (5 + i), 2 ** (6 + i), 2 ** (5 + i)) for i in range(0, self.downsample)]) self.last_conv1 = nn.Conv2d(32, 32, 3, padding=1) self.last_bn = nn.GroupNorm(8, 32) self.last_conv2 = nn.Conv2d(32, self.out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, xvals, x): for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) 
y = self.last_bn(self.last_conv1(x)) y = self.last_conv2(y) return y, x class ConvBlock(nn.Module): def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=False, groups=8, dilation=1, transpose=False): super(ConvBlock, self).__init__() self.transpose = transpose self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=padding*dilation) if self.transpose: self.convt = nn.ConvTranspose2d( f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1 ) if use_groupnorm: self.bn = nn.GroupNorm(groups, f1) else: self.bn = nn.BatchNorm2d(f1) def forward(self, x): # x = F.dropout(x, 0.04, self.training) x = self.bn(x) if self.transpose: # x = F.upsample(x, scale_factor=2, mode='bilinear') x = F.relu(self.convt(x)) # x = x[:, :, :-1, :-1] x = F.relu(self.conv(x)) return x
41.133949
117
0.60311
2,633
17,811
3.898975
0.058488
0.036528
0.028638
0.040522
0.82856
0.805864
0.738067
0.718196
0.686636
0.64699
0
0.060643
0.259334
17,811
433
118
41.133949
0.717556
0.016563
0
0.549689
0
0
0.001049
0
0
0
0
0
0
1
0.080745
false
0
0.012422
0
0.177019
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0b83bfc7e85aab893f830a54d4b1eb6b31224483
43
py
Python
examples/getchar.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
3
2022-03-13T13:22:35.000Z
2022-03-18T08:22:51.000Z
examples/getchar.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
1
2022-03-21T16:29:54.000Z
2022-03-21T16:29:54.000Z
examples/getchar.py
scalabli/quo
70b6d4129ee705930f1f8a792fc4c9247d973f9d
[ "MIT" ]
null
null
null
from quo.getchar import getchar getchar()
10.75
31
0.790698
6
43
5.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.139535
43
3
32
14.333333
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
0ba2dfd95ee79027d8c63a0c75d4bd279b8d3f02
30
py
Python
yolov3/utils/__init__.py
hysts/pytorch_yolov3
6d4c7a1e42d366894effac8ca52f7116f891b5ab
[ "MIT" ]
13
2019-03-22T15:22:22.000Z
2021-09-30T21:15:37.000Z
yolov3/utils/__init__.py
hysts/pytorch_yolov3
6d4c7a1e42d366894effac8ca52f7116f891b5ab
[ "MIT" ]
null
null
null
yolov3/utils/__init__.py
hysts/pytorch_yolov3
6d4c7a1e42d366894effac8ca52f7116f891b5ab
[ "MIT" ]
null
null
null
from yolov3.utils import data
15
29
0.833333
5
30
5
1
0
0
0
0
0
0
0
0
0
0
0.038462
0.133333
30
1
30
30
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e7ea9b418ef09dc2361de5d9ada98bfd38198af3
19
py
Python
login.py
XM001-creater/test_one
1cf96a45c8dfbf988125e3d250d86fb06fe65c34
[ "MIT" ]
null
null
null
login.py
XM001-creater/test_one
1cf96a45c8dfbf988125e3d250d86fb06fe65c34
[ "MIT" ]
null
null
null
login.py
XM001-creater/test_one
1cf96a45c8dfbf988125e3d250d86fb06fe65c34
[ "MIT" ]
null
null
null
num1 =1 num2 = 222
6.333333
10
0.631579
4
19
3
1
0
0
0
0
0
0
0
0
0
0
0.428571
0.263158
19
2
11
9.5
0.428571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f0178f93e06a5ab22b51ea951cf67bdba0d3c339
59
py
Python
pdip/processing/factories/__init__.py
ahmetcagriakca/pdip
c4c16d5666a740154cabdc6762cd44d98b7bdde8
[ "MIT" ]
2
2021-12-09T21:07:46.000Z
2021-12-11T22:18:01.000Z
pdip/processing/factories/__init__.py
PythonDataIntegrator/pdip
c4c16d5666a740154cabdc6762cd44d98b7bdde8
[ "MIT" ]
null
null
null
pdip/processing/factories/__init__.py
PythonDataIntegrator/pdip
c4c16d5666a740154cabdc6762cd44d98b7bdde8
[ "MIT" ]
3
2021-11-15T00:47:00.000Z
2021-12-17T11:35:45.000Z
from .process_manager_factory import ProcessManagerFactory
29.5
58
0.915254
6
59
8.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.067797
59
1
59
59
0.945455
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f042f18d33f05c333a291d256763c607089f137e
214
py
Python
answers/easy/single-number.py
kigawas/lintcode-python
c07177a9969abb3860c6c599fe1e4d8be9dd762e
[ "Apache-2.0" ]
1
2017-11-01T15:00:02.000Z
2017-11-01T15:00:02.000Z
answers/easy/single-number.py
kigawas/lintcode-python
c07177a9969abb3860c6c599fe1e4d8be9dd762e
[ "Apache-2.0" ]
null
null
null
answers/easy/single-number.py
kigawas/lintcode-python
c07177a9969abb3860c6c599fe1e4d8be9dd762e
[ "Apache-2.0" ]
null
null
null
class Solution: """ @param A : an integer array @return : a integer """ def singleNumber(self, A): # write your code here return reduce(lambda x, y: x ^ y, A) if A != [] else 0
21.4
62
0.537383
30
214
3.833333
0.733333
0.034783
0
0
0
0
0
0
0
0
0
0.007092
0.341122
214
9
63
23.777778
0.808511
0.32243
0
0
0
0
0
0
0
0
0
0.111111
0
1
0.333333
false
0
0
0.333333
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
1
1
0
0
5
f046760db9f9c57e0de347811b277f149a454916
49
py
Python
pluploader/upm/exceptions.py
craftamap/pluploader
c44e683282abb6fba8ced156aa807a66736a4ca1
[ "Apache-2.0" ]
12
2020-04-09T12:50:23.000Z
2020-10-30T14:43:40.000Z
pluploader/upm/exceptions.py
livelyapps/pluploader
39f2f50ba9625c038cdb1f5a7ecf2ad64da5577c
[ "Apache-2.0" ]
40
2020-04-12T15:25:46.000Z
2021-06-04T19:47:44.000Z
pluploader/upm/exceptions.py
craftamap/pluploader
c44e683282abb6fba8ced156aa807a66736a4ca1
[ "Apache-2.0" ]
2
2020-09-16T14:07:49.000Z
2020-10-30T14:45:07.000Z
class UploadFailedException(Exception): pass
16.333333
39
0.795918
4
49
9.75
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
49
2
40
24.5
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
f07d5c996cff48d3e4ce4edaac97743f3de1a7ce
171
py
Python
src/ihtt/__init__.py
dekoza/i-hate-time-tracking
adb6018b56c836317535f2e2346dfb8d9cce3aac
[ "Apache-2.0" ]
null
null
null
src/ihtt/__init__.py
dekoza/i-hate-time-tracking
adb6018b56c836317535f2e2346dfb8d9cce3aac
[ "Apache-2.0" ]
null
null
null
src/ihtt/__init__.py
dekoza/i-hate-time-tracking
adb6018b56c836317535f2e2346dfb8d9cce3aac
[ "Apache-2.0" ]
null
null
null
""" I Hate Time Tracking package. Get time tracking out of your way. """ from typing import List __all__: List[str] = [] # noqa: WPS410 (the only __variable__ we use)
17.1
70
0.695906
26
171
4.269231
0.884615
0.216216
0
0
0
0
0
0
0
0
0
0.021898
0.19883
171
9
71
19
0.788321
0.643275
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b2bd1fc6f7777c13168c679b65bd978ef82ec6d2
164
py
Python
pbxproj/pbxsections/PBXResourcesBuildPhase.py
JoliChen/mod-pbxproj
24994416eec9cec838dce696c3cc9262c01ba883
[ "MIT" ]
1
2020-01-16T08:33:38.000Z
2020-01-16T08:33:38.000Z
pbxproj/pbxsections/PBXResourcesBuildPhase.py
JoliChen/mod-pbxproj
24994416eec9cec838dce696c3cc9262c01ba883
[ "MIT" ]
null
null
null
pbxproj/pbxsections/PBXResourcesBuildPhase.py
JoliChen/mod-pbxproj
24994416eec9cec838dce696c3cc9262c01ba883
[ "MIT" ]
null
null
null
from pbxproj.pbxsections.PBXGenericBuildPhase import * class PBXResourcesBuildPhase(PBXGenericBuildPhase): def _get_comment(self): return 'Resources'
23.428571
54
0.786585
14
164
9.071429
0.928571
0
0
0
0
0
0
0
0
0
0
0
0.146341
164
6
55
27.333333
0.907143
0
0
0
0
0
0.054878
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
b2d171ee084b4ded299d8d9b2d8e8e0fa604218a
213
py
Python
src/about.py
jukeboxroundtable/JukeboxRoundtable
06670d2e8511848829b68fddac5bc77806606f98
[ "MIT" ]
1
2019-02-15T17:33:51.000Z
2019-02-15T17:33:51.000Z
src/about.py
jukeboxroundtable/JukeboxRoundtable
06670d2e8511848829b68fddac5bc77806606f98
[ "MIT" ]
37
2019-01-30T18:32:43.000Z
2019-06-11T18:00:11.000Z
src/about.py
jukeboxroundtable/JukeboxRoundtable
06670d2e8511848829b68fddac5bc77806606f98
[ "MIT" ]
null
null
null
from flask import Blueprint, render_template about_blueprint = Blueprint('about', __name__) @about_blueprint.route('/about') def about(): """Show the about page.""" return render_template('about.html')
21.3
46
0.7277
26
213
5.653846
0.576923
0.190476
0.258503
0
0
0
0
0
0
0
0
0
0.13615
213
9
47
23.666667
0.798913
0.093897
0
0
0
0
0.112299
0
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0.6
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
1
0
5
b2feb55d6f844492c6231b317cce3362c8ea498f
69
py
Python
Bronze/Bronze_V/17496.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
Bronze/Bronze_V/17496.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
Bronze/Bronze_V/17496.py
masterTyper/baekjoon_solved_ac
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
[ "MIT" ]
null
null
null
N, T, C, P = map(int, input().split()) print(((N - 1) // T) * C * P)
23
38
0.434783
14
69
2.142857
0.714286
0.133333
0.2
0
0
0
0
0
0
0
0
0.018868
0.231884
69
3
39
23
0.54717
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
650f4d544268699293dfae61c4d5b0971b890ccb
50
py
Python
src/converters/__init__.py
Peilonrayz/json_to_object
ae5ba42dcab71010302f42d78dbfd559c12496c9
[ "MIT" ]
null
null
null
src/converters/__init__.py
Peilonrayz/json_to_object
ae5ba42dcab71010302f42d78dbfd559c12496c9
[ "MIT" ]
null
null
null
src/converters/__init__.py
Peilonrayz/json_to_object
ae5ba42dcab71010302f42d78dbfd559c12496c9
[ "MIT" ]
null
null
null
from .converter import Converter, Converters, ron
25
49
0.82
6
50
6.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.12
50
1
50
50
0.931818
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
651a2825c2a207bb2573070d6b6a820935667fe9
102
py
Python
Code/Python/LeetCode/tempCodeRunnerFile.py
dks1018/CoffeeShopCoding
13ac1700673c86c601eb2758570920620a956e4c
[ "ADSL" ]
null
null
null
Code/Python/LeetCode/tempCodeRunnerFile.py
dks1018/CoffeeShopCoding
13ac1700673c86c601eb2758570920620a956e4c
[ "ADSL" ]
null
null
null
Code/Python/LeetCode/tempCodeRunnerFile.py
dks1018/CoffeeShopCoding
13ac1700673c86c601eb2758570920620a956e4c
[ "ADSL" ]
null
null
null
arr_1 = ["1","2","3","4","5","6","7"] arr_2 = [] for n in arr_1: arr_2.insert(0,n) print(arr_2)
12.75
37
0.5
24
102
1.916667
0.583333
0.26087
0
0
0
0
0
0
0
0
0
0.154762
0.176471
102
7
38
14.571429
0.392857
0
0
0
0
0
0.068627
0
0
0
0
0
0
1
0
false
0
0
0
0
0.2
1
0
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
652bea27b54e41523374e9b0f3eb5a5744c81344
81
py
Python
pyeparse/tests/__init__.py
Eric89GXL/pyeparse
c1907c39276aacb0fad80034d69b537b07f82786
[ "BSD-3-Clause" ]
null
null
null
pyeparse/tests/__init__.py
Eric89GXL/pyeparse
c1907c39276aacb0fad80034d69b537b07f82786
[ "BSD-3-Clause" ]
null
null
null
pyeparse/tests/__init__.py
Eric89GXL/pyeparse
c1907c39276aacb0fad80034d69b537b07f82786
[ "BSD-3-Clause" ]
null
null
null
# Authors: Denis Engemann <[email protected]> # # License: BSD (3-clause)
20.25
52
0.716049
11
81
5.272727
0.818182
0.448276
0
0
0
0
0
0
0
0
0
0.014085
0.123457
81
3
53
27
0.802817
0.91358
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
653f3d126c9950c17fb6dd172757205541017a4a
164
py
Python
solutions/python3/1009.py
sm2774us/amazon_interview_prep_2021
f580080e4a6b712b0b295bb429bf676eb15668de
[ "MIT" ]
42
2020-08-02T07:03:49.000Z
2022-03-26T07:50:15.000Z
solutions/python3/1009.py
ajayv13/leetcode
de02576a9503be6054816b7444ccadcc0c31c59d
[ "MIT" ]
null
null
null
solutions/python3/1009.py
ajayv13/leetcode
de02576a9503be6054816b7444ccadcc0c31c59d
[ "MIT" ]
40
2020-02-08T02:50:24.000Z
2022-03-26T15:38:10.000Z
class Solution: def bitwiseComplement(self, N: int, M = 0, m = 0) -> int: return N ^ M if M and M >= N else self.bitwiseComplement(N, M + 2 ** m, m + 1)
54.666667
86
0.579268
29
164
3.275862
0.517241
0.042105
0
0
0
0
0
0
0
0
0
0.033613
0.27439
164
3
86
54.666667
0.764706
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
6541203743362ff0dff952a51d471197ab93e9cb
109
py
Python
deid/model/layers/__init__.py
KavishBhatia/deid-training-data
9d586cd7f52d929b2571028365587d3f96e44caa
[ "MIT" ]
15
2018-10-28T13:57:55.000Z
2022-01-03T07:25:04.000Z
deid/model/layers/__init__.py
KavishBhatia/deid-training-data
9d586cd7f52d929b2571028365587d3f96e44caa
[ "MIT" ]
7
2019-04-29T13:55:52.000Z
2021-12-13T19:51:30.000Z
deid/model/layers/__init__.py
KavishBhatia/deid-training-data
9d586cd7f52d929b2571028365587d3f96e44caa
[ "MIT" ]
3
2019-08-01T19:02:37.000Z
2021-01-08T09:12:25.000Z
from .gradient_reversal import GradientReversal from .noise import Noise, AdditiveNoise, MultiplicativeNoise
36.333333
60
0.87156
11
109
8.545455
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.091743
109
2
61
54.5
0.949495
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e8db35ba967d9e65ca9d8b55e48764bb1fce3b61
101
py
Python
parents/admin.py
joseph0919/Student_Management_Django
085e839a86ac574f5ebe83a4911c5808841f50cd
[ "MIT" ]
null
null
null
parents/admin.py
joseph0919/Student_Management_Django
085e839a86ac574f5ebe83a4911c5808841f50cd
[ "MIT" ]
null
null
null
parents/admin.py
joseph0919/Student_Management_Django
085e839a86ac574f5ebe83a4911c5808841f50cd
[ "MIT" ]
null
null
null
from django.contrib import admin from parents.models import Guardian admin.site.register(Guardian)
16.833333
35
0.831683
14
101
6
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.108911
101
5
36
20.2
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e8e44f8a42a8c9211af1456c9b87394460144341
3,366
py
Python
openapi_server/controllers/instance_metadata_controller.py
eugenegesdisc/gmuedr
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
[ "MIT" ]
null
null
null
openapi_server/controllers/instance_metadata_controller.py
eugenegesdisc/gmuedr
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
[ "MIT" ]
null
null
null
openapi_server/controllers/instance_metadata_controller.py
eugenegesdisc/gmuedr
e8b3e5c7b8d18421d875f0f6f778a37a6d8ec3fd
[ "MIT" ]
null
null
null
from typing import List, Dict from aiohttp import web from openapi_server.models.edr_feature_collection_geo_json import EdrFeatureCollectionGeoJSON from openapi_server.models.exception import Exception from openapi_server.models.one_ofobjectobject import OneOfobjectobject from openapi_server import util async def list_data_instance_locations(request: web.Request, collection_id, instance_id, bbox=None, datetime=None, limit=None) -> web.Response: """List available location identifers for the instance List the locations available for the instance of the collection :param collection_id: Identifier (id) of a specific collection :type collection_id: str :param instance_id: Identifier (id) of a specific instance of a collection :type instance_id: str :param bbox: Only features that have a geometry that intersects the bounding box are selected. The bounding box is provided as four or six numbers, depending on whether the coordinate reference system includes a vertical axis (height or depth): * Lower left corner, coordinate axis 1 * Lower left corner, coordinate axis 2 * Minimum value, coordinate axis 3 (optional) * Upper right corner, coordinate axis 1 * Upper right corner, coordinate axis 2 * Maximum value, coordinate axis 3 (optional) The coordinate reference system of the values is specified by the &#x60;crs&#x60; query parameter. If the &#x60;crs&#x60; query parameter is not defined the coordinate reference system is defined by the default &#x60;crs&#x60; for the query type. If a default &#x60;crs&#x60; has not been defined the values will be assumed to be in the WGS 84 longitude/latitude (http://www.opengis.net/def/crs/OGC/1.3/CRS84) coordinate reference system. For WGS 84 longitude/latitude the values are in most cases the sequence of minimum longitude, minimum latitude, maximum longitude and maximum latitude. However, in cases where the box spans the antimeridian the first value (west-most box edge) is larger than the third value (east-most box edge). 
If the vertical axis is included, the third and the sixth number are the bottom and the top of the 3-dimensional bounding box. If a feature has multiple spatial geometry properties, it is the decision of the server whether only a single spatial geometry property is used to determine the extent or all relevant geometries. :type bbox: dict | bytes :param datetime: Either a date-time or an interval, open or closed. Date and time expressions adhere to RFC 3339. Open intervals are expressed using double-dots. Examples: * A date-time: \&quot;2018-02-12T23:20:50Z\&quot; * A closed interval: \&quot;2018-02-12T00:00:00Z/2018-03-18T12:31:12Z\&quot; * Open intervals: \&quot;2018-02-12T00:00:00Z/..\&quot; or \&quot;../2018-03-18T12:31:12Z\&quot; Only features that have a temporal property that intersects the value of &#x60;datetime&#x60; are selected. If a feature has multiple temporal properties, it is the decision of the server whether only a single temporal property is used to determine the extent or all relevant temporal properties. :type datetime: str :param limit: The optional limit parameter limits the number of results that are presented in the response document. Minimum &#x3D; 1. Maximum &#x3D; 10000. Default &#x3D; 10. :type limit: int """ # bbox = .from_dict(bbox) return web.Response(status=200)
116.068966
1,557
0.778372
529
3,366
4.916824
0.359168
0.032295
0.026144
0.026528
0.246828
0.147636
0.077662
0.077662
0.077662
0.077662
0
0.043173
0.153595
3,366
28
1,558
120.214286
0.869779
0.006833
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e8eaacdbf20c0fc2ed919b5008d2aa81872089dc
192
py
Python
pyGAE/handlers/ManageSubscriptionHandler.py
analyticstraining/pycocms
29d7c3eea9377495bcafd8b8c62016c21c1a74a7
[ "MIT" ]
null
null
null
pyGAE/handlers/ManageSubscriptionHandler.py
analyticstraining/pycocms
29d7c3eea9377495bcafd8b8c62016c21c1a74a7
[ "MIT" ]
null
null
null
pyGAE/handlers/ManageSubscriptionHandler.py
analyticstraining/pycocms
29d7c3eea9377495bcafd8b8c62016c21c1a74a7
[ "MIT" ]
null
null
null
from BaseHandler import BaseHandler, user_required class ManageSubscriptionHandler(BaseHandler): @user_required def get(self): self.render_template('manage_subscription.html')
32
56
0.786458
20
192
7.35
0.75
0.204082
0.312925
0
0
0
0
0
0
0
0
0
0.140625
192
6
56
32
0.890909
0
0
0
0
0
0.124352
0.124352
0
0
0
0
0
1
0.2
false
0
0.2
0
0.6
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
3300e6cf675f4b79e58bd1692aff8eee2b9eee77
124
py
Python
math/PowerModPower.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
math/PowerModPower.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
math/PowerModPower.py
silvioedu/HackerRank-Python-Practice
e31ebe49d431c0a23fed0cd67a6984e2b0b7a260
[ "MIT" ]
null
null
null
if __name__ == '__main__': a, b, m = int(input()),int(input()),int(input()) print(pow(a,b)) print(pow(a,b,m))
31
53
0.532258
21
124
2.761905
0.47619
0.103448
0.103448
0.551724
0
0
0
0
0
0
0
0
0.201613
124
4
54
31
0.585859
0
0
0
0
0
0.065574
0
0
0
0
0
0
1
0
true
0
0
0
0
0.5
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
3301f92fef2ea95eab5a3e90a808b11c54276e49
273
py
Python
test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py
PyJedi/PyAutoLens
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
[ "MIT" ]
1
2020-04-06T20:07:56.000Z
2020-04-06T20:07:56.000Z
test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py
PyJedi/PyAutoLens
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
[ "MIT" ]
null
null
null
test_autolens/integration/tests/imaging/lens_only/mock_nlo/lens_light__hyper_bg_noise.py
PyJedi/PyAutoLens
bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7
[ "MIT" ]
null
null
null
from test_autolens.integration.tests.imaging.lens_only import lens_light__hyper_bg_noise from test_autolens.integration.tests.imaging.runner import run_a_mock class TestCase: def _test__lens_light__hyper_bg_noise(self): run_a_mock(lens_light__hyper_bg_noise)
34.125
88
0.849817
43
273
4.813953
0.488372
0.130435
0.202899
0.231884
0.681159
0.376812
0
0
0
0
0
0
0.098901
273
7
89
39
0.841463
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
3306b230b7b452f85e6bb142239de2540a26cb53
72
py
Python
marrow/interface/__init__.py
marrow/interface
4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d
[ "MIT" ]
2
2016-03-22T15:21:48.000Z
2017-02-21T23:52:46.000Z
marrow/interface/__init__.py
marrow/interface
4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d
[ "MIT" ]
1
2019-01-21T22:09:00.000Z
2019-01-21T22:09:00.000Z
marrow/interface/__init__.py
marrow/interface
4d60f9fc16e949c5da3b3756c77d60fe84f0ed2d
[ "MIT" ]
2
2015-12-21T03:24:06.000Z
2016-11-10T15:19:27.000Z
from .meta import Interface from .release import version as __version__
24
43
0.833333
10
72
5.6
0.7
0
0
0
0
0
0
0
0
0
0
0
0.138889
72
2
44
36
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3334965719b021bbd03119042e95c8563a0cdb7e
9,233
py
Python
tests.py
jpchiodini/Grasp-Planning
e31234244b8f934743605ebea59d9d98a258957e
[ "MIT" ]
null
null
null
tests.py
jpchiodini/Grasp-Planning
e31234244b8f934743605ebea59d9d98a258957e
[ "MIT" ]
null
null
null
tests.py
jpchiodini/Grasp-Planning
e31234244b8f934743605ebea59d9d98a258957e
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """ tests.py ======== Created by: hbldh <[email protected]> Created on: 2016-02-07, 23:50 """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import import numpy as np import pyefd lbl_1 = 5 img_1 = np.array( [[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 64, 127, 64, 64, 0, 0, 64, 191, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 127, 255, 255, 191, 64, 0, 0, 0, 0, 0, 64, 127, 127, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 191, 
0, 0, 0, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 64, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 191, 255, 255, 255, 255, 127, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 127, 255, 255, 191, 64, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], [255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]]) contour_1 = np.array([[24.0, 13.0125], [23.0125, 14.0], [23.004188481675392, 15.0], [23.0, 15.0125], [22.0125, 16.0], [22.00313725490196, 17.0], [22.0, 17.004188481675392], [21.0, 17.004188481675392], [20.004188481675392, 18.0], [20.0, 18.004188481675392], [19.0, 18.006299212598424], [18.0, 18.006299212598424], [17.0, 18.004188481675392], [16.9875, 18.0], [16.0, 17.0125], [15.993700787401576, 17.0], [15.0, 16.006299212598424], [14.995811518324608, 16.0], [14.9875, 15.0], [14.0, 14.0125], [13.995811518324608, 14.0], [13.9875, 13.0], [13.0, 12.0125], [12.996862745098039, 12.0], [12.993700787401576, 11.0], [12.9875, 10.0], [12.0, 9.0125], [11.0, 9.003137254901961], [10.0, 9.006299212598424], [9.006299212598424, 10.0], [9.003137254901961, 11.0], [9.003137254901961, 12.0], [9.004188481675392, 13.0], [9.0125, 14.0], [10.0, 14.9875], [10.003137254901961, 15.0], [10.003137254901961, 16.0], [10.003137254901961, 17.0], [10.003137254901961, 18.0], [10.003137254901961, 19.0], [10.0, 19.0125], [9.0125, 20.0], [9.006299212598424, 21.0], [9.006299212598424, 22.0], [9.0, 22.006299212598424], [8.9875, 22.0], [8.0, 21.0125], [7.996862745098039, 21.0], [7.996862745098039, 20.0], [8.0, 19.9875], [8.9875, 19.0], [8.9875, 18.0], [8.993700787401576, 17.0], [8.9875, 16.0], [8.0, 15.0125], [7.996862745098039, 15.0], [7.9875, 14.0], [7.0, 13.0125], [6.993700787401575, 13.0], [6.0, 12.006299212598424], [5.993700787401575, 12.0], [5.9875, 11.0], [5.995811518324607, 10.0], [6.0, 9.996862745098039], [7.0, 9.9875], [7.9875, 9.0], [8.0, 8.995811518324608], [8.995811518324608, 8.0], [9.0, 7.995811518324607], [10.0, 7.9875], [10.9875, 7.0], [11.0, 6.995811518324607], [12.0, 6.995811518324607], [12.0125, 7.0], [13.0, 7.9875], [13.003137254901961, 8.0], [13.006299212598424, 9.0], [13.0125, 10.0], [14.0, 10.9875], [14.004188481675392, 11.0], [14.006299212598424, 12.0], [15.0, 
12.993700787401576], [15.004188481675392, 13.0], [15.006299212598424, 14.0], [16.0, 14.993700787401576], [16.00313725490196, 15.0], [17.0, 15.996862745098039], [17.006299212598424, 16.0], [18.0, 16.993700787401576], [19.0, 16.993700787401576], [19.993700787401576, 16.0], [20.0, 15.993700787401576], [20.993700787401576, 15.0], [21.0, 14.9875], [21.9875, 14.0], [21.995811518324608, 13.0], [21.99686274509804, 12.0], [21.99686274509804, 11.0], [21.993700787401576, 10.0], [21.0, 9.006299212598424], [20.993700787401576, 9.0], [21.0, 8.993700787401576], [22.0, 8.996862745098039], [22.006299212598424, 9.0], [23.0, 9.993700787401576], [23.006299212598424, 10.0], [24.0, 10.993700787401576], [24.00313725490196, 11.0], [24.00313725490196, 12.0], [24.00313725490196, 13.0], [24.0, 13.0125]]) def test_efd_shape_1(): coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=10) assert coeffs.shape == (10, 4) def test_efd_shape_2(): c = pyefd.elliptic_fourier_descriptors(contour_1, order=40) assert c.shape == (40, 4) def test_normalizing_1(): c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=False) assert np.abs(c[0, 0]) > 0.0 assert np.abs(c[0, 1]) > 0.0 assert np.abs(c[0, 2]) > 0.0 def test_normalizing_2(): c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=True) np.testing.assert_almost_equal(c[0, 0], 1.0, decimal=14) np.testing.assert_almost_equal(c[0, 1], 0.0, decimal=14) np.testing.assert_almost_equal(c[0, 2], 0.0, decimal=14) def test_locus(): locus = pyefd.calculate_dc_coefficients(contour_1) np.testing.assert_array_almost_equal(locus, np.mean(contour_1, axis=0), decimal=0) def test_fit_1(): n = 300 locus = pyefd.calculate_dc_coefficients(contour_1) coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=20) t = np.linspace(0, 1.0, n) xt = np.ones((n,)) * locus[0] yt = np.ones((n,)) * locus[1] for n in pyefd._range(coeffs.shape[0]): xt += (coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t)) + \ (coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t)) yt += 
(coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t)) + \ (coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t)) assert True
58.069182
120
0.552475
1,533
9,233
3.283757
0.077626
0.725864
1.045888
1.342074
0.53536
0.530393
0.529996
0.499007
0.463051
0.436035
0
0.578673
0.252464
9,233
158
121
58.436709
0.150681
0.014946
0
0.285714
0
0
0
0
0
0
0
0
0.079365
1
0.047619
false
0
0.047619
0
0.095238
0.007937
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
333df86277504bde365d69f5650cddd392f0652c
96
py
Python
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py
Retraces/UkraineBot
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/8f/d6/74/783ee5c7dc6070d67f88eab5cd5dae217fdec6556b8d97a3bd1061e541
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.385417
0
96
1
96
96
0.510417
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
3350e65017c77b155a623adbb0c445784ce6a443
33,168
py
Python
cinder/tests/unit/policies/test_volume.py
arunvinodqmco/cinder
62cb72c6890e458427ba0601646b186b7b36dc01
[ "Apache-2.0" ]
571
2015-01-01T17:47:26.000Z
2022-03-23T07:46:36.000Z
cinder/tests/unit/policies/test_volume.py
arunvinodqmco/cinder
62cb72c6890e458427ba0601646b186b7b36dc01
[ "Apache-2.0" ]
37
2015-01-22T23:27:04.000Z
2021-02-05T16:38:48.000Z
cinder/tests/unit/policies/test_volume.py
arunvinodqmco/cinder
62cb72c6890e458427ba0601646b186b7b36dc01
[ "Apache-2.0" ]
841
2015-01-04T17:17:11.000Z
2022-03-31T12:06:51.000Z
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import ddt from cinder.api.contrib import volume_encryption_metadata from cinder.api.contrib import volume_tenant_attribute from cinder.api.v3 import volumes from cinder import exception from cinder.policies import volumes as volume_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api # TODO(yikun): The below policy test cases should be added: # * HOST_ATTRIBUTE_POLICY # * MIG_ATTRIBUTE_POLICY class VolumePolicyTests(test_base.CinderPolicyTests): def test_admin_can_create_volume(self): admin_context = self.admin_context path = '/v3/%(project_id)s/volumes' % { 'project_id': admin_context.project_id } body = {"volume": {"size": 1}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_nonadmin_user_can_create_volume(self): user_context = self.user_context path = '/v3/%(project_id)s/volumes' % { 'project_id': user_context.project_id } body = {"volume": {"size": 1}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_admin_can_create_volume_from_image(self): 
admin_context = self.admin_context path = '/v3/%(project_id)s/volumes' % { 'project_id': admin_context.project_id } body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_nonadmin_user_can_create_volume_from_image(self): user_context = self.user_context path = '/v3/%(project_id)s/volumes' % { 'project_id': user_context.project_id } body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get_volume') def test_admin_can_show_volumes(self, mock_volume): # Make sure administrators are authorized to list volumes admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) self.assertEqual(response.json_body['volume']['id'], volume.id) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_can_show_volumes(self, mock_volume): # Make sure owners are authorized to list their volumes user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) self.assertEqual(response.json_body['volume']['id'], volume.id) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_cannot_show_volumes_for_others(self, mock_volume): # Make sure volumes are only exposed 
to their owners owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'GET') # NOTE(lbragstad): Technically, this user isn't supposed to see this # volume, because they didn't create it and it lives in a different # project. Does cinder return a 404 in cases like this? Or is a 403 # expected? self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_admin_can_get_all_volumes_detail(self): # Make sure administrators are authorized to list volumes admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': admin_context.project_id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] self.assertEqual(volume.id, res_vol['id']) def test_owner_can_get_all_volumes_detail(self): # Make sure owners are authorized to list volumes user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': user_context.project_id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] self.assertEqual(volume.id, res_vol['id']) @mock.patch.object(volume_api.API, 'get') def test_admin_can_update_volumes(self, mock_volume): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = 
self._get_request_response(admin_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_can_update_volumes(self, mock_volume): user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = self._get_request_response(user_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_volumes_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = self._get_request_response(non_owner_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_can_delete_volumes(self, mock_volume): user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'DELETE') self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_admin_can_delete_volumes(self, mock_volume): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = 
self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_delete_volumes_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'DELETE') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get_volume') def test_admin_can_show_tenant_id_in_volume(self, mock_volume): # Make sure administrators are authorized to show tenant_id admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volume'] self.assertEqual(admin_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_can_show_tenant_id_in_volume(self, mock_volume): # Make sure owners are authorized to show tenant_id in volume user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volume'] self.assertEqual(user_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def 
test_admin_can_show_tenant_id_in_volume_detail(self): # Make sure admins are authorized to show tenant_id in volume detail admin_context = self.admin_context self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': admin_context.project_id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] # Make sure owners are authorized to show tenant_id self.assertEqual(admin_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def test_owner_can_show_tenant_id_in_volume_detail(self): # Make sure owners are authorized to show tenant_id in volume detail user_context = self.user_context self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': user_context.project_id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] # Make sure owners are authorized to show tenant_id self.assertEqual(user_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def test_admin_can_create_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_admin_can_get_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) 
res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', res_meta['k']) def test_admin_can_update_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(admin_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) def test_admin_can_delete_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_create_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_get_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', 
res_meta['k']) def test_owner_can_update_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(user_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) def test_owner_can_delete_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(user_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_create_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_get_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } 
response = self._get_request_response(non_owner_context, path, 'GET') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(non_owner_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_delete_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(non_owner_context, path, 'DELETE') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @ddt.ddt class VolumesPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] create_authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 
'project_member', 'project_reader', 'project_foo', # The other_* users are allowed because we don't have any check # mechanism in the code to validate this, these are validated on # the WSGI layer 'other_project_member', 'other_project_reader', ] create_unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = volumes.VolumeController(mock.MagicMock()) self.api_path = '/v3/%s/volumes' % (self.project_id) def _create_volume(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self) return volume @ddt.data(*base.all_users) def test_create_volume_policy(self, user_id): rule_name = volume_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.api.v3.volumes.VolumeController._image_uuid_from_ref', return_value=fake_constants.IMAGE_ID) @mock.patch('cinder.api.v3.volumes.VolumeController._get_image_snapshot', return_value=None) @mock.patch('cinder.volume.flows.api.create_volume.' 
'ExtractVolumeRequestTask._get_image_metadata', return_value=None) def test_create_volume_from_image_policy( self, user_id, mock_image_from_ref, mock_image_snap, mock_img_meta): rule_name = volume_policies.CREATE_FROM_IMAGE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_create_multiattach_volume_policy(self, user_id): vol_type = test_utils.create_volume_type( self.project_admin_context, name='multiattach_type', extra_specs={'multiattach': '<is> True'}) rule_name = volume_policies.MULTIATTACH_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1, "volume_type": vol_type.id}} # Relax the CREATE_POLICY in order to get past that check. 
self.policy.set_rules({volume_policies.CREATE_POLICY: ""}, overwrite=False) unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_get_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.GET_POLICY url = '%s/%s' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=volume.id) @ddt.data(*base.all_users) def test_get_all_volumes_policy(self, user_id): self._create_volume() rule_name = volume_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) # Generally, any logged in user can list all volumes. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] response = self.common_policy_check(user_id, authorized_users, unauthorized_users, [], rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of volumes # will be empty if they are not in the volume's project. empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of volumes returned will be empty. 
'legacy_admin', 'system_admin', ] volumes = response['volumes'] if response else [] volume_count = 0 if user_id in empty_response_users else 1 self.assertEqual(volume_count, len(volumes)) @ddt.data(*base.all_users) @mock.patch('cinder.db.volume_encryption_metadata_get') def test_get_volume_encryption_meta_policy(self, user_id, mock_encrypt_meta): encryption_key_id = fake_constants.ENCRYPTION_KEY_ID mock_encrypt_meta.return_value = ( {'encryption_key_id': encryption_key_id}) controller = ( volume_encryption_metadata.VolumeEncryptionMetadataController()) volume = self._create_volume() rule_name = volume_policies.ENCRYPTION_METADATA_POLICY url = '%s/%s/encryption' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) unauthorized_exceptions = [ exception.VolumeNotFound, ] resp = self.common_policy_check( user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, controller.index, req, volume.id) if user_id in self.authorized_readers: self.assertEqual(encryption_key_id, resp['encryption_key_id']) @ddt.data(*base.all_users) def test_get_volume_tenant_attr_policy(self, user_id): controller = volume_tenant_attribute.VolumeTenantAttributeController() volume = self._create_volume() volume = volume.obj_to_primitive()['versioned_object.data'] rule_name = volume_policies.TENANT_ATTRIBUTE_POLICY url = '%s/%s' % (self.api_path, volume['id']) req = fake_api.HTTPRequest.blank(url) req.get_db_volume = mock.MagicMock() req.get_db_volume.return_value = volume resp_obj = mock.MagicMock(obj={'volume': volume}) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.assertNotIn('os-vol-tenant-attr:tenant_id', volume.keys()) self.common_policy_check( user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, controller.show, req, resp_obj, volume['id'], fatal=False) if user_id in self.authorized_readers: self.assertIn('os-vol-tenant-attr:tenant_id', volume.keys()) @ddt.data(*base.all_users) def 
test_update_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.UPDATE_POLICY url = '%s/%s' % (self.api_path, volume.id) body = {"volume": {"name": "update_name"}} req = fake_api.HTTPRequest.blank(url) req.method = 'PUT' unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check( user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.update, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_delete_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) req.method = 'DELETE' unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check( user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete, req, id=volume.id) class VolumesPolicySecureRbacTest(VolumesPolicyTest): create_authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'other_project_member', ] create_unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'other_project_reader', 'project_foo', 'project_reader', ] authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). 
super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs)
40.00965
78
0.630306
3,883
33,168
5.056915
0.075972
0.045376
0.025973
0.023681
0.796853
0.776991
0.756773
0.745722
0.708087
0.688022
0
0.002841
0.267758
33,168
828
79
40.057971
0.805657
0.068862
0
0.693498
0
0
0.130246
0.055653
0
0
0
0.001208
0.078947
1
0.066563
false
0
0.021672
0
0.113003
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
335166caa2fb8bf7ef55ff8016ed6d28cfe4b088
4,302
py
Python
testing/logging/test_formatter.py
christian-steinmeyer/pytest
5cc295e74b81ca7a106e5a096834043738f14dc5
[ "MIT" ]
4
2018-03-10T16:59:59.000Z
2019-12-17T09:16:09.000Z
testing/logging/test_formatter.py
christian-steinmeyer/pytest
5cc295e74b81ca7a106e5a096834043738f14dc5
[ "MIT" ]
71
2015-10-28T08:10:14.000Z
2021-12-06T03:02:07.000Z
testing/logging/test_formatter.py
christian-steinmeyer/pytest
5cc295e74b81ca7a106e5a096834043738f14dc5
[ "MIT" ]
2
2020-08-01T22:09:38.000Z
2020-10-13T08:17:24.000Z
import logging from typing import Any from _pytest._io import TerminalWriter from _pytest.logging import ColoredLevelFormatter def test_coloredlogformatter() -> None: logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" record = logging.LogRecord( name="dummy", level=logging.INFO, pathname="dummypath", lineno=10, msg="Test Message", args=(), exc_info=None, ) class ColorConfig: class option: pass tw = TerminalWriter() tw.hasmarkup = True formatter = ColoredLevelFormatter(tw, logfmt) output = formatter.format(record) assert output == ( "dummypath 10 \x1b[32mINFO \x1b[0m Test Message" ) tw.hasmarkup = False formatter = ColoredLevelFormatter(tw, logfmt) output = formatter.format(record) assert output == ("dummypath 10 INFO Test Message") def test_multiline_message() -> None: from _pytest.logging import PercentStyleMultiline logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" record: Any = logging.LogRecord( name="dummy", level=logging.INFO, pathname="dummypath", lineno=10, msg="Test Message line1\nline2", args=(), exc_info=None, ) # this is called by logging.Formatter.format record.message = record.getMessage() ai_on_style = PercentStyleMultiline(logfmt, True) output = ai_on_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\n" " line2" ) ai_off_style = PercentStyleMultiline(logfmt, False) output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\nline2" ) ai_none_style = PercentStyleMultiline(logfmt, None) output = ai_none_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\nline2" ) record.auto_indent = False output = ai_on_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\nline2" ) record.auto_indent = True output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\n" " line2" ) record.auto_indent = "False" output = ai_on_style.format(record) assert output == ( "dummypath 10 INFO Test 
Message line1\nline2" ) record.auto_indent = "True" output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\n" " line2" ) # bad string values default to False record.auto_indent = "junk" output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\nline2" ) # anything other than string or int will default to False record.auto_indent = dict() output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\nline2" ) record.auto_indent = "5" output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\n line2" ) record.auto_indent = 5 output = ai_off_style.format(record) assert output == ( "dummypath 10 INFO Test Message line1\n line2" ) def test_colored_short_level() -> None: logfmt = "%(levelname).1s %(message)s" record = logging.LogRecord( name="dummy", level=logging.INFO, pathname="dummypath", lineno=10, msg="Test Message", args=(), exc_info=None, ) class ColorConfig: class option: pass tw = TerminalWriter() tw.hasmarkup = True formatter = ColoredLevelFormatter(tw, logfmt) output = formatter.format(record) # the I (of INFO) is colored assert output == ("\x1b[32mI\x1b[0m Test Message")
28.490066
80
0.57113
444
4,302
5.423423
0.195946
0.077658
0.097176
0.129568
0.736296
0.736296
0.711379
0.711379
0.711379
0.70515
0
0.027056
0.338447
4,302
150
81
28.68
0.819044
0.037192
0
0.638655
0
0
0.298526
0
0
0
0
0
0.117647
1
0.02521
false
0.016807
0.042017
0
0.10084
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
685bf7a01002d00376abfb0f9c41e4c0b30ff038
229
py
Python
datasets/hscic/scrape.py
nhsengland/publish-o-matic
dc8f16cb83a2360989afa44d887e63b5cde6af29
[ "MIT" ]
null
null
null
datasets/hscic/scrape.py
nhsengland/publish-o-matic
dc8f16cb83a2360989afa44d887e63b5cde6af29
[ "MIT" ]
11
2015-03-02T16:30:20.000Z
2016-11-29T12:16:15.000Z
datasets/hscic/scrape.py
nhsengland/publish-o-matic
dc8f16cb83a2360989afa44d887e63b5cde6af29
[ "MIT" ]
2
2020-12-25T20:38:31.000Z
2021-04-11T07:35:01.000Z
from datasets.hscic.hscic_datasets import scrape as datasets_scrape from datasets.hscic.hscic_indicators import scrape as indicators_scrape def main(workspace): datasets_scrape(workspace) indicators_scrape(workspace)
22.9
71
0.829694
29
229
6.344828
0.344828
0.130435
0.184783
0.23913
0
0
0
0
0
0
0
0
0.122271
229
10
72
22.9
0.915423
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
688059ffe96ee1e3cfd3aaf09375c53be6410286
50
py
Python
app/__init__.py
SLB974/GrandPyBot-dev
7a0268d4ffa58c37eed37253c6afb00874dbabe4
[ "MIT" ]
null
null
null
app/__init__.py
SLB974/GrandPyBot-dev
7a0268d4ffa58c37eed37253c6afb00874dbabe4
[ "MIT" ]
null
null
null
app/__init__.py
SLB974/GrandPyBot-dev
7a0268d4ffa58c37eed37253c6afb00874dbabe4
[ "MIT" ]
null
null
null
from flask import Flask from app.views import app
16.666667
25
0.82
9
50
4.555556
0.555556
0
0
0
0
0
0
0
0
0
0
0
0.16
50
2
26
25
0.97619
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d79c6838f09a05f79a094ede5cbcfc88762b1778
36
py
Python
pydeps/__main__.py
miketheman/pydeps
907a1a29cc0e04ad3698a812082775ccf39b6479
[ "BSD-2-Clause" ]
981
2015-07-08T15:55:30.000Z
2022-03-31T08:53:30.000Z
pydeps/__main__.py
miketheman/pydeps
907a1a29cc0e04ad3698a812082775ccf39b6479
[ "BSD-2-Clause" ]
129
2016-09-03T16:51:52.000Z
2022-03-19T23:07:50.000Z
pydeps/__main__.py
miketheman/pydeps
907a1a29cc0e04ad3698a812082775ccf39b6479
[ "BSD-2-Clause" ]
88
2015-03-24T03:25:54.000Z
2022-03-24T07:35:02.000Z
from .pydeps import pydeps pydeps()
12
26
0.777778
5
36
5.6
0.6
0
0
0
0
0
0
0
0
0
0
0
0.138889
36
2
27
18
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d7ac1e6f92f48c0869fd34f768c9769c6cff0aee
78
py
Python
tests/data/custom_loader2.py
cambiegroup/aizynthfinder
f5bafb2ac4749284571c05ae6df45b6f45cccd30
[ "MIT" ]
219
2020-06-15T08:04:53.000Z
2022-03-31T09:02:47.000Z
tests/data/custom_loader2.py
cambiegroup/aizynthfinder
f5bafb2ac4749284571c05ae6df45b6f45cccd30
[ "MIT" ]
56
2020-08-14T14:50:42.000Z
2022-03-22T12:49:06.000Z
tests/data/custom_loader2.py
cambiegroup/aizynthfinder
f5bafb2ac4749284571c05ae6df45b6f45cccd30
[ "MIT" ]
58
2020-06-15T13:36:42.000Z
2022-03-21T06:18:02.000Z
def extract_smiles(): return ["c1ccccc1", "Cc1ccccc1", "c1ccccc1", "CCO"]
26
55
0.653846
8
78
6.25
0.875
0
0
0
0
0
0
0
0
0
0
0.089552
0.141026
78
2
56
39
0.656716
0
0
0
0
0
0.358974
0
0
0
0
0
0
1
0.5
true
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
0
0
0
5
d7d94e78e81c338291612ee9c829d5e728b8d936
68
py
Python
boxuegu/apps/courses/views.py
libin-c/bxg
c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23
[ "MIT" ]
1
2019-06-13T10:08:25.000Z
2019-06-13T10:08:25.000Z
boxuegu/apps/courses/views.py
libin-c/bxg
c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23
[ "MIT" ]
null
null
null
boxuegu/apps/courses/views.py
libin-c/bxg
c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23
[ "MIT" ]
null
null
null
from django.views import View class CourseListView(View): pass
13.6
29
0.764706
9
68
5.777778
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.176471
68
5
30
13.6
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
d7e53f75f37c8e40de012dfc26ba2d6bccb8b7ab
200
py
Python
practice/calculator.py
kristenpicard/python-practice
71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db
[ "MIT" ]
null
null
null
practice/calculator.py
kristenpicard/python-practice
71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db
[ "MIT" ]
null
null
null
practice/calculator.py
kristenpicard/python-practice
71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db
[ "MIT" ]
null
null
null
class Calculator: def add(self,a,b): return a+b def subtract(self,a,b): return a-b def multiply(self,a,b): return a*b def divide(self,a,b): return a/b
18.181818
27
0.54
34
200
3.176471
0.323529
0.148148
0.222222
0.444444
0.601852
0.601852
0.472222
0
0
0
0
0
0.335
200
10
28
20
0.81203
0
0
0
0
0
0
0
0
0
0
0
0
1
0.444444
false
0
0
0.444444
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
d7fc48fb5d168909d619b0248ed17c5cf0a539aa
63
py
Python
adventofcode/2020/25/crack_key/__init__.py
bneradt/toy
982e80ec98f4e951f7275e5f22cb0197f8f86c08
[ "Apache-2.0" ]
null
null
null
adventofcode/2020/25/crack_key/__init__.py
bneradt/toy
982e80ec98f4e951f7275e5f22cb0197f8f86c08
[ "Apache-2.0" ]
null
null
null
adventofcode/2020/25/crack_key/__init__.py
bneradt/toy
982e80ec98f4e951f7275e5f22cb0197f8f86c08
[ "Apache-2.0" ]
null
null
null
from .crack_key import derive_loop_size, derive_encryption_key
31.5
62
0.888889
10
63
5.1
0.8
0
0
0
0
0
0
0
0
0
0
0
0.079365
63
1
63
63
0.87931
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cc213e43355eccd688ab41131dff02e635716ed2
209
py
Python
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py
luzeunice/BotBuilder-Samples
b62be4e8863125a567902b736b7b74313d9d4f28
[ "MIT" ]
null
null
null
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py
luzeunice/BotBuilder-Samples
b62be4e8863125a567902b736b7b74313d9d4f28
[ "MIT" ]
null
null
null
samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py
luzeunice/BotBuilder-Samples
b62be4e8863125a567902b736b7b74313d9d4f28
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. class TimexRelativeConvert: @staticmethod def convert_timex_to_string_relative(timex): return ''
23.222222
59
0.746411
23
209
6.608696
0.956522
0
0
0
0
0
0
0
0
0
0
0
0.186603
209
8
60
26.125
0.894118
0.425837
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
cc254320a120806976fb2027ee1ac70ba2ebda77
42
py
Python
GitSearch.py
inishchith/GithubTools
6b346d063bb727045407498d6710ee2680ad4d8f
[ "MIT" ]
1
2017-05-23T11:23:09.000Z
2017-05-23T11:23:09.000Z
GitSearch.py
inishchith/GithubTools
6b346d063bb727045407498d6710ee2680ad4d8f
[ "MIT" ]
null
null
null
GitSearch.py
inishchith/GithubTools
6b346d063bb727045407498d6710ee2680ad4d8f
[ "MIT" ]
null
null
null
# Find file contents via various criteria.
42
42
0.809524
6
42
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
42
1
42
42
0.944444
0.952381
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
04428cad6e41d07f787bb5f233146087def3d4cc
50
py
Python
__main__.py
wwakabobik/openweather_pws
bda08b550982b7e3d797a57c23ae97d4d4ececf7
[ "MIT" ]
null
null
null
__main__.py
wwakabobik/openweather_pws
bda08b550982b7e3d797a57c23ae97d4d4ececf7
[ "MIT" ]
null
null
null
__main__.py
wwakabobik/openweather_pws
bda08b550982b7e3d797a57c23ae97d4d4ececf7
[ "MIT" ]
null
null
null
from openweather_pws import Station, Measurements
25
49
0.88
6
50
7.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.1
50
1
50
50
0.955556
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0454da70bf56318261f39eb6d452f26d17c27244
430
py
Python
app/routes/__init__.py
Poketnans/capstone-q3
38d550a54ff41387534241df85eb8aa8c9b6ba7e
[ "MIT" ]
null
null
null
app/routes/__init__.py
Poketnans/capstone-q3
38d550a54ff41387534241df85eb8aa8c9b6ba7e
[ "MIT" ]
4
2022-03-03T12:47:02.000Z
2022-03-08T18:10:34.000Z
app/routes/__init__.py
Poketnans/capstone-q3
38d550a54ff41387534241df85eb8aa8c9b6ba7e
[ "MIT" ]
1
2022-03-17T14:21:30.000Z
2022-03-17T14:21:30.000Z
from flask import Flask from .storage_blueprint import bp_storage from .tattooists_blueprint import bp_tattooists from .tattoos_blueprint import bp_tattoos from .clients_blueprint import bp_clients def init_app(app: Flask) -> None: ''' Registra as blueprints ''' app.register_blueprint(bp_storage) app.register_blueprint(bp_tattooists) app.register_blueprint(bp_tattoos) app.register_blueprint(bp_clients)
26.875
47
0.8
57
430
5.736842
0.298246
0.183486
0.207951
0.269113
0
0
0
0
0
0
0
0
0.134884
430
15
48
28.666667
0.879032
0.051163
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0.5
0
0.6
0.8
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
04562a0ab45671a8901e0ae58163340c6b6aee32
160
py
Python
oaff/app/oaff/app/data/sources/common/provider.py
JBurkinshaw/ogc-api-fast-features
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
[ "MIT" ]
19
2021-07-06T16:35:27.000Z
2022-02-08T04:59:21.000Z
oaff/app/oaff/app/data/sources/common/provider.py
JBurkinshaw/ogc-api-fast-features
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
[ "MIT" ]
30
2021-07-14T04:13:11.000Z
2021-11-22T20:45:15.000Z
oaff/app/oaff/app/data/sources/common/provider.py
JBurkinshaw/ogc-api-fast-features
4fc6ba3cc4df1600450fe4c9f35320b00c69f158
[ "MIT" ]
6
2021-07-06T16:35:28.000Z
2021-09-17T19:24:49.000Z
from typing import List, Optional from pydantic import BaseModel class Provider(BaseModel): url: str name: str roles: Optional[List[str]] = None
16
37
0.7125
21
160
5.428571
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.2125
160
9
38
17.777778
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f0e928a38ffedd9b5a5fdacb57e277620b6e15be
339
py
Python
headlineVsHeadline/polls/models.py
bacarpenter/headline-vs-headline
2dc59e75fadbf7d5bb0a5b15a0bf41784712a290
[ "MIT" ]
null
null
null
headlineVsHeadline/polls/models.py
bacarpenter/headline-vs-headline
2dc59e75fadbf7d5bb0a5b15a0bf41784712a290
[ "MIT" ]
1
2020-12-28T01:28:19.000Z
2020-12-28T01:56:47.000Z
headlineVsHeadline/polls/models.py
bacarpenter/headline-vs-headline
2dc59e75fadbf7d5bb0a5b15a0bf41784712a290
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class HeadlineListing(models.Model): headline_text = models.CharField(max_length=500) accessed = models.DateTimeField() source_url = models.CharField(max_length=200) author = models.CharField(default="", max_length=200) source = models.CharField(max_length=200)
33.9
57
0.755162
43
339
5.813953
0.55814
0.24
0.216
0.288
0.216
0
0
0
0
0
0
0.041237
0.141593
339
9
58
37.666667
0.817869
0.070796
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.142857
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
f0f04e94a81beb88cb190deaab3f4d7bf4be0477
125
py
Python
pyaz/mariadb/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/mariadb/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/mariadb/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
1
2022-02-03T09:12:01.000Z
2022-02-03T09:12:01.000Z
''' Manage Azure Database for MariaDB servers. ''' from .. pyaz_utils import _call_az from . import db, server, server_logs
17.857143
42
0.744
18
125
4.944444
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.16
125
6
43
20.833333
0.847619
0.336
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
0b0149c561d9b693e298cce2051643d984729630
154
py
Python
Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py
Hsins/CodeWars
7e7b912fdd0647c0af381d8b566408e383ea5df8
[ "MIT" ]
1
2020-01-09T21:47:56.000Z
2020-01-09T21:47:56.000Z
Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py
Hsins/CodeWars
7e7b912fdd0647c0af381d8b566408e383ea5df8
[ "MIT" ]
1
2020-01-20T12:39:03.000Z
2020-01-20T12:39:03.000Z
Python/8 kyu/Grasshopper - Terminal Game Move Function/solution.py
Hsins/CodeWars
7e7b912fdd0647c0af381d8b566408e383ea5df8
[ "MIT" ]
null
null
null
# [8 kyu] Grasshopper - Terminal Game Move Function # # Author: Hsins # Date: 2019/12/20 def move(position, roll): return position + 2 * roll
17.111111
51
0.649351
21
154
4.761905
0.857143
0
0
0
0
0
0
0
0
0
0
0.08547
0.24026
154
8
52
19.25
0.769231
0.558442
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
0b115c280b92cd1887912e7b5b880c8d693ab762
987
py
Python
final_project/machinetranslation/tests.py
eduardomecchia/xzceb-flask_eng_fr
fa2da58ecabd43385a77be42f1eb6cf5401d6757
[ "Apache-2.0" ]
null
null
null
final_project/machinetranslation/tests.py
eduardomecchia/xzceb-flask_eng_fr
fa2da58ecabd43385a77be42f1eb6cf5401d6757
[ "Apache-2.0" ]
null
null
null
final_project/machinetranslation/tests.py
eduardomecchia/xzceb-flask_eng_fr
fa2da58ecabd43385a77be42f1eb6cf5401d6757
[ "Apache-2.0" ]
null
null
null
import unittest import translator class TestEnglishToFrench(unittest.TestCase): def test_love(self): self.assertEqual(translator.english_to_french('Love'), 'Amour') def test_sun(self): self.assertEqual(translator.english_to_french('Sun'), 'Soleil') def test_null(self): self.assertRaises(ValueError, translator.english_to_french, None) def test_hello(self): self.assertEqual(translator.english_to_french('Hello'), 'Bonjour') class TestFrenchToEnglish(unittest.TestCase): def test_love(self): self.assertEqual(translator.french_to_english('Amour'), 'Love') def test_sun(self): self.assertEqual(translator.french_to_english('Soleil'), 'Sun') def test_null(self): self.assertRaises(ValueError, translator.french_to_english, None) def test_hello(self): self.assertEqual(translator.french_to_english('Bonjour'), 'Hello') if __name__ == '__main__': unittest.main()
31.83871
74
0.703141
113
987
5.858407
0.238938
0.084592
0.172205
0.26284
0.712991
0.712991
0.712991
0.459215
0.169184
0
0
0
0.177305
987
31
75
31.83871
0.815271
0
0
0.363636
0
0
0.068826
0
0
0
0
0
0.363636
1
0.363636
false
0
0.090909
0
0.545455
0
0
0
0
null
0
0
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
0b14ffda334fedd5c264042439a74b0107e9cb74
766
py
Python
code/chapter_04/listing_04_13.py
guinslym/python_earth_science_book
f4dd0115dbbce140c6713989f630a71238daa72c
[ "MIT" ]
80
2021-04-19T10:03:57.000Z
2022-03-30T15:34:47.000Z
code/chapter_04/listing_04_13.py
guinslym/python_earth_science_book
f4dd0115dbbce140c6713989f630a71238daa72c
[ "MIT" ]
null
null
null
code/chapter_04/listing_04_13.py
guinslym/python_earth_science_book
f4dd0115dbbce140c6713989f630a71238daa72c
[ "MIT" ]
23
2021-04-25T03:50:07.000Z
2022-03-22T03:06:19.000Z
# Go to new line using \n print('-------------------------------------------------------') print("My name is\nMaurizio Petrelli") # Inserting characters using octal values print('-------------------------------------------------------') print("\100 \136 \137 \077 \176") # Inserting characters using hex values print('-------------------------------------------------------') print("\x23 \x24 \x25 \x26 \x2A") print('-------------------------------------------------------') '''Output: ------------------------------------------------------- My name is Maurizio Petrelli ------------------------------------------------------- @ ^ _ ? ~ ------------------------------------------------------- # $ % & * ------------------------------------------------------- '''
31.916667
64
0.26893
44
766
4.659091
0.659091
0.146341
0.078049
0
0
0
0
0
0
0
0
0.034043
0.079634
766
23
65
33.304348
0.256738
0.131854
0
0.571429
0
0
0.794118
0.588235
0
0
0
0
0
1
0
true
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
0b1c55f58b7ea7cb439e436ac338143526ad5ff4
2,383
py
Python
pwndbg/color/__init__.py
R2S4X/pwndbg
351d479f08a43c71d30a8d8c098b7657bbb9ef0e
[ "MIT" ]
287
2015-03-23T17:22:49.000Z
2022-01-06T19:57:21.000Z
pwndbg/color/__init__.py
R2S4X/pwndbg
351d479f08a43c71d30a8d8c098b7657bbb9ef0e
[ "MIT" ]
28
2015-04-13T19:59:44.000Z
2016-05-27T19:09:55.000Z
pwndbg/color/__init__.py
R2S4X/pwndbg
351d479f08a43c71d30a8d8c098b7657bbb9ef0e
[ "MIT" ]
42
2015-04-17T18:13:00.000Z
2020-07-23T08:37:51.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re import pwndbg.memoize NORMAL = "\x1b[0m" BLACK = "\x1b[30m" RED = "\x1b[31m" GREEN = "\x1b[32m" YELLOW = "\x1b[33m" BLUE = "\x1b[34m" PURPLE = "\x1b[35m" CYAN = "\x1b[36m" LIGHT_GREY = LIGHT_GRAY = "\x1b[37m" FOREGROUND = "\x1b[39m" GREY = GRAY = "\x1b[90m" LIGHT_RED = "\x1b[91m" LIGHT_GREEN = "\x1b[92m" LIGHT_YELLOW = "\x1b[93m" LIGHT_BLUE = "\x1b[94m" LIGHT_PURPLE = "\x1b[95m" LIGHT_CYAN = "\x1b[96m" WHITE = "\x1b[97m" BOLD = "\x1b[1m" UNDERLINE = "\x1b[4m" def none(x): return str(x) def normal(x): return colorize(x, NORMAL) def black(x): return colorize(x, BLACK) def red(x): return colorize(x, RED) def green(x): return colorize(x, GREEN) def yellow(x): return colorize(x, YELLOW) def blue(x): return colorize(x, BLUE) def purple(x): return colorize(x, PURPLE) def cyan(x): return colorize(x, CYAN) def light_gray(x): return colorize(x, LIGHT_GRAY) def foreground(x): return colorize(x, FOREGROUND) def gray(x): return colorize(x, GRAY) def light_red(x): return colorize(x, LIGHT_RED) def light_green(x): return colorize(x, LIGHT_GREEN) def light_yellow(x): return colorize(x, LIGHT_YELLOW) def light_blue(x): return colorize(x, LIGHT_BLUE) def light_purple(x): return colorize(x, LIGHT_PURPLE) def light_cyan(x): return colorize(x, LIGHT_CYAN) def white(x): return colorize(x, WHITE) def bold(x): return colorize(x, BOLD) def underline(x): return colorize(x, UNDERLINE) def colorize(x, color): return color + terminateWith(str(x), color) + NORMAL @pwndbg.memoize.reset_on_stop def generateColorFunctionInner(old, new): def wrapper(text): return new(old(text)) return wrapper def generateColorFunction(config): function = lambda x: x for color in str(config).split(','): function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')]) return function def 
strip(x): return re.sub('\x1b\\[\d+m', '', x) def terminateWith(x, color): return re.sub('\x1b\\[0m', NORMAL + color, x) def ljust_colored(x, length, char=' '): return x + (length - len(strip(x))) * char
31.355263
99
0.666807
347
2,383
4.449568
0.256484
0.099741
0.194301
0.207254
0.209197
0
0
0
0
0
0
0.031427
0.18548
2,383
75
100
31.773333
0.764039
0.017625
0
0
0
0
0.077384
0
0
0
0
0
0
1
0.4375
false
0
0.09375
0.40625
0.625
0.015625
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
9bc97edd815322dc6a747c2ab760525b0de68666
1,057
py
Python
main(terminal).py
fcondo/GUI-sudoku-solver
6d5af3790d1e1c13402bdc6fa6f8ae4ed79121d2
[ "Apache-2.0" ]
null
null
null
main(terminal).py
fcondo/GUI-sudoku-solver
6d5af3790d1e1c13402bdc6fa6f8ae4ed79121d2
[ "Apache-2.0" ]
null
null
null
main(terminal).py
fcondo/GUI-sudoku-solver
6d5af3790d1e1c13402bdc6fa6f8ae4ed79121d2
[ "Apache-2.0" ]
null
null
null
""" main(terminal).py Author: Fabio Condomitti """ from solver import print_grid, solve def main(): sudoku_grid = [ [0,8,0, 0,0,0, 2,0,0], [0,0,0, 0,8,4, 0,9,0], [0,0,6, 3,2,0, 0,1,0], [0,9,7, 0,0,0, 0,8,0], [8,0,0, 9,0,3, 0,0,2], [0,1,0, 0,0,0, 9,5,0], [0,7,0, 0,4,5, 8,0,0], [0,3,0, 7,1,0, 0,0,0], [0,0,8, 0,0,0, 0,4,0] ] sudoku_grid = [ [2,5,0, 0,9,7, 3,0,6], [0,0,7, 3,0,0, 1,0,2], [0,3,1, 4,0,5, 8,0,0], [0,6,0, 8,0,0, 0,2,7], [0,2,4, 0,0,1, 0,3,8], [0,8,0, 9,0,0, 6,1,0], [3,0,5, 0,0,4, 0,0,1], [0,0,6, 0,0,9, 7,0,0], [0,7,0, 5,1,0, 4,0,3] ] print_grid(sudoku_grid) print('....................................') copy = sudoku_grid solve(copy, 9) print_grid(copy) if __name__ == "__main__": main()
27.102564
49
0.325449
198
1,057
1.661616
0.136364
0.285714
0.191489
0.121581
0.231003
0.12462
0.085106
0
0
0
0
0.265041
0.418165
1,057
39
50
27.102564
0.269919
0.040681
0
0
0
0
0.044044
0.036036
0
0
0
0
0
1
0.033333
false
0
0.033333
0
0.066667
0.133333
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9bf44b2d04766a196cb4f85df49ddd09eb5801af
65
py
Python
sql/__init__.py
realDragonium/small-flask-project
5688210418997dbdf5b38e0ce77750eab7eefd3a
[ "Unlicense" ]
null
null
null
sql/__init__.py
realDragonium/small-flask-project
5688210418997dbdf5b38e0ce77750eab7eefd3a
[ "Unlicense" ]
null
null
null
sql/__init__.py
realDragonium/small-flask-project
5688210418997dbdf5b38e0ce77750eab7eefd3a
[ "Unlicense" ]
null
null
null
from .sql_models import SQLQuiz, SQLQuestion, SQLAnswerOption
13
61
0.815385
7
65
7.428571
1
0
0
0
0
0
0
0
0
0
0
0
0.138462
65
4
62
16.25
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9bf5111951f211230177602f66faaa2faeecb8c9
65
py
Python
tyled/tileset/orthogonal/__init__.py
kfields/tyled
65f57c3f060c369d1e3875d94363a03a11fe7c3e
[ "MIT" ]
1
2020-04-14T09:25:25.000Z
2020-04-14T09:25:25.000Z
tyled/tileset/orthogonal/__init__.py
kfields/tyled
65f57c3f060c369d1e3875d94363a03a11fe7c3e
[ "MIT" ]
2
2021-09-08T01:52:49.000Z
2022-01-13T02:32:08.000Z
tyled/tileset/orthogonal/__init__.py
kfields/tyled
65f57c3f060c369d1e3875d94363a03a11fe7c3e
[ "MIT" ]
null
null
null
from tyled.tileset.orthogonal.orthogonal import OrthogonalTileset
65
65
0.907692
7
65
8.428571
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.046154
65
1
65
65
0.951613
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9bf527aac7e8ff43497cfbcad130e15221237397
107
py
Python
run_length_encoding.py
wolfd/jpeg-svg
42370757ec98642b57486c7d2fd3fae7df9bc271
[ "MIT" ]
null
null
null
run_length_encoding.py
wolfd/jpeg-svg
42370757ec98642b57486c7d2fd3fae7df9bc271
[ "MIT" ]
null
null
null
run_length_encoding.py
wolfd/jpeg-svg
42370757ec98642b57486c7d2fd3fae7df9bc271
[ "MIT" ]
null
null
null
import typing as T import numpy as np def decode_run_length(compressed: T.Iterable[int]): compressed
15.285714
51
0.766355
17
107
4.705882
0.764706
0
0
0
0
0
0
0
0
0
0
0
0.168224
107
6
52
17.833333
0.898876
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
502575823cd86b4b2ecb13d681cb93aec7e91dfa
202
py
Python
main/admin.py
ericrobskyhuntley/vialab.mit.edu
1318d03b8eeb106c1662052e1caa53290e206ae7
[ "MIT" ]
null
null
null
main/admin.py
ericrobskyhuntley/vialab.mit.edu
1318d03b8eeb106c1662052e1caa53290e206ae7
[ "MIT" ]
null
null
null
main/admin.py
ericrobskyhuntley/vialab.mit.edu
1318d03b8eeb106c1662052e1caa53290e206ae7
[ "MIT" ]
null
null
null
from django.contrib import admin from simple_history.admin import SimpleHistoryAdmin from .models import MainMetadata # Register your models here. admin.site.register(MainMetadata, SimpleHistoryAdmin)
28.857143
53
0.851485
24
202
7.125
0.583333
0
0
0
0
0
0
0
0
0
0
0
0.09901
202
7
53
28.857143
0.93956
0.128713
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ac8dbd9e323eeca0d685d4a4900b4bd4b422d5be
11,396
py
Python
Thirdparty/libcurl/build.py
reven86/dava.engine
ca47540c8694668f79774669b67d874a30188c20
[ "BSD-3-Clause" ]
5
2020-02-11T12:04:17.000Z
2022-01-30T10:18:29.000Z
Thirdparty/libcurl/build.py
reven86/dava.engine
ca47540c8694668f79774669b67d874a30188c20
[ "BSD-3-Clause" ]
null
null
null
Thirdparty/libcurl/build.py
reven86/dava.engine
ca47540c8694668f79774669b67d874a30188c20
[ "BSD-3-Clause" ]
4
2019-11-28T19:24:34.000Z
2021-08-24T19:12:50.000Z
import os
import shutil

import build_utils


def get_supported_targets(platform):
    """Return the libcurl build targets supported on host *platform*."""
    if platform == 'win32':
        return ['win32', 'win10']
    elif platform == 'darwin':
        return ['macos', 'ios', 'android']
    elif platform == 'linux':
        return ['android', 'linux']
    else:
        return []


def get_dependencies_for_target(target):
    """Return third-party libraries that must be built before *target*."""
    if target == 'android':
        return ['openssl']
    else:
        return []


def build_for_target(target, working_directory_path, root_project_path):
    """Dispatch to the per-target build routine; unknown targets are a no-op."""
    if target == 'win32':
        _build_win32(working_directory_path, root_project_path)
    elif target == 'win10':
        _build_win10(working_directory_path, root_project_path)
    elif target == 'macos':
        _build_macos(working_directory_path, root_project_path)
    elif target == 'ios':
        _build_ios(working_directory_path, root_project_path)
    elif target == 'android':
        _build_android(working_directory_path, root_project_path)
    elif target == 'linux':
        _build_linux(working_directory_path, root_project_path)


def get_download_info():
    """Describe where the libcurl sources come from for each platform group."""
    return {'macos_and_ios': 'maintained by curl-ios-build-scripts (bundled)',
            'others': 'https://curl.haxx.se/download/curl-7.50.3.tar.gz'}


def _download_and_extract(working_directory_path):
    """Download the curl tarball and extract it; return the source folder."""
    source_folder_path = os.path.join(working_directory_path, 'libcurl_source')
    url = get_download_info()['others']
    build_utils.download_and_extract(
        url,
        working_directory_path,
        source_folder_path,
        build_utils.get_url_file_name_no_ext(url))
    return source_folder_path


@build_utils.run_once
def _patch_sources(source_folder_path, working_directory_path):
    # Apply fixes
    build_utils.apply_patch(
        os.path.abspath('patch.diff'), working_directory_path)


def _build_win32(working_directory_path, root_project_path):
    """Build libcurl for win32 (x86 + x64, Debug + Release) via VC12."""
    source_folder_path = _download_and_extract(working_directory_path)

    vc12_solution_file_path = os.path.join(
        source_folder_path, 'projects/Windows/VC12/curl-all.sln')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'x64', 'libcurl')

    libs_win_root = os.path.join(root_project_path, 'Libs/lib_CMake/win')
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC12/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(libs_win_root, 'x86/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC12/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(libs_win_root, 'x86/Release/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC12/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(libs_win_root, 'x64/Debug/libcurl_a_debug.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC12/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(libs_win_root, 'x64/Release/libcurl_a.lib'))

    _copy_headers(source_folder_path, root_project_path, 'Others')


def _build_win10(working_directory_path, root_project_path):
    """Build libcurl for win10 (Win32/x64/ARM, Debug + Release) via VC14."""
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    vc14_solution_folder_path = os.path.join(
        source_folder_path, 'projects/Windows/VC14')
    vc14_solution_file_path = os.path.join(
        vc14_solution_folder_path, 'curl-all.sln')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'ARM', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'ARM', 'libcurl')

    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC14/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/Win32/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC14/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/Win32/Release/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC14/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/x64/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC14/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/x64/Release/libcurl.lib'))

    # ARM output folder isn't specifically set by solution, so it's a default one
    shutil.copyfile(
        os.path.join(
            vc14_solution_folder_path,
            'ARM/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/arm/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            vc14_solution_folder_path,
            'ARM/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/arm/Release/libcurl.lib'))

    _copy_headers(source_folder_path, root_project_path, 'Others')


def _build_macos(working_directory_path, root_project_path):
    """Build libcurl for macOS (x86_64) via the bundled build_curl script."""
    build_curl_run_dir = os.path.join(working_directory_path, 'gen/build_osx')
    if not os.path.exists(build_curl_run_dir):
        os.makedirs(build_curl_run_dir)

    build_curl_args = [
        './build_curl', '--arch', 'x86_64', '--run-dir', build_curl_run_dir]
    if (build_utils.verbose):
        build_curl_args.append('--verbose')
    build_utils.run_process(
        build_curl_args, process_cwd='curl-ios-build-scripts-master')

    output_path = os.path.join(build_curl_run_dir, 'curl/osx/lib/libcurl.a')
    shutil.copyfile(
        output_path,
        os.path.join(
            root_project_path,
            os.path.join('Libs/lib_CMake/mac/libcurl_macos.a')))

    include_path = os.path.join(
        root_project_path,
        os.path.join('Libs/include/curl/iOS_MacOS'))
    build_utils.copy_files(
        os.path.join(build_curl_run_dir, 'curl/osx/include'),
        include_path, '*.h')


def _build_ios(working_directory_path, root_project_path):
    """Build libcurl for iOS (armv7/armv7s/arm64) via build_curl."""
    build_curl_run_dir = os.path.join(working_directory_path, 'gen/build_ios')
    if not os.path.exists(build_curl_run_dir):
        os.makedirs(build_curl_run_dir)

    build_curl_args = [
        './build_curl', '--arch', 'armv7,armv7s,arm64',
        '--run-dir', build_curl_run_dir]
    if (build_utils.verbose):
        build_curl_args.append('--verbose')
    build_utils.run_process(
        build_curl_args, process_cwd='curl-ios-build-scripts-master')

    output_path = os.path.join(
        build_curl_run_dir, 'curl/ios-appstore/lib/libcurl.a')
    shutil.copyfile(
        output_path,
        os.path.join(
            root_project_path,
            os.path.join('Libs/lib_CMake/ios/libcurl_ios.a')))

    include_path = os.path.join(
        root_project_path,
        os.path.join('Libs/include/curl/iOS_MacOS'))
    build_utils.copy_files(
        os.path.join(build_curl_run_dir, 'curl/ios-appstore/include'),
        include_path, '*.h')


def _build_android(working_directory_path, root_project_path):
    """Build libcurl for Android (arm + x86) with NDK standalone toolchains,
    linking against the previously built openssl."""
    source_folder_path = _download_and_extract(working_directory_path)

    env = os.environ.copy()
    original_path_var = env["PATH"]

    # ARM
    toolchain_path_arm = os.path.join(
        working_directory_path, 'gen/ndk_toolchain_arm')
    build_utils.android_ndk_make_toolchain(
        root_project_path, 'arm', 'android-14', 'darwin-x86_64',
        toolchain_path_arm)
    env['PATH'] = '{}:{}'.format(
        os.path.join(toolchain_path_arm, 'bin'), original_path_var)
    install_dir_arm = os.path.join(working_directory_path, 'gen/install_arm')
    configure_args = [
        '--host=arm-linux-androideabi',
        '--disable-shared',
        '--with-ssl=' + os.path.abspath(
            os.path.join(
                working_directory_path, '../openssl/gen/install_arm/'))]
    build_utils.build_with_autotools(
        source_folder_path, configure_args, install_dir_arm, env)

    # x86
    toolchain_path_x86 = os.path.join(
        working_directory_path, 'gen/ndk_toolchain_x86')
    build_utils.android_ndk_make_toolchain(
        root_project_path, 'x86', 'android-14', 'darwin-x86_64',
        toolchain_path_x86)
    env['PATH'] = '{}:{}'.format(
        os.path.join(toolchain_path_x86, 'bin'), original_path_var)
    # BUGFIX(naming): this install dir was previously assigned to the
    # variable `install_dir_arm`, silently reusing the ARM name for the
    # x86 output; behavior is unchanged, the local is just renamed.
    install_dir_x86 = os.path.join(working_directory_path, 'gen/install_x86')
    configure_args = [
        '--host=i686-linux-android',
        '--disable-shared',
        '--with-ssl=' + os.path.abspath(
            os.path.join(
                working_directory_path, '../openssl/gen/install_x86/'))]
    build_utils.build_with_autotools(
        source_folder_path, configure_args, install_dir_x86, env)

    _copy_headers(source_folder_path, root_project_path, 'Others')


def _build_linux(working_directory_path, root_project_path):
    """Build libcurl for Linux with autotools, linked against openssl."""
    source_folder_path = _download_and_extract(working_directory_path)

    env = build_utils.get_autotools_linux_env()
    install_dir = os.path.join(working_directory_path, 'gen/install_linux')
    openssl_install_dir = os.path.abspath(os.path.join(working_directory_path, '../openssl/gen/install_linux/'))
    configure_args = [
        '--disable-shared',
        '--with-ssl=' + openssl_install_dir]
    build_utils.build_with_autotools(
        source_folder_path, configure_args, install_dir, env)

    shutil.copyfile(os.path.join(install_dir, 'lib/libcurl.a'),
                    os.path.join(root_project_path, 'Libs/lib_CMake/linux/libcurl.a'))

    _copy_headers(source_folder_path, root_project_path, 'Others')


def _copy_headers(source_folder_path, root_project_path, target_folder):
    """Copy curl public headers into Libs/include/curl/<target_folder>."""
    include_path = os.path.join(
        root_project_path, os.path.join('Libs/include/curl', target_folder))
    build_utils.copy_files(
        os.path.join(source_folder_path, 'include/curl'),
        include_path, '*.h')
33.616519
112
0.660495
1,469
11,396
4.773315
0.097345
0.051341
0.077011
0.048774
0.828009
0.816743
0.782088
0.761124
0.668711
0.617941
0
0.019597
0.225342
11,396
338
113
33.715976
0.774694
0.008336
0
0.536765
0
0.003676
0.223708
0.079763
0
0
0
0
0
1
0.047794
false
0
0.011029
0.003676
0.088235
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
aca742d4a6a7d5b4d70457cb7408186ab91efbca
117
py
Python
my_app/admin.py
gh-8/FullSend-List
52544d1413b413eb9f646fb38613ca9865e5a88b
[ "MIT" ]
1
2020-08-06T06:32:32.000Z
2020-08-06T06:32:32.000Z
book_app/admin.py
Dhrutiman/my_book
412200f185cd760f3c3c182cf61321f05f59d920
[ "MIT" ]
4
2020-06-05T21:40:29.000Z
2021-06-02T00:54:34.000Z
book_app/admin.py
Dhrutiman/my_book
412200f185cd760f3c3c182cf61321f05f59d920
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Search # Register your models here. admin.site.register(Search)
23.4
32
0.811966
17
117
5.588235
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.119658
117
5
33
23.4
0.92233
0.222222
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
acb6313fbce86ba3db7156bb75d62a0032b83dff
38
py
Python
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
2
2021-11-18T06:22:09.000Z
2021-12-25T09:52:57.000Z
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
2
2021-11-17T16:28:00.000Z
2021-12-01T09:59:40.000Z
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
null
null
null
print((lambda x: lambda y: 2*x)(3)(4))
38
38
0.605263
9
38
2.555556
0.777778
0
0
0
0
0
0
0
0
0
0
0.088235
0.105263
38
1
38
38
0.588235
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
acc55edad5b04c2e0b24f12c37f946370c522b36
62
py
Python
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
Schmoe Joe 12345 90 91 94 87 89 Doe Jane 74836 91 99 82 81 100
31
31
0.758065
16
62
2.9375
0.9375
0
0
0
0
0
0
0
0
0
0
0.659574
0.241935
62
2
32
31
0.340426
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
acd027d5fe8415bf29567d076248cd34dc5cdc41
94
py
Python
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tup = ('a','b',1,2,3) print(tup[0]) print(tup[1]) print(tup[2]) print(tup[3]) print(tup[4])
10.444444
21
0.56383
21
94
2.52381
0.428571
0.754717
0.339623
0
0
0
0
0
0
0
0
0.095238
0.106383
94
8
22
11.75
0.535714
0
0
0
0
0
0.021505
0
0
0
0
0
0
1
0
false
0
0
0
0
0.833333
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
acd101863dd3fde86f97b4f8931de856caab8a8b
101
py
Python
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
5
2015-11-05T12:57:32.000Z
2021-02-24T05:03:05.000Z
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
122
2021-04-16T02:04:24.000Z
2022-01-13T20:17:26.000Z
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
2
2018-03-22T00:37:28.000Z
2018-03-22T00:56:57.000Z
import asyncio from .main import create_app loop = asyncio.get_event_loop() app = create_app(loop)
14.428571
31
0.782178
16
101
4.6875
0.5625
0.24
0.346667
0
0
0
0
0
0
0
0
0
0.138614
101
6
32
16.833333
0.862069
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
4a1d8a246738fe7d87ca8aa627f3e6f93ad3cc07
3,561
py
Python
tests/amqp/test_rpc_client.py
OpenMatchmaking/sage-utils-python
348394bf9cd3adb96fe3915d1d4d99daa46ab437
[ "BSD-3-Clause" ]
null
null
null
tests/amqp/test_rpc_client.py
OpenMatchmaking/sage-utils-python
348394bf9cd3adb96fe3915d1d4d99daa46ab437
[ "BSD-3-Clause" ]
2
2018-04-07T23:24:19.000Z
2018-05-25T08:31:31.000Z
tests/amqp/test_rpc_client.py
OpenMatchmaking/sage-utils-python
348394bf9cd3adb96fe3915d1d4d99daa46ab437
[ "BSD-3-Clause" ]
null
null
null
import pytest from sage_utils.amqp.clients import RpcAmqpClient from sage_utils.amqp.extension import AmqpExtension from sage_utils.constants import VALIDATION_ERROR from sage_utils.wrappers import Response from tests.fixtures import Application, FakeConfig, FakeRegisterMicroserviceWorker REQUEST_QUEUE = FakeRegisterMicroserviceWorker.QUEUE_NAME REQUEST_EXCHANGE = FakeRegisterMicroserviceWorker.REQUEST_EXCHANGE_NAME RESPONSE_EXCHANGE_NAME = FakeRegisterMicroserviceWorker.RESPONSE_EXCHANGE_NAME VALIDATION_ERROR_DECR = FakeRegisterMicroserviceWorker.ERROR_DESCRIPTION @pytest.mark.asyncio async def test_rpc_amqp_client_returns_ok(event_loop): app = Application(config=FakeConfig(), loop=event_loop) register_worker = FakeRegisterMicroserviceWorker(app) extension = AmqpExtension(app) extension.register_worker(register_worker) await extension.init(event_loop) client = RpcAmqpClient( app=app, routing_key=REQUEST_QUEUE, request_exchange=REQUEST_EXCHANGE, response_queue='', response_exchange=RESPONSE_EXCHANGE_NAME ) response = await client.send(payload={'name': 'microservice', 'version': '1.0.0'}) assert Response.CONTENT_FIELD_NAME in response.keys() assert response[Response.CONTENT_FIELD_NAME] == 'OK' assert Response.EVENT_FIELD_NAME in response.keys() assert response[Response.EVENT_FIELD_NAME] is None await extension.deinit(event_loop) @pytest.mark.asyncio async def test_rpc_amqp_client_returns_ok_with_custom_event_loop(event_loop): app = Application(config=FakeConfig(), loop=event_loop) register_worker = FakeRegisterMicroserviceWorker(app) extension = AmqpExtension(app) extension.register_worker(register_worker) await extension.init(event_loop) client = RpcAmqpClient( app=app, routing_key=REQUEST_QUEUE, request_exchange=REQUEST_EXCHANGE, response_queue='', response_exchange=RESPONSE_EXCHANGE_NAME, loop=event_loop ) response = await client.send(payload={'name': 'microservice', 'version': '1.0.0'}) assert Response.CONTENT_FIELD_NAME in response.keys() assert 
response[Response.CONTENT_FIELD_NAME] == 'OK' assert Response.EVENT_FIELD_NAME in response.keys() assert response[Response.EVENT_FIELD_NAME] is None await extension.deinit(event_loop) @pytest.mark.asyncio async def test_rpc_amqp_client_returns_an_error(event_loop): app = Application(config=FakeConfig(), loop=event_loop) register_worker = FakeRegisterMicroserviceWorker(app) extension = AmqpExtension(app) extension.register_worker(register_worker) await extension.init(event_loop) client = RpcAmqpClient( app=app, routing_key=REQUEST_QUEUE, request_exchange=REQUEST_EXCHANGE, response_queue='', response_exchange=RESPONSE_EXCHANGE_NAME ) response = await client.send(payload={}) assert Response.ERROR_FIELD_NAME in response.keys() assert Response.ERROR_TYPE_FIELD_NAME in response[Response.ERROR_FIELD_NAME].keys() assert response[Response.ERROR_FIELD_NAME][Response.ERROR_TYPE_FIELD_NAME] == VALIDATION_ERROR # NOQA assert Response.ERROR_DETAILS_FIELD_NAME in response[Response.ERROR_FIELD_NAME].keys() assert response[Response.ERROR_FIELD_NAME][Response.ERROR_DETAILS_FIELD_NAME] == VALIDATION_ERROR_DECR # NOQA assert Response.EVENT_FIELD_NAME in response.keys() assert response[Response.EVENT_FIELD_NAME] is None await extension.deinit(event_loop)
35.61
114
0.775344
419
3,561
6.28401
0.155131
0.064945
0.033422
0.057729
0.769085
0.747057
0.747057
0.733004
0.733004
0.733004
0
0.001977
0.147711
3,561
99
115
35.969697
0.865568
0.002527
0
0.675676
0
0
0.016906
0
0
0
0
0
0.202703
1
0
false
0
0.081081
0
0.081081
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c5d26f124d7803a5d0483030310d5f3218904ee3
20
py
Python
checkov/version.py
jmeredith16/checkov
91dc9e970609c7ce53e325b8b70fec788dc12c96
[ "Apache-2.0" ]
null
null
null
checkov/version.py
jmeredith16/checkov
91dc9e970609c7ce53e325b8b70fec788dc12c96
[ "Apache-2.0" ]
null
null
null
checkov/version.py
jmeredith16/checkov
91dc9e970609c7ce53e325b8b70fec788dc12c96
[ "Apache-2.0" ]
null
null
null
version = '2.0.706'
10
19
0.6
4
20
3
1
0
0
0
0
0
0
0
0
0
0
0.294118
0.15
20
1
20
20
0.411765
0
0
0
0
0
0.35
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c5efdfd7527a9d610cd0e6cd62680c8957d0b58f
14,512
py
Python
restler/unit_tests/test_basic_functionality_end_to_end.py
Ayudjj/mvp
a0ba706a2156e31cf6053b639b57aa1b9acad442
[ "MIT" ]
1
2020-12-05T14:23:08.000Z
2020-12-05T14:23:08.000Z
restler/unit_tests/test_basic_functionality_end_to_end.py
Ayudjj/mvp
a0ba706a2156e31cf6053b639b57aa1b9acad442
[ "MIT" ]
null
null
null
restler/unit_tests/test_basic_functionality_end_to_end.py
Ayudjj/mvp
a0ba706a2156e31cf6053b639b57aa1b9acad442
[ "MIT" ]
null
null
null
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. """ Runs functional tests, which invoke the RESTler engine and check the RESTler output logs for correctness. When new baseline logs are necessary due to known breaking changes in the logic, a run that matches the test should be run manually and the appropriate logs should be replaced in the unit_tests/log_baseline_test_files directory. Each log is named <test-type_log-type.txt> """ import unittest import os import glob import sys import shutil import subprocess import utils.logger as logger from collections import namedtuple from test_servers.log_parser import * Test_File_Directory = os.path.join( os.path.dirname(__file__), 'log_baseline_test_files' ) Restler_Path = os.path.join(os.path.dirname(__file__), '..', 'restler.py') Common_Settings = [ "python", "-B", Restler_Path, "--use_test_socket", '--custom_mutations', f'{os.path.join(Test_File_Directory, "test_dict.json")}', "--garbage_collection_interval", "30", "--host", "unittest" ] class FunctionalityTests(unittest.TestCase): def get_experiments_dir(self): """ Returns the most recent experiments directory that contains the restler logs @return: The experiments dir @rtype : Str """ results_dir = os.path.join(os.getcwd(), 'RestlerResults') # Return the newest experiments directory in RestlerResults return max(glob.glob(os.path.join(results_dir, 'experiment*/')), key=os.path.getmtime) def get_network_log_path(self, dir, log_type): """ Returns the path to the network log of the specified type @param dir: The directory that contains the log @type dir: Str @param log_type: The type of network log to get @type log_type: Str @return: The path to the network log @rtype : Str """ return glob.glob(os.path.join(dir, 'logs', f'network.{log_type}.*.1.txt'))[0] def tearDown(self): try: shutil.rmtree(self.get_experiments_dir()) except Exception as err: print(f"tearDown function failed: {err!s}.\n" "Experiments directory was not deleted.") def 
test_smoke_test(self): """ This checks that the directed smoke test executes all of the expected requests in the correct order with correct arguments from the dictionary. """ args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "smoke_test_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Smoke test failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "smoke_test_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Smoke test failed: Garbage Collector") def test_create_once(self): """ This checks that a directed smoke test, using create once endpoints, executes all of the expected requests in the correct order with correct arguments from the dictionary. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--settings', f'{os.path.join(Test_File_Directory, "test_settings_createonce.json")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "create_once_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Fuzzing") try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "create_once_pre_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_PREPROCESSING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Preprocessing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "create_once_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Create-once failed: Garbage Collector") def test_checkers(self): """ This checks that a directed smoke test, with checkers enabled (sans namespacerule, payloadbody, examples), bugs planted for each checker, and a main driver bug, will produce the appropriate bug buckets and the requests will be sent in the correct order. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar_bugs.py")}', '--enable_checkers', '*' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "checkers_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Fuzzing") try: default_parser = BugLogParser(os.path.join(Test_File_Directory, "checkers_bug_buckets.txt")) test_parser = BugLogParser(os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt')) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Bug Buckets") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "checkers_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Checkers failed: Garbage Collector") def test_multi_dict(self): """ This checks that the directed smoke test executes all of the expected requests in the correct order when a second dictionary is specified in the settings file to be used for one of the endpoints. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--settings', f'{os.path.join(Test_File_Directory, "test_settings_multidict.json")}' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "multidict_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Multi-dict failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "multidict_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Multi-dict failed: Garbage Collector") def test_fuzz(self): """ This checks that a bfs-cheap fuzzing run executes all of the expected requests in the correct order with correct arguments from the dictionary. 
The test runs for 3 minutes and checks 100 sequences """ Fuzz_Time = 0.1 # 6 minutes Num_Sequences = 300 args = Common_Settings + [ '--fuzzing_mode', 'bfs-cheap', '--restler_grammar',f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--time_budget', f'{Fuzz_Time}', '--enable_checkers', '*', '--disable_checkers', 'namespacerule' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "fuzz_testing_log.txt"), max_seq=Num_Sequences) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING), max_seq=Num_Sequences) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Fuzz failed: Fuzzing") def test_payload_body_checker(self): """ This checks that the payload body checker sends all of the correct requests in the correct order and an expected 500 bug is logged. 
""" args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--enable_checkers', 'payloadbody' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "payloadbody_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) except TestFailedException: self.fail("Payload body failed: Fuzzing") try: default_parser = BugLogParser(os.path.join(Test_File_Directory, "payloadbody_bug_buckets.txt")) test_parser = BugLogParser(os.path.join(experiments_dir, 'bug_buckets', 'bug_buckets.txt')) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Bug Buckets") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "payloadbody_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Garbage Collector") def test_examples_checker(self): """ This checks that the examples checker sends the correct requests in the correct order when query or body examples are present """ args = Common_Settings + [ '--fuzzing_mode', 'directed-smoke-test', '--restler_grammar', f'{os.path.join(Test_File_Directory, "test_grammar.py")}', '--enable_checkers', 'examples' ] result = subprocess.run(args, capture_output=True) if result.stderr: self.fail(result.stderr) try: result.check_returncode() except subprocess.CalledProcessError: self.fail(f"Restler returned non-zero exit code: {result.returncode}") 
experiments_dir = self.get_experiments_dir() try: default_parser = FuzzingLogParser(os.path.join(Test_File_Directory, "examples_testing_log.txt")) test_parser = FuzzingLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_TESTING)) except TestFailedException: self.fail("Payload body failed: Fuzzing") try: default_parser = GarbageCollectorLogParser(os.path.join(Test_File_Directory, "examples_gc_log.txt")) test_parser = GarbageCollectorLogParser(self.get_network_log_path(experiments_dir, logger.LOG_TYPE_GC)) self.assertTrue(default_parser.diff_log(test_parser)) except TestFailedException: self.fail("Payload body failed: Garbage Collector")
44.790123
134
0.673512
1,702
14,512
5.514101
0.13161
0.023015
0.035162
0.038785
0.777411
0.757166
0.745658
0.722749
0.708684
0.708684
0
0.001526
0.232153
14,512
323
135
44.928793
0.840707
0.144708
0
0.620536
0
0
0.217549
0.066017
0
0
0
0
0.0625
1
0.044643
false
0
0.040179
0
0.098214
0.004464
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
681b7c879fe3f2d09f237ea35bdbdb7e97ac6241
2,002
py
Python
tests/garage/replay_buffer/test_replay_buffer.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
1
2019-07-31T06:53:38.000Z
2019-07-31T06:53:38.000Z
tests/garage/replay_buffer/test_replay_buffer.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
null
null
null
tests/garage/replay_buffer/test_replay_buffer.py
Maltimore/garage
a3f44b37eeddca37d157766a9a72e8772f104bcd
[ "MIT" ]
1
2020-02-05T00:34:07.000Z
2020-02-05T00:34:07.000Z
import numpy as np from garage.replay_buffer import SimpleReplayBuffer from tests.fixtures.envs.dummy import DummyDiscreteEnv class TestReplayBuffer: def test_add_transition_dtype(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transition( observation=obs, action=env.action_space.sample()) sample = replay_buffer.sample(1) sample_obs = sample['observation'] sample_action = sample['action'] assert sample_obs.dtype == env.observation_space.dtype assert sample_action.dtype == env.action_space.dtype def test_add_transitions_dtype(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transitions( observation=[obs], action=[env.action_space.sample()]) sample = replay_buffer.sample(1) sample_obs = sample['observation'] sample_action = sample['action'] assert sample_obs.dtype == env.observation_space.dtype assert sample_action.dtype == env.action_space.dtype def test_eviction_policy(self): env = DummyDiscreteEnv() obs = env.reset() replay_buffer = SimpleReplayBuffer( env_spec=env, size_in_transitions=3, time_horizon=1) replay_buffer.add_transitions(observation=[obs, obs], action=[1, 2]) assert not replay_buffer.full replay_buffer.add_transitions(observation=[obs, obs], action=[3, 4]) assert replay_buffer.full replay_buffer.add_transitions(observation=[obs, obs], action=[5, 6]) replay_buffer.add_transitions(observation=[obs, obs], action=[7, 8]) assert np.array_equal(replay_buffer._buffer['action'], [[7], [8], [6]]) assert replay_buffer.n_transitions_stored == 3
39.254902
79
0.679321
237
2,002
5.485232
0.219409
0.147692
0.069231
0.1
0.774615
0.774615
0.774615
0.774615
0.73
0.73
0
0.012796
0.219281
2,002
50
80
40.04
0.818938
0
0
0.536585
0
0
0.01998
0
0
0
0
0
0.195122
1
0.073171
false
0
0.073171
0
0.170732
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a84bbb52eda077f44cc7b316828fb87f5e2b4397
3,371
py
Python
ansible-devel/test/units/galaxy/test_role_requirements.py
satishcarya/ansible
ed091e174c26316f621ac16344a95c99f56bdc43
[ "MIT" ]
null
null
null
ansible-devel/test/units/galaxy/test_role_requirements.py
satishcarya/ansible
ed091e174c26316f621ac16344a95c99f56bdc43
[ "MIT" ]
null
null
null
ansible-devel/test/units/galaxy/test_role_requirements.py
satishcarya/ansible
ed091e174c26316f621ac16344a95c99f56bdc43
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright: (c) 2020, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import pytest from ansible.playbook.role.requirement import RoleRequirement def test_null_role_url(): role = RoleRequirement.role_yaml_parse('') assert role['src'] == '' assert role['name'] == '' assert role['scm'] is None assert role['version'] is None def test_git_file_role_url(): role = RoleRequirement.role_yaml_parse('git+file:///home/bennojoy/nginx') assert role['src'] == 'file:///home/bennojoy/nginx' assert role['name'] == 'nginx' assert role['scm'] == 'git' assert role['version'] is None def test_https_role_url(): role = RoleRequirement.role_yaml_parse('https://github.com/bennojoy/nginx') assert role['src'] == 'https://github.com/bennojoy/nginx' assert role['name'] == 'nginx' assert role['scm'] is None assert role['version'] is None def test_git_https_role_url(): role = RoleRequirement.role_yaml_parse('git+https://github.com/geerlingguy/ansible-role-composer.git') assert role['src'] == 'https://github.com/geerlingguy/ansible-role-composer.git' assert role['name'] == 'ansible-role-composer' assert role['scm'] == 'git' assert role['version'] is None def test_git_version_role_url(): role = RoleRequirement.role_yaml_parse('git+https://github.com/geerlingguy/ansible-role-composer.git,main') assert role['src'] == 'https://github.com/geerlingguy/ansible-role-composer.git' assert role['name'] == 'ansible-role-composer' assert role['scm'] == 'git' assert role['version'] == 'main' @pytest.mark.parametrize("url", [ ('https://some.webserver.example.com/files/main.tar.gz'), ('https://some.webserver.example.com/files/main.tar.bz2'), ('https://some.webserver.example.com/files/main.tar.xz'), ]) def test_tar_role_url(url): role = RoleRequirement.role_yaml_parse(url) assert role['src'] == url assert 
role['name'].startswith('main') assert role['scm'] is None assert role['version'] is None def test_git_ssh_role_url(): role = RoleRequirement.role_yaml_parse('[email protected]:mygroup/ansible-base.git') assert role['src'] == '[email protected]:mygroup/ansible-base.git' assert role['name'].startswith('ansible-base') assert role['scm'] is None assert role['version'] is None def test_token_role_url(): role = RoleRequirement.role_yaml_parse('git+https://gitlab+deploy-token-312644:[email protected]/akasurde/ansible-demo') assert role['src'] == 'https://gitlab+deploy-token-312644:[email protected]/akasurde/ansible-demo' assert role['name'].startswith('ansible-demo') assert role['scm'] == 'git' assert role['version'] is None def test_token_new_style_role_url(): role = RoleRequirement.role_yaml_parse({"src": "git+https://gitlab+deploy-token-312644:[email protected]/akasurde/ansible-demo"}) assert role['src'] == 'https://gitlab+deploy-token-312644:[email protected]/akasurde/ansible-demo' assert role['name'].startswith('ansible-demo') assert role['scm'] == 'git' assert role['version'] == ''
37.876404
147
0.703055
452
3,371
5.099558
0.196903
0.156182
0.056399
0.101518
0.811714
0.801302
0.767896
0.711931
0.597831
0.553145
0
0.016046
0.131118
3,371
88
148
38.306818
0.770912
0.052803
0
0.412698
0
0.063492
0.389586
0.060853
0
0
0
0
0.571429
1
0.142857
false
0
0.047619
0
0.190476
0.015873
0
0
0
null
0
0
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
a87bc89cece67977cd20dc1b823775f648a5a6ea
238
py
Python
glue/core/exceptions.py
HPLegion/glue
1843787ccb4de852dfe103ff58473da13faccf5f
[ "BSD-3-Clause" ]
550
2015-01-08T13:51:06.000Z
2022-03-31T11:54:47.000Z
glue/core/exceptions.py
HPLegion/glue
1843787ccb4de852dfe103ff58473da13faccf5f
[ "BSD-3-Clause" ]
1,362
2015-01-03T19:15:52.000Z
2022-03-30T13:23:11.000Z
glue/core/exceptions.py
HPLegion/glue
1843787ccb4de852dfe103ff58473da13faccf5f
[ "BSD-3-Clause" ]
142
2015-01-08T13:08:00.000Z
2022-03-18T13:25:57.000Z
class IncompatibleAttribute(Exception): pass class IncompatibleDataException(Exception): pass class UndefinedROI(Exception): pass class InvalidSubscriber(Exception): pass class InvalidMessage(Exception): pass
11.9
43
0.752101
20
238
8.95
0.4
0.363128
0.402235
0
0
0
0
0
0
0
0
0
0.184874
238
19
44
12.526316
0.92268
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
a89a91a25c7d71cc53adcd1adb0b082bb4aacfbb
59
py
Python
config.py
retry0/botTelegram-nCov
e75cac6db6ca0f3e8394c82a612bd150fc9c9d44
[ "MIT" ]
null
null
null
config.py
retry0/botTelegram-nCov
e75cac6db6ca0f3e8394c82a612bd150fc9c9d44
[ "MIT" ]
null
null
null
config.py
retry0/botTelegram-nCov
e75cac6db6ca0f3e8394c82a612bd150fc9c9d44
[ "MIT" ]
null
null
null
api_key = "1108029941:AAGkHMtVFPT1-SsL5dRZn7gR65hxWD9-HH0"
29.5
58
0.847458
6
59
8.166667
1
0
0
0
0
0
0
0
0
0
0
0.303571
0.050847
59
1
59
59
0.571429
0
0
0
0
0
0.779661
0.779661
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5