hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27e6185cd1321c58ae5c06b94cfd558705c422cd
| 365
|
py
|
Python
|
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | 4
|
2018-03-26T08:54:50.000Z
|
2021-07-28T13:34:07.000Z
|
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | null | null | null |
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | 4
|
2017-10-26T12:59:39.000Z
|
2021-07-12T08:40:56.000Z
|
from sys import argv
def make_new_reference_files(filename, sub1, sub2, divider=">chr9"):
genomes = open(filename).read().split(divider)
f = open(sub1, "w")
f.write(genomes[0])
f.close()
f = open(sub2, "w")
f.write(">chr9"+genomes[1])
f.close()
if __name__=="__main__":
make_new_reference_files(argv[1], argv[2], argv[3], argv[4])
| 26.071429
| 68
| 0.635616
| 56
| 365
| 3.892857
| 0.535714
| 0.06422
| 0.146789
| 0.192661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039867
| 0.175342
| 365
| 13
| 69
| 28.076923
| 0.684385
| 0
| 0
| 0.181818
| 0
| 0
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
27ea8174cb81713dd5c70d96704d5a2c63cec32e
| 325
|
py
|
Python
|
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
from sys import modules
from importlib import import_module
modules['server'] = import_module('src')
from werkzeug.serving import run_simple
from server.app import App
from server.mode import Mode
if __name__=='__main__':
app = App(mode=Mode.Development)
run_simple('localhost', 8000, app, use_reloader=True)
| 27.083333
| 55
| 0.76
| 46
| 325
| 5.086957
| 0.5
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01444
| 0.147692
| 325
| 11
| 56
| 29.545455
| 0.830325
| 0
| 0
| 0
| 0
| 0
| 0.082803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
27f5b22f4011155a67ce267a26bf5d2d27c8298e
| 6,955
|
py
|
Python
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 6
|
2018-06-06T08:37:53.000Z
|
2020-06-01T13:13:13.000Z
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 30
|
2018-06-08T02:47:18.000Z
|
2018-07-25T07:07:07.000Z
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 5
|
2018-06-06T08:37:55.000Z
|
2021-01-07T09:15:15.000Z
|
from IPython.core.magic import Magics, magics_class, line_cell_magic
from sys import stdout
from os import linesep
from os.path import join, expanduser
from adlmagics.version import adlmagics_version
from adlmagics.converters.dataframe_converter import DataFrameConverter
from adlmagics.utils.json_file_persister import JsonFilePersister
from adlmagics.utils.ipshell_result_receiver import IPShellResultReceiver
from adlmagics.presenters.presenter_base import PresenterBase
from adlmagics.presenters.text_presenter import TextPresenter
from adlmagics.presenters.adla_job_presenter import AdlaJobPresenter
from adlmagics.presenters.adla_jobs_presenter import AdlaJobsPresenter
from adlmagics.presenters.adls_files_presenter import AdlsFilesPresenter
from adlmagics.presenters.adls_folders_presenter import AdlsFoldersPresenter
from adlmagics.services.azure_token_service import AzureTokenService
from adlmagics.services.adla_service_sdk_impl import AdlaServiceSdkImpl
from adlmagics.services.adls_service_sdk_impl import AdlsServiceSdkImpl
from adlmagics.services.session_service import SessionService
from adlmagics.services.presenter_factory import PresenterFactory
from adlmagics.magics.session.session_magic_base import SessionMagicBase
from adlmagics.magics.session.session_viewing_magic import SessionViewingMagic
from adlmagics.magics.session.session_item_setting_magic import SessionItemSettingMagic
from adlmagics.magics.azure.azure_magic_base import AzureMagicBase
from adlmagics.magics.azure.azure_login_magic import AzureLoginMagic
from adlmagics.magics.azure.azure_logout_magic import AzureLogoutMagic
from adlmagics.magics.adla.adla_magic_base import AdlaMagicBase
from adlmagics.magics.adla.adla_accounts_listing_magic import AdlaAccountsListingMagic
from adlmagics.magics.adla.adla_job_viewing_magic import AdlaJobViewingMagic
from adlmagics.magics.adla.adla_job_submission_magic import AdlaJobSubmissionMagic
from adlmagics.magics.adla.adla_jobs_listing_magic import AdlaJobsListingMagic
from adlmagics.magics.adls.adls_magic_base import AdlsMagicBase
from adlmagics.magics.adls.adls_accounts_listing_magic import AdlsAccountsListingMagic
from adlmagics.magics.adls.adls_folders_listing_magic import AdlsFoldersListingMagic
from adlmagics.magics.adls.adls_files_listing_magic import AdlsFilesListingMagic
from adlmagics.magics.adls.adls_file_sampling_magic import AdlsFileSamplingMagic
@magics_class
class AdlMagics(Magics):
def __init__(self, shell, data = None):
super(AdlMagics, self).__init__(shell)
self.__session_service = SessionService(JsonFilePersister(join(expanduser("~"), "adlmagics.session"), "utf-8"))
self.__presenter_factory = PresenterFactory()
self.__register_presenter(TextPresenter)
self.__register_presenter(AdlaJobPresenter)
self.__register_presenter(AdlaJobsPresenter)
self.__register_presenter(AdlsFilesPresenter)
self.__register_presenter(AdlsFoldersPresenter)
self.__token_service = AzureTokenService(self.__presenter_factory)
self.__adla_service = AdlaServiceSdkImpl(self.__token_service)
self.__adls_service = AdlsServiceSdkImpl(self.__token_service)
self.__initialize_magics()
self.__write_line("%s %s initialized" % (AdlMagics.__name__, adlmagics_version))
@line_cell_magic
def adl(self, line, cell = ""):
cmd = line.strip()
arg_string = ""
try:
cmd_end_index = cmd.index(" ")
cmd = cmd[0:cmd_end_index].strip().lower()
arg_string = line[cmd_end_index:].strip()
except:
pass
if (cmd not in self.__magics):
raise ValueError("Unsupported command '%s'" % cmd)
magic = self.__magics[cmd]
return magic.execute(arg_string, cell)
def __register_presenter(self, presenter_class):
if (not issubclass(presenter_class, PresenterBase)):
raise TypeError("%s not a presenter class." % (presenter_class.__name__))
presenter = presenter_class()
self.__presenter_factory.register_presenter(presenter)
def __initialize_magics(self):
df_converter = DataFrameConverter()
self.__magics = dict()
self.__register_session_magic(SessionViewingMagic)
self.__register_session_magic(SessionItemSettingMagic)
self.__register_azure_magic(AzureLoginMagic)
self.__register_azure_magic(AzureLogoutMagic)
self.__register_adla_magic(AdlaAccountsListingMagic, df_converter)
self.__register_adla_magic(AdlaJobViewingMagic, df_converter)
self.__register_adla_magic(AdlaJobsListingMagic, df_converter)
adla_job_submission_magic = AdlaJobSubmissionMagic(self.__session_service, self.__presenter_factory, df_converter, IPShellResultReceiver(), self.__adla_service)
self.__magics[adla_job_submission_magic.cmd_name.lower()] = adla_job_submission_magic
self.__register_adls_magic(AdlsAccountsListingMagic, df_converter)
self.__register_adls_magic(AdlsFoldersListingMagic, df_converter)
self.__register_adls_magic(AdlsFilesListingMagic, df_converter)
self.__register_adls_magic(AdlsFileSamplingMagic, df_converter)
def __register_session_magic(self, session_magic_class):
if (not issubclass(session_magic_class, SessionMagicBase)):
raise TypeError("%s not a session magic class." % (session_magic_class.__name__))
session_magic = session_magic_class(self.__session_service, self.__presenter_factory)
self.__magics[session_magic.cmd_name.lower()] = session_magic
def __register_azure_magic(self, azure_magic_class):
if (not issubclass(azure_magic_class, AzureMagicBase)):
raise TypeError("%s not a azure magic class." % (azure_magic_class.__name__))
azure_magic = azure_magic_class(self.__session_service, self.__presenter_factory, self.__token_service)
self.__magics[azure_magic.cmd_name.lower()] = azure_magic
def __register_adla_magic(self, adla_magic_class, result_converter):
if (not issubclass(adla_magic_class, AdlaMagicBase)):
raise TypeError("%s not a adla magic class." % (adla_magic_class.__name__))
adla_magic = adla_magic_class(self.__session_service, self.__presenter_factory, result_converter, self.__adla_service)
self.__magics[adla_magic.cmd_name.lower()] = adla_magic
def __register_adls_magic(self, adls_magic_class, result_converter):
if (not issubclass(adls_magic_class, AdlsMagicBase)):
raise TypeError("%s not a adls magic class." % (adls_magic_class.__name__))
adls_magic = adls_magic_class(self.__session_service, self.__presenter_factory, result_converter, self.__adls_service)
self.__magics[adls_magic.cmd_name.lower()] = adls_magic
def __write_line(self, text):
stdout.write(text + linesep)
| 48.298611
| 168
| 0.78404
| 790
| 6,955
| 6.439241
| 0.159494
| 0.079222
| 0.05976
| 0.022607
| 0.23963
| 0.123845
| 0.062512
| 0.046786
| 0.046786
| 0.026342
| 0
| 0.000337
| 0.146801
| 6,955
| 144
| 169
| 48.298611
| 0.856926
| 0
| 0
| 0
| 0
| 0
| 0.028465
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.009259
| 0.324074
| 0
| 0.425926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7e03585ae9ededa10d0e3ad01e0e054a8d2b1e4e
| 1,998
|
py
|
Python
|
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import fast_carpenter.summary.binning_config as mgr
from . import dummy_binning_descriptions as binning
def test_create_one_region():
cfg = {"_" + k: v for k, v in binning.bins_nmuon.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_region", **cfg)
assert _in == "NMuon"
assert _out == "nmuon"
assert _index is None
assert _bins is None
def test_create_one_dimension_aT():
cfg = {"_" + k: v for k, v in binning.bins_met_px.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_dimension_aT", **cfg)
assert _in == "MET_px"
assert _out == "met_px"
assert _index is None
assert isinstance(_bins, np.ndarray)
assert np.all(_bins[1:-1] == np.linspace(0, 100, 11))
assert _bins[0] == float("-inf")
assert _bins[-1] == float("inf")
def test_create_one_dimension_HT():
cfg = {"_" + k: v for k, v in binning.bins_py.items()}
_in, _out, _bins, _index = mgr.create_one_dimension("test_create_one_dimension_HT", **cfg)
assert _in == "Jet_Py"
assert _out == "py_leadJet"
assert _index == 0
assert isinstance(_bins, np.ndarray)
assert np.all(_bins[1:-1] == [0, 20, 100])
assert _bins[0] == float("-inf")
assert _bins[-1] == float("inf")
def test_create_binning_list():
ins, outs, bins = mgr.create_binning_list("test_create_binning_list", [binning.bins_nmuon, binning.bins_met_px])
assert ins == ["NMuon", "MET_px"]
assert outs == ["nmuon", "met_px"]
assert len(bins) == 2
assert bins[0] is None
def test_create_weights_list():
name = "test_create_weights_list"
weights = mgr.create_weights(name, binning.weight_list)
assert len(weights) == 1
assert weights["EventWeight"] == "EventWeight"
def test_create_weights_dict():
name = "test_create_weights_dict"
weights = mgr.create_weights(name, binning.weight_dict)
assert len(weights) == 1
assert weights["weighted"] == "EventWeight"
| 33.864407
| 116
| 0.68018
| 292
| 1,998
| 4.304795
| 0.208904
| 0.095465
| 0.100239
| 0.070008
| 0.575975
| 0.499602
| 0.413683
| 0.35004
| 0.35004
| 0.295147
| 0
| 0.015347
| 0.184685
| 1,998
| 58
| 117
| 34.448276
| 0.756292
| 0
| 0
| 0.217391
| 0
| 0
| 0.134134
| 0.075075
| 0
| 0
| 0
| 0
| 0.565217
| 1
| 0.130435
| false
| 0
| 0.065217
| 0
| 0.195652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7e09cc367a70ac9496d060fdad8e3eb6e83f2472
| 141
|
py
|
Python
|
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
input()
S=input()
dot=S.count(".")
ans=dot
count=0
for s in S:
if s=="#":count+=1
else:dot-=1
ans=(min(ans,count+dot))
print(ans)
| 14.1
| 28
| 0.574468
| 28
| 141
| 2.892857
| 0.464286
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025862
| 0.177305
| 141
| 10
| 29
| 14.1
| 0.672414
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7e0e399837934c037868f72f1f2ece1fe8884d6e
| 328
|
py
|
Python
|
blog/urls.py
|
Halo-Developers/Halo-Learn
|
4c8f9e395c0145df39fa3333fefa23d02a370688
|
[
"MIT"
] | 1
|
2021-09-23T16:02:51.000Z
|
2021-09-23T16:02:51.000Z
|
blog/urls.py
|
kuyesu/Halo-Learn
|
abd60d45c191297daedd20b3b308a30a78cba9c7
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
kuyesu/Halo-Learn
|
abd60d45c191297daedd20b3b308a30a78cba9c7
|
[
"MIT"
] | 2
|
2021-09-20T09:50:45.000Z
|
2022-02-20T06:42:42.000Z
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.post_list, name='post_list'),
path('<slug:post>/',views.post_detail,name="post_detail"),
path('comment/reply/', views.reply_page, name="reply"),
path('tag/<slug:tag_slug>/',views.post_list, name='post_tag'),
]
| 27.333333
| 68
| 0.67378
| 47
| 328
| 4.510638
| 0.382979
| 0.127358
| 0.122642
| 0.160377
| 0.198113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134146
| 328
| 11
| 69
| 29.818182
| 0.746479
| 0
| 0
| 0
| 0
| 0
| 0.253049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7e11a4d77fb343f77a786063eff503d5200a6c2d
| 1,313
|
py
|
Python
|
126-Word_Ladder_II.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | 6
|
2018-06-13T06:48:42.000Z
|
2020-11-25T10:48:13.000Z
|
126-Word_Ladder_II.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | null | null | null |
126-Word_Ladder_II.py
|
QuenLo/leecode
|
ce861103949510dc54fd5cb336bd992c40748de2
|
[
"MIT"
] | null | null | null |
class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
tree, words, len_w = collections.defaultdict(set), set( wordList ), len(beginWord)
if endWord not in words:
return []
found, q, nextq = False, {beginWord}, set()
while q and not found:
words -= set(q)
for x in q:
# a -> z
for char in string.ascii_lowercase:
for i in range(len_w):
test = x[:i] + char + x[i+1:]
if test == endWord:
found = True
tree[x].add(test)
elif test in words:
nextq.add(test)
tree[x].add(test)
q, nextq = nextq, set()
def back(x):
if x == endWord:
return [[x]]
else:
ans = []
for test in tree[x]:
for y in back(test):
ans.append( [x] + y )
return ans
# [[x]] if x == endWord else [[x] + rest for y in tree[x] for rest in bt(y)]
return back(beginWord)
| 35.486486
| 96
| 0.393755
| 141
| 1,313
| 3.64539
| 0.347518
| 0.038911
| 0.031128
| 0.046693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001538
| 0.504951
| 1,313
| 36
| 97
| 36.472222
| 0.789231
| 0.061691
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd684e3bf1de0c4b9c2f1d5a15a6a2d42e862075
| 286
|
py
|
Python
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.list_pkg.unsigned_short.schema_instance.nistschema_sv_iv_list_unsigned_short_min_length_2_xsd.nistschema_sv_iv_list_unsigned_short_min_length_2 import NistschemaSvIvListUnsignedShortMinLength2
__all__ = [
"NistschemaSvIvListUnsignedShortMinLength2",
]
| 47.666667
| 221
| 0.905594
| 34
| 286
| 6.882353
| 0.617647
| 0.166667
| 0.119658
| 0.153846
| 0.350427
| 0.350427
| 0.350427
| 0.350427
| 0.350427
| 0
| 0
| 0.014706
| 0.048951
| 286
| 5
| 222
| 57.2
| 0.845588
| 0
| 0
| 0
| 0
| 0
| 0.143357
| 0.143357
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd6f10a9e5cd95371737b186d651e8e464b2660c
| 389
|
py
|
Python
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 42
|
2018-01-18T14:50:05.000Z
|
2022-03-24T18:34:19.000Z
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 14
|
2018-12-05T21:39:23.000Z
|
2022-02-27T06:43:48.000Z
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 5
|
2018-01-18T16:32:20.000Z
|
2021-06-07T10:15:18.000Z
|
from django.conf.urls import include, url
from rest_framework.documentation import include_docs_urls
from examples.rest import router
from .views import index
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^', include(router.urls)),
url(r'^dynamicforms/', include('dynamicforms.urls')),
url(r'^api-docs/', include_docs_urls(title='Example API documentation')),
]
| 29.923077
| 77
| 0.722365
| 52
| 389
| 5.307692
| 0.423077
| 0.057971
| 0.108696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128535
| 389
| 12
| 78
| 32.416667
| 0.814159
| 0
| 0
| 0
| 0
| 0
| 0.190231
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
fd6fc2f8e9fb0cf4963f53e0dd218bc472fd9daa
| 4,572
|
py
|
Python
|
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Mostly these are internal imports related to django and rest_framework.
The os and io imports are for creating files, paths and parsing bytes objects respectively
'''
from django.db import models
from django.contrib.auth.models import User
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from vault_backend.extra_functions import *
import os
import io
'''
The Vault model represents the basic password vault in passman. This model will store the directory path, filename and vault_name specified. This is linked to the User model for only displaying vaults belonging
to the authenticated user. The Vault model is later referenced in different places for creating and updating records stored in it.
'''
class Vault(models.Model):
number = models.IntegerField(primary_key=True)
username = models.ForeignKey(User, on_delete=models.CASCADE)
vault_name = models.CharField(max_length=200, unique=True)
directory = models.CharField(max_length=200, default='/home/sharan/.vaults')
filename = models.CharField(max_length=200, default="vault")
# Create a new vault as a file in specified directory for future use and store of encrypted passwords.
def create_vault(self):
default_directory = self.directory
default_filename = self.filename
if not os.path.exists(default_directory):
os.mkdir(default_directory)
file = open(default_directory + '/' + default_filename, 'w').close()
def check_data(self, term, data):
if term in data:
return True
return False
# adding passwords to the vault file after encrypting them
def add_data(self, sitename, password):
try:
user = self.username
key = generate_key(user)
default_directory = self.directory
default_filename = self.filename
arr_of_passwords = self.get_data()
print(arr_of_passwords)
if arr_of_passwords == '':
arr_of_passwords = [] # passwords stored as a JSON array for easy future retrieval and storage
write_descriptor = open(default_directory + '/' + default_filename, 'w')
write_data = {'site_name': sitename, 'password': password}
if self.check_data(write_data, arr_of_passwords):
return 2
arr_of_passwords.append(write_data)
write_data = JSONRenderer().render(arr_of_passwords)
encrypted_data = encrypt_data(key, write_data) # this encrypt_data function is defined in extra_functions module. It takes some data and encrypts it using cryptography.fernet (refer cryptography.fernet module).
write_descriptor.write(encrypted_data)
write_descriptor.close()
return 0
except:
if (write_descriptor):
write_descriptor.close()
return 1
# read data from the vault file and decrypt them before dispatch
def get_data(self):
try:
user = self.username
key = generate_key(user)
default_directory = self.directory
default_filename = self.filename
read_descriptor = open(default_directory + '/' + default_filename, 'r')
data = read_descriptor.read()
if data == '':
read_descriptor.close()
return data
read_data = io.BytesIO(decrypt_data(key, data)) # the decrypt_data function is defined in extra_functions module. It decrypts data given by generating a fernet key from PBKDF2 using user creds.
json_read_data = JSONParser().parse(read_data)
read_descriptor.close()
return json_read_data
except:
read_descriptor.close()
return 1
# Delete Record functionality in vault.Not tested delete functionality yet. Might implement in future.
''' def delete_data(self, sitename, password):
try:
delete_data = {'site_name':sitename, 'password':password}
data = self.get_data()
if self.check_data(delete_data, data):
data.remove(delete_data)
if data:
for dictionary_data in data:
self.add_data(dictionary_data['site_name'], dictionary_data['password'])
return 0
else:
self.create_vault()
return 0
except ValueError:
return 'No Such Value'
'''
| 41.563636
| 227
| 0.649606
| 548
| 4,572
| 5.25365
| 0.312044
| 0.04446
| 0.03404
| 0.025009
| 0.255297
| 0.208058
| 0.115318
| 0.115318
| 0.095867
| 0.064606
| 0
| 0.004866
| 0.28084
| 4,572
| 109
| 228
| 41.944954
| 0.870742
| 0.188758
| 0
| 0.323077
| 0
| 0
| 0.017429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0.138462
| 0.107692
| 0
| 0.384615
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fd71ae1315e427ea9c9874263b95024d2ffb8696
| 1,852
|
py
|
Python
|
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from api.models import User, UserProfile, Post, News, Video
from datetime import datetime
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = ('address', 'country', 'city', 'zip', 'photo')
class UserSerializer(serializers.HyperlinkedModelSerializer):
profile = UserProfileSerializer(required=True)
class Meta:
model = User
fields = ('url', 'email', 'first_name', 'last_name', 'password', 'profile')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
profile_data = validated_data.pop('profile')
password = validated_data.pop('password')
user = User(**validated_data)
user.set_password(password)
user.save()
UserProfile.objects.create(user=user, **profile_data)
return user
def update(self, instance, validated_data):
profile_data = validated_data.pop('profile')
profile = instance.profile
instance.email = validated_data.get('email', instance.email)
instance.save()
profile.address = profile_data.get('address', profile.address)
profile.country = profile_data.get('country', profile.country)
profile.city = profile_data.get('city', profile.city)
profile.zip = profile_data.get('zip', profile.zip)
profile.photo = profile_data.get('photo', profile.photo)
profile.save()
return instance
class PostSerializer(serializers.Serializer):
class Meta:
model = Post
fields = '__all__'
class NewSerializer(serializers.Serializer):
class Meta:
model = News
fields = '__all__'
class VideoSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of Video.

    Fixed: was a plain ``serializers.Serializer``, whose Meta options
    are ignored — ``ModelSerializer`` is required for `fields = '__all__'`
    to work.
    """
    class Meta:
        model = Video
        fields = '__all__'
| 31.389831
| 83
| 0.665227
| 193
| 1,852
| 6.212435
| 0.295337
| 0.073395
| 0.058382
| 0.075063
| 0.165972
| 0.078399
| 0.078399
| 0.078399
| 0
| 0
| 0
| 0
| 0.224622
| 1,852
| 59
| 84
| 31.389831
| 0.834958
| 0
| 0
| 0.222222
| 0
| 0
| 0.086346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0.088889
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fd76b6a6e3bed41850763cc3f44afdab15844d51
| 427
|
py
|
Python
|
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | 1
|
2020-08-13T05:31:01.000Z
|
2020-08-13T05:31:01.000Z
|
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | null | null | null |
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | null | null | null |
import environs
# Load environment variables (including any .env file found by environs)
# *before* importing the submodules below, which presumably read
# configuration from the environment at import time — TODO confirm.
env = environs.Env()
env.read_env()
from wsgi_microservice_middleware.cors import CORSMiddleware
from wsgi_microservice_middleware.request_id import (
    RequestIdFilter,
    RequestIdMiddleware,
    current_request_id,
    RequestIdJsonLogFormatter
)
# Public API of the package.
__all__ = [
    'CORSMiddleware',
    'RequestIdFilter',
    'RequestIdMiddleware',
    'current_request_id',
    'RequestIdJsonLogFormatter'
]
| 17.791667
| 60
| 0.744731
| 36
| 427
| 8.444444
| 0.472222
| 0.088816
| 0.131579
| 0.197368
| 0.493421
| 0.493421
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18267
| 427
| 23
| 61
| 18.565217
| 0.87106
| 0
| 0
| 0
| 0
| 0
| 0.213615
| 0.058685
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd77738934a082ed69675adc328a1ec23a42bd8b
| 686
|
py
|
Python
|
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | 1
|
2021-03-24T02:21:03.000Z
|
2021-03-24T02:21:03.000Z
|
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | null | null | null |
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | null | null | null |
import turtle as t
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 1
def next_level():
    """Speed the cars up by one increment for the next level."""
    global STARTING_MOVE_DISTANCE
    STARTING_MOVE_DISTANCE = STARTING_MOVE_DISTANCE + MOVE_INCREMENT
class CarManager(t.Turtle):
    """A single car: a coloured square that drives right-to-left."""

    def __init__(self):
        """Spawn the car at x=300 on a random lane (y multiple of 10)."""
        super().__init__()
        self.penup()
        self.setheading(180)
        self.x = 300
        self.y = 10 * round(random.randint(-250, 250) / 10)
        self.goto(self.x, self.y)
        self.shape("square")
        self.shapesize(stretch_wid=1, stretch_len=2)
        self.color(random.choice(COLORS))

    def go_forward(self):
        """Advance one step at the current level's speed."""
        self.forward(STARTING_MOVE_DISTANCE)
| 23.655172
| 63
| 0.644315
| 89
| 686
| 4.719101
| 0.550562
| 0.114286
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037807
| 0.228863
| 686
| 28
| 64
| 24.5
| 0.756144
| 0
| 0
| 0
| 0
| 0
| 0.052478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd7a3d5f8bd77ce667a1424c233439cb51d4d806
| 2,032
|
py
|
Python
|
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
"""
Tissue Corrections
==================
"""
###################################################################################################
# .. _tutorial_removing_autoflourescence:
#
# Removing autofluorescence
# =========================
#
# In addition to the bright spots (signal) that we want to detect, microscopy experiments on tissue
# slices often have a non-zero amount of auto-fluorescence from the cell bodies. This can be mitigated
# by "clearing" strategies whereby tissue lipids and proteins are digested, or computationally by
# estimating and subtracting the background values.
#
# We use the same test image from the previous section to demonstrate how this can work.
#
# Clipping
# --------
# The simplest way to remove background is to set a global, (linear) cut-off and clip out the
# background values.
import starfish
import starfish.data
from starfish.image import Filter
from starfish.types import Axes
experiment: starfish.Experiment = starfish.data.ISS(use_test_data=True)
field_of_view: starfish.FieldOfView = experiment["fov_001"]
image: starfish.ImageStack = field_of_view.get_image("primary")
###################################################################################################
# Next, create the clip filter. Here we clip at the 50th percentile, optimally separates the spots
# from the background
clip_50 = Filter.Clip(p_min=97)
clipped: starfish.ImageStack = clip_50.run(image)
###################################################################################################
# plot both images
import matplotlib.pyplot as plt
import xarray as xr
# get the images
orig_plot: xr.DataArray = image.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
clip_plot: xr.DataArray = clipped.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
f, (ax1, ax2) = plt.subplots(ncols=2)
ax1.imshow(orig_plot)
ax1.set_title("original")
ax2.imshow(clip_plot)
ax2.set_title("clipped")
###################################################################################################
#
| 35.034483
| 102
| 0.599902
| 238
| 2,032
| 5.037815
| 0.542017
| 0.017515
| 0.031693
| 0.016681
| 0.055046
| 0.055046
| 0.055046
| 0.055046
| 0.055046
| 0
| 0
| 0.012141
| 0.108268
| 2,032
| 57
| 103
| 35.649123
| 0.649559
| 0.413386
| 0
| 0
| 0
| 0
| 0.03776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
fd88d6da1a9e31351274cdc8c9d06c97bd2fa421
| 884
|
py
|
Python
|
rl_server/tensorflow/networks/layer_norm.py
|
parilo/tars-rl
|
17595905a0d1bdc90fe3d8f793acb60de96ea897
|
[
"MIT"
] | 9
|
2019-03-11T11:02:12.000Z
|
2022-03-10T12:53:25.000Z
|
rl_server/tensorflow/networks/layer_norm.py
|
parilo/tars-rl
|
17595905a0d1bdc90fe3d8f793acb60de96ea897
|
[
"MIT"
] | 1
|
2021-01-06T20:18:33.000Z
|
2021-01-06T20:19:53.000Z
|
rl_server/tensorflow/networks/layer_norm.py
|
parilo/tars-rl
|
17595905a0d1bdc90fe3d8f793acb60de96ea897
|
[
"MIT"
] | 3
|
2019-01-19T03:32:26.000Z
|
2020-11-29T18:15:57.000Z
|
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class LayerNorm(Layer):
def __init__(self, axis=-1, eps=1e-6, **kwargs):
self.axis = axis
self.eps = eps
super(LayerNorm, self).__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[self.axis]
self.gamma = self.add_weight(shape=(input_dim,), initializer="ones", name="gamma")
self.beta = self.add_weight(shape=(input_dim,), initializer="zeros", name="beta")
super(LayerNorm, self).build(input_shape)
def call(self, x):
mean = K.mean(x, axis=self.axis, keepdims=True)
std = K.std(x, axis=self.axis, keepdims=True)
out = self.gamma * (x - mean) / (std + self.eps) + self.beta
return out
def compute_output_shape(self, input_shape):
return input_shape
| 34
| 90
| 0.645928
| 123
| 884
| 4.479675
| 0.357724
| 0.072595
| 0.07078
| 0.090744
| 0.225045
| 0.225045
| 0.134301
| 0
| 0
| 0
| 0
| 0.004348
| 0.219457
| 884
| 25
| 91
| 35.36
| 0.794203
| 0
| 0
| 0
| 0
| 0
| 0.020362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.105263
| 0.052632
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd89a08e7b302e42c7342a9d210047fac1b9c3fb
| 3,399
|
py
|
Python
|
hasdrubal/visitors/type_var_resolver.py
|
Armani-T/hasdrubal
|
7fac381b866114533e589e964ec7c27adbd1deff
|
[
"MIT"
] | 2
|
2021-06-25T15:46:16.000Z
|
2022-02-20T22:04:36.000Z
|
hasdrubal/visitors/type_var_resolver.py
|
Armani-T/hasdrubal
|
7fac381b866114533e589e964ec7c27adbd1deff
|
[
"MIT"
] | 42
|
2021-07-06T05:38:23.000Z
|
2022-03-04T18:09:30.000Z
|
hasdrubal/visitors/type_var_resolver.py
|
Armani-T/hasdrubal
|
7fac381b866114533e589e964ec7c27adbd1deff
|
[
"MIT"
] | null | null | null |
from typing import Container
from asts.typed import Name as TypedName
from asts import base, visitor, types_ as types
PREDEFINED_TYPES: Container[str] = (
",",
"->",
"Bool",
"Float",
"Int",
"List",
"String",
"Unit",
)
def resolve_type_vars(
    node: base.ASTNode,
    defined_types: Container[str] = PREDEFINED_TYPES,
) -> base.ASTNode:
    """Rewrite undefined `TypeName`s in the AST into `TypeVar`s.

    Parameters
    ----------
    node: ASTNode
        The AST to rewrite.
    defined_types: Container[str] = PREDEFINED_TYPES
        Names found here stay `TypeName`s; any other `TypeName` is
        converted into a `TypeVar`.

    Returns
    -------
    ASTNode
        The AST with the appropriate `TypeName`s converted.
    """
    return TypeVarResolver(defined_types).run(node)
class TypeVarResolver(visitor.BaseASTVisitor[base.ASTNode]):
    """AST visitor that turns undefined `TypeName`s into `TypeVar`s.

    `defined_types` acts as a symbol table: a `TypeName` whose value
    appears in it is kept as-is; any other `TypeName` is rewritten to a
    `TypeVar`.

    Attributes
    ----------
    defined_types: Container[str]
        The identifiers that are known to actually be type names.
    """

    def __init__(self, defined_types: Container[str] = PREDEFINED_TYPES) -> None:
        self.defined_types: Container[str] = defined_types

    def visit_block(self, node: base.Block) -> base.Block:
        new_body = [expr.visit(self) for expr in node.body]
        return base.Block(node.span, new_body)

    def visit_cond(self, node: base.Cond) -> base.Cond:
        pred = node.pred.visit(self)
        cons = node.cons.visit(self)
        else_ = node.else_.visit(self)
        return base.Cond(node.span, pred, cons, else_)

    def visit_define(self, node: base.Define) -> base.Define:
        target = node.target.visit(self)
        value = node.value.visit(self)
        return base.Define(node.span, target, value)

    def visit_func_call(self, node: base.FuncCall) -> base.FuncCall:
        caller = node.caller.visit(self)
        callee = node.callee.visit(self)
        return base.FuncCall(node.span, caller, callee)

    def visit_function(self, node: base.Function) -> base.Function:
        param = node.param.visit(self)
        body = node.body.visit(self)
        return base.Function(node.span, param, body)

    def visit_name(self, node: base.Name) -> base.Name:
        # Only typed names carry a type to resolve; plain names pass through.
        if not isinstance(node, TypedName):
            return node
        return TypedName(node.span, node.type_.visit(self), node.value)

    def visit_scalar(self, node: base.Scalar) -> base.Scalar:
        return node

    def visit_type(self, node: types.Type) -> types.Type:
        if isinstance(node, types.TypeApply):
            caller = node.caller.visit(self)
            callee = node.callee.visit(self)
            return types.TypeApply(node.span, caller, callee)
        if isinstance(node, types.TypeName) and node.value not in self.defined_types:
            # Unknown type name: treat it as a type variable.
            return types.TypeVar(node.span, node.value)
        if isinstance(node, types.TypeScheme):
            return types.TypeScheme(node.actual_type.visit(self), node.bound_types)
        return node

    def visit_vector(self, node: base.Vector) -> base.Vector:
        elements = [item.visit(self) for item in node.elements]
        return base.Vector(node.span, node.vec_type, elements)
| 31.183486
| 86
| 0.626655
| 424
| 3,399
| 4.938679
| 0.268868
| 0.064947
| 0.045845
| 0.057307
| 0.138968
| 0.099809
| 0.043935
| 0.043935
| 0.043935
| 0.043935
| 0
| 0
| 0.259194
| 3,399
| 108
| 87
| 31.472222
| 0.831612
| 0.208591
| 0
| 0.169231
| 0
| 0
| 0.01121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169231
| false
| 0
| 0.046154
| 0.107692
| 0.446154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
fd8bcae4d3f693f9766ffc4188aca7183ade4a41
| 980
|
py
|
Python
|
python/leetcode/401.py
|
ParkinWu/leetcode
|
b31312bdefbb2be795f3459e1a76fbc927cab052
|
[
"MIT"
] | null | null | null |
python/leetcode/401.py
|
ParkinWu/leetcode
|
b31312bdefbb2be795f3459e1a76fbc927cab052
|
[
"MIT"
] | null | null | null |
python/leetcode/401.py
|
ParkinWu/leetcode
|
b31312bdefbb2be795f3459e1a76fbc927cab052
|
[
"MIT"
] | null | null | null |
# 二进制手表顶部有 4 个 LED 代表小时(0-11),底部的 6 个 LED 代表分钟(0-59)。
#
# 每个 LED 代表一个 0 或 1,最低位在右侧。
#
#
#
# 例如,上面的二进制手表读取 “3:25”。
#
# 给定一个非负整数 n 代表当前 LED 亮着的数量,返回所有可能的时间。
#
# 案例:
#
# 输入: n = 1
# 返回: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
#
#
# 注意事项:
#
# 输出的顺序没有要求。
# 小时不会以零开头,比如 “01:00” 是不允许的,应为 “1:00”。
# 分钟必须由两位数组成,可能会以零开头,比如 “10:2” 是无效的,应为 “10:02”。
#
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/binary-watch
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
from typing import List
class Solution:
    def numOfBin(self, pow: int, n: int) -> List[int]:
        """Return every value with exactly `n` bits set among bit
        positions 0..`pow` (inclusive).

        Fixed: the original pruning condition ``pow <= n`` wrongly
        discarded valid combinations (e.g. numOfBin(1, 1) returned []
        instead of [2, 1]).  There are ``pow + 1`` usable positions, so
        the search is impossible only when ``n > pow + 1``.
        """
        if n == 0:
            return [0]
        if n > pow + 1:  # not enough bit positions left
            return []
        if pow == 0:  # n must be 1 here
            return [1]
        # Either bit `pow` is set (recurse with n - 1 bits remaining) or not.
        with_high_bit = [x + 2 ** pow for x in self.numOfBin(pow - 1, n - 1)]
        return with_high_bit + self.numOfBin(pow - 1, n)

    def readBinaryWatch(self, num: int) -> List[str]:
        """Return all times the binary watch can show with `num` LEDs lit.

        Hours use 4 LEDs (0-11), minutes use 6 LEDs (0-59).  Hours have
        no leading zero; minutes are always two digits.  (Previously
        unimplemented: the body was ``pass``.)
        """
        return [
            '%d:%02d' % (h, m)
            for h in range(12)
            for m in range(60)
            if bin(h).count('1') + bin(m).count('1') == num
        ]
if __name__ == '__main__':
    # Ad-hoc smoke test: numbers with 2 bits set among bit positions 0-4.
    solver = Solution()
    print(solver.numOfBin(4, 2))
| 19.6
| 107
| 0.542857
| 156
| 980
| 3.358974
| 0.544872
| 0.015267
| 0.057252
| 0.061069
| 0.064886
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093407
| 0.257143
| 980
| 49
| 108
| 20
| 0.626374
| 0.445918
| 0
| 0
| 0
| 0
| 0.015534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.0625
| 0.0625
| 0
| 0.5
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fd8c2c7eeb82fbf804fe620863dfe2ae2700ad4d
| 337
|
py
|
Python
|
setup.py
|
xoolive/cartotools
|
2d6217fde9dadcdb860603cd4b33814b99ae451e
|
[
"MIT"
] | 3
|
2018-01-09T10:53:24.000Z
|
2020-06-04T16:04:52.000Z
|
setup.py
|
xoolive/cartotools
|
2d6217fde9dadcdb860603cd4b33814b99ae451e
|
[
"MIT"
] | 1
|
2018-12-16T13:49:06.000Z
|
2019-02-19T20:23:19.000Z
|
setup.py
|
xoolive/cartotools
|
2d6217fde9dadcdb860603cd4b33814b99ae451e
|
[
"MIT"
] | 1
|
2018-01-09T11:00:39.000Z
|
2018-01-09T11:00:39.000Z
|
from setuptools import setup
# Packaging metadata for cartotools.  Sub-packages are listed
# explicitly rather than discovered with find_packages().
setup(
    name="cartotools",
    version="1.2.1",
    description="Making cartopy suit my needs",
    license="MIT",
    packages=[
        "cartotools",
        "cartotools.crs",
        "cartotools.img_tiles",
        "cartotools.osm",
    ],
    author="Xavier Olive",
    # NOTE(review): cartopy itself is not listed here even though the
    # description mentions it — presumably expected to be pre-installed;
    # verify before publishing.
    install_requires=['pandas']
)
| 19.823529
| 47
| 0.596439
| 34
| 337
| 5.852941
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 0.261128
| 337
| 16
| 48
| 21.0625
| 0.787149
| 0
| 0
| 0
| 0
| 0
| 0.362018
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd8e56403a239049d047cb2ae7cdbf57d5c4d0b6
| 3,144
|
py
|
Python
|
CLE/Module_DeploymentMonitoring/urls.py
|
CherBoon/Cloudtopus
|
41e4b3743d8f5f988373d14937a1ed8dbdf92afb
|
[
"MIT"
] | 3
|
2019-03-04T02:55:25.000Z
|
2021-02-13T09:00:52.000Z
|
CLE/Module_DeploymentMonitoring/urls.py
|
MartinTeo/Cloudtopus
|
ebcc57cdf10ca2b73343f03abb33a12ab9c6edef
|
[
"MIT"
] | 6
|
2019-02-28T08:54:25.000Z
|
2022-03-02T14:57:04.000Z
|
CLE/Module_DeploymentMonitoring/urls.py
|
MartinTeo/Cloudtopus
|
ebcc57cdf10ca2b73343f03abb33a12ab9c6edef
|
[
"MIT"
] | 2
|
2019-02-28T08:52:15.000Z
|
2019-09-24T18:32:01.000Z
|
from django.contrib import admin
from Module_DeploymentMonitoring import views
from django.urls import path,re_path
# URL routes for the IT Operations Lab module. Instructor routes live
# under instructor/..., student routes under student/....
urlpatterns = [
    # Core setup/monitor/deploy pages.
    path('instructor/ITOperationsLab/setup/awskeys/',views.faculty_Setup_GetAWSKeys,name='itopslab_setup_AWSKeys'),
    path('instructor/ITOperationsLab/monitor/',views.faculty_Monitor_Base,name='itopslab_monitor'),
    path('student/ITOperationsLab/deploy/',views.student_Deploy_Base,name='itopslab_studeploy'),
    path('student/ITOperationsLab/deploy/<str:course_title>/2',views.student_Deploy_Upload,name='itopslab_studeployUpload'),
    path('student/ITOperationsLab/monitor/',views.student_Monitor_Base,name='itopslab_stumonitor'),
    # For adding deployment packages into system
    path('instructor/ITOperationsLab/setup/deployment_package/',views.faculty_Setup_GetGitHubLinks,name='dp_list'),
    path('instructor/ITOperationsLab/setup/deployment_package/create/', views.faculty_Setup_AddGitHubLinks, name='dp_create'),
    path('instructor/ITOperationsLab/setup/deployment_package/<str:course_title>/<str:pk>/update/', views.faculty_Setup_UpdateGitHubLinks, name='dp_update'),
    path('instructor/ITOperationsLab/setup/deployment_package/<str:course_title>/<str:pk>/delete/', views.faculty_Setup_DeleteGitHubLinks, name='dp_delete'),
    path('instructor/ITOperationsLab/setup/deployment_package/<str:course_title>/delete/all/', views.faculty_Setup_DeleteAllGitHubLinks, name='dp_delete_all'),
    # For retrieving and sharing of AMI
    path('instructor/ITOperationsLab/setup/',views.faculty_Setup_Base,name='itopslab_setup'),
    path('instructor/ITOperationsLab/setup/ami/get/',views.faculty_Setup_GetAMI,name='itopslab_setup_AMI_get'),
    path('instructor/ITOperationsLab/setup/ami/accounts/get/',views.faculty_Setup_GetAMIAccounts,name='itopslab_setup_AMI_Accounts_get'),
    path('instructor/ITOperationsLab/setup/ami/accounts/share/',views.faculty_Setup_ShareAMI,name='itopslab_setup_AMI_Accounts_share'),
    # For standard student deployment page
    path('student/ITOperationsLab/deploy/standard/',views.student_Deploy_Standard_Base,name='itopslab_studeploy_standard'),
    path('student/ITOperationsLab/deploy/standard/deployment_package/',views.student_Deploy_Standard_GetDeploymentPackages,name='dp_list_student'),
    path('student/ITOperationsLab/deploy/standard/account/',views.student_Deploy_Standard_AddAccount,name='itopslab_studeploy_standard_AddAccount'),
    path('student/ITOperationsLab/deploy/standard/server/',views.student_Deploy_Standard_GetIPs,name='server_list'),
    path('student/ITOperationsLab/deploy/standard/server/create/',views.student_Deploy_Standard_AddIPs,name='server_create'),
    path('student/ITOperationsLab/deploy/standard/server/<str:course_title>/<str:pk>/update/',views.student_Deploy_Standard_UpdateIPs,name='server_update'),
    path('student/ITOperationsLab/deploy/standard/server/<str:course_title>/<str:pk>/delete/',views.student_Deploy_Standard_DeleteIPs,name='server_delete'),
    path('student/ITOperationsLab/deploy/standard/server/<str:course_title>/delete/all/',views.student_Deploy_Standard_DeleteAllIPs,name='server_delete_all'),
]
| 89.828571
| 159
| 0.818702
| 375
| 3,144
| 6.586667
| 0.194667
| 0.090688
| 0.12915
| 0.137652
| 0.392713
| 0.306478
| 0.216599
| 0.159919
| 0.159919
| 0.109312
| 0
| 0.000336
| 0.054071
| 3,144
| 34
| 160
| 92.470588
| 0.830195
| 0.035941
| 0
| 0
| 0
| 0
| 0.533532
| 0.468781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fd9c0ebbfa6af60ce30b95d414b6de4fd9d68823
| 479
|
py
|
Python
|
api/utils/signal_hup.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
api/utils/signal_hup.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
api/utils/signal_hup.py
|
AutoCoinDCF/NEW_API
|
f4abc48fff907a0785372b941afcd67e62eec825
|
[
"Apache-2.0"
] | null | null | null |
import signal
import ConfigParser
def get_config():
    """Read config.cfg and print the [test] section's `name` value."""
    parser = ConfigParser.ConfigParser()
    parser.read("config.cfg")
    print(parser.get("test", "name"))
def update_config(signum, frame):
    """SIGHUP handler: announce the reload and re-read the config file."""
    print("update config")
    get_config()
def ctrl_c(signum, frame):
    """SIGINT handler: report the Ctrl+C and exit with status 1."""
    print("input ctrl c")
    exit(1)
# Catch SIGHUP: re-read the configuration (e.g. after editing config.cfg).
signal.signal(signal.SIGHUP, update_config)
# Catch Ctrl+C (SIGINT): exit cleanly.
signal.signal(signal.SIGINT, ctrl_c)
print("test signal")
get_config()
# Wait for signals.  Fixed: the original `while True: pass` busy-waited
# and pinned a CPU core at 100%; signal.pause() sleeps until a signal
# arrives (POSIX-only, which is fine since SIGHUP is POSIX-only too).
while True:
    signal.pause()
| 14.96875
| 43
| 0.682672
| 65
| 479
| 4.923077
| 0.415385
| 0.15
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002558
| 0.183716
| 479
| 31
| 44
| 15.451613
| 0.815857
| 0.029228
| 0
| 0.105263
| 0
| 0
| 0.116883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0.052632
| 0.105263
| 0
| 0.263158
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fd9ef32448c7395440ff57be12cda76c221914ac
| 12,319
|
py
|
Python
|
maccli/service/instance.py
|
manageacloud/manageacloud-cli
|
1b7d9d5239f9e51f97d0377d223db0f58ca0ca7c
|
[
"MIT"
] | 6
|
2015-09-21T09:02:04.000Z
|
2017-02-08T23:40:18.000Z
|
maccli/service/instance.py
|
manageacloud/manageacloud-cli
|
1b7d9d5239f9e51f97d0377d223db0f58ca0ca7c
|
[
"MIT"
] | 3
|
2015-11-03T01:44:29.000Z
|
2016-03-25T08:36:15.000Z
|
maccli/service/instance.py
|
manageacloud/manageacloud-cli
|
1b7d9d5239f9e51f97d0377d223db0f58ca0ca7c
|
[
"MIT"
] | 4
|
2015-07-06T01:46:13.000Z
|
2019-01-10T23:08:19.000Z
|
import os
import tempfile
import time
import urllib
import urllib.parse
import pexpect
import maccli.dao.api_instance
import maccli.helper.cmd
import maccli.helper.simplecache
import maccli.helper.metadata
from maccli.helper.exception import InstanceDoesNotExistException, InstanceNotReadyException
def list_instances(name_or_ids=None):
    """Return the account's instances, optionally filtered.

    When `name_or_ids` is given (one string or a list of strings), only
    instances whose 'servername' or 'id' appears in it are returned.
    """
    raw_instances = maccli.dao.api_instance.get_list()
    if name_or_ids is None:
        return raw_instances
    # Normalise a single string into a one-element list.
    wanted = [name_or_ids] if isinstance(name_or_ids, str) else name_or_ids
    return [
        inst for inst in raw_instances
        if inst['servername'] in wanted or inst['id'] in wanted
    ]
def list_by_infrastructure(name, version):
    """Return the instances tagged with the given infrastructure.

    An instance matches when ``metadata.infrastructure.name`` equals
    `name` and ``metadata.infrastructure.version`` equals `version`;
    any missing key is treated as an empty string, exactly as before.

    :param name: infrastructure name to match
    :param version: infrastructure version to match
    :return: list of matching raw instance dicts
    """
    filtered_instances = []
    for instance in maccli.dao.api_instance.get_list():
        # Idiom fix: chained .get() replaces the original's duplicated
        # "'k' in d and 'k2' in d['k'] and ..." ladders.
        infrastructure = instance.get('metadata', {}).get('infrastructure', {})
        if (infrastructure.get('name', "") == name
                and infrastructure.get('version', "") == version):
            filtered_instances.append(instance)
    return filtered_instances
def ssh_command_instance(instance_id, cmd):
    """Run `cmd` on the instance over ssh and return (rc, stdout, stderr).

    Successful results are cached under a key derived from the instance
    id and a hash of the command, so repeating the same command skips
    the ssh round-trip.

    :raises InstanceDoesNotExistException: no credentials exist for `instance_id`
    :raises InstanceNotReadyException: the instance has neither a private
        key nor a password yet
    """
    rc, stdout, stderr = -1, "", ""
    cache_hash = maccli.helper.simplecache.hash_value(cmd)
    cache_key = 'ssh_%s_%s' % (instance_id, cache_hash)
    cached_value = maccli.helper.simplecache.get(cache_key) # read from cache
    if cached_value is not None:
        rc = cached_value['rc']
        stdout = cached_value['stdout']
        stderr = cached_value['stderr']
    else:
        instance = maccli.dao.api_instance.credentials(instance_id)
        # strict host check (disabled globally via maccli.disable_strict_host_check)
        ssh_params = ""
        if maccli.disable_strict_host_check:
            maccli.logger.debug("SSH String Host Checking disabled")
            ssh_params = "-o 'StrictHostKeyChecking no'"
        if instance is None:
            raise InstanceDoesNotExistException(instance_id)
        if not (instance['privateKey'] or instance['password']):
            raise InstanceNotReadyException(instance_id)
        if instance is not None and (instance['privateKey'] or instance['password']):
            if instance['privateKey']:
                # Key-based auth: write the key to a temp file for `ssh -i`.
                fd, path = tempfile.mkstemp()
                try:
                    with open(path, "wb") as f:
                        f.write(bytes(instance['privateKey'], encoding='utf8'))
                        f.close()
                    # openssh 7.6, it defaults to a new more secure format.
                    command = "ssh-keygen -f %s -p -N ''" % f.name
                    maccli.helper.cmd.run(command)
                    command = "ssh %s %s@%s -i %s %s" % (ssh_params, instance['user'], instance['ip'], f.name, cmd)
                    rc, stdout, stderr = maccli.helper.cmd.run(command)
                finally:
                    # Always remove the key material from disk.
                    os.close(fd)
                    os.remove(path)
            else:
                """ Authentication with password """
                command = "ssh %s %s@%s %s" % (ssh_params, instance['user'], instance['ip'], cmd)
                child = pexpect.spawn(command)
                (rows, cols) = gettermsize()
                child.setwinsize(rows, cols) # set the child to the size of the user's term
                i = child.expect(['.* password:', "yes/no", '(.*)'], timeout=60)
                if i == 0:
                    child.sendline(instance['password'])
                    child.expect(pexpect.EOF, timeout=120)
                elif i == 1:
                    # First connection: accept the host key, then send the password.
                    child.sendline("yes")
                    child.expect('.* password:', timeout=60)
                    child.sendline(instance['password'])
                    child.expect(pexpect.EOF, timeout=120)
                elif i == 2:
                    child.expect(pexpect.EOF, timeout=120)
                output = child.before
                while child.isalive():
                    time.sleep(0.1)
                # NOTE(review): exitstatus can be None if the child was
                # killed by a signal — confirm callers tolerate that.
                rc = child.exitstatus
                # HACK: we do not really capture stderr
                if rc:
                    stdout = ""
                    stderr = output
                else:
                    stdout = output
                    stderr = ""
        # save cache (only successful runs, i.e. falsy rc)
        if not rc:
            cached_value = {
                'rc': rc,
                'stdout': stdout,
                'stderr': stderr
            }
            maccli.helper.simplecache.set_value(cache_key, cached_value)
    return rc, stdout, stderr
def ssh_interactive_instance(instance_id):
    """Open an interactive ssh session to an existing instance.

    Uses the instance's private key when available (via a temp key
    file), otherwise drives a password login through pexpect and hands
    the terminal to the user.  Returns None.
    """
    stdout = None
    instance = maccli.dao.api_instance.credentials(instance_id)
    # strict host check (disabled globally via maccli.disable_strict_host_check)
    ssh_params = ""
    maccli.logger.debug("maccli.strict_host_check: " + str(maccli.disable_strict_host_check))
    if maccli.disable_strict_host_check:
        maccli.logger.debug("SSH String Host Checking disabled")
        ssh_params = "-o 'StrictHostKeyChecking no'"
    if instance is not None:
        if instance['privateKey']:
            """ Authentication with private key """
            fd, path = tempfile.mkstemp()
            try:
                with open(path, "wb") as f:
                    f.write(bytes(instance['privateKey'], encoding='utf8'))
                    f.close()
                command = "ssh %s %s@%s -i %s " % (ssh_params, instance['user'], instance['ip'], f.name)
                os.system(command)
            finally:
                # Always remove the key material from disk.
                os.close(fd)
                os.remove(path)
        else:
            """ Authentication with password """
            command = "ssh %s %s@%s" % (ssh_params, instance['user'], instance['ip'])
            child = pexpect.spawn(command)
            (rows, cols) = gettermsize()
            child.setwinsize(rows, cols) # set the child to the size of the user's term
            # Expect either a password prompt, a host-key question, or a shell prompt.
            i = child.expect(['.* password:', "yes/no", '[#\$] '], timeout=60)
            if i == 0:
                child.sendline(instance['password'])
                child.interact()
            elif i == 1:
                # First connection: accept the host key, then log in.
                child.sendline("yes")
                child.expect('.* password:', timeout=60)
                child.sendline(instance['password'])
                child.interact()
            elif i == 2:
                child.sendline("\n")
                child.interact()
    return stdout
def gettermsize():
    """Return the terminal size as a (rows, cols) tuple of ints.

    Fixed: the original shelled out to ``stty size``, which is
    non-portable and raises a ValueError when stdout is not a tty
    (e.g. under cron/CI).  ``shutil.get_terminal_size`` queries the
    terminal directly and falls back to $COLUMNS/$LINES or 80x24.
    """
    import shutil  # local import keeps the module's import block untouched
    size = shutil.get_terminal_size()
    return size.lines, size.columns
def create_instance(cookbook_tag, bootstrap, deployment, location, servername, provider, release, release_version,
                    branch, hardware, lifespan,
                    environments, hd, port, net, metadata=None, applyChanges=True):
    """Create a new instance in the account and return the raw response.

    Thin pass-through to ``maccli.dao.api_instance.create``; see that
    function for the meaning of each parameter.
    """
    return maccli.dao.api_instance.create(
        cookbook_tag, bootstrap, deployment, location, servername,
        provider, release, release_version, branch, hardware, lifespan,
        environments, hd, port, net, metadata, applyChanges)
def destroy_instance(instanceid):
    """Destroy the server identified by `instanceid`.

    :param instanceid: id of the instance to destroy
    :return: raw API response
    """
    return maccli.dao.api_instance.destroy(instanceid)
def credentials(servername, session_id):
    """Fetch the server credentials: public ip, username, password and
    private key.

    :param servername: server to look up
    :param session_id: current session identifier
    :return: raw API response
    """
    return maccli.dao.api_instance.credentials(servername, session_id)
def facts(instance_id):
    """Return the facts reported about the instance's system.

    :param instance_id: id of the instance
    :return: raw API response
    """
    return maccli.dao.api_instance.facts(instance_id)
def log(instance_id, follow):
    """Return the server logs; stream them when `follow` is truthy.

    :param instance_id: id of the instance
    :param follow: truthy to follow the log (tail -f style)
    :return: raw API response
    """
    fetch = (maccli.dao.api_instance.log_follow if follow
             else maccli.dao.api_instance.log)
    return fetch(instance_id)
def lifespan(instance_id, amount):
    """Set a new lifespan for the instance.

    :param instance_id: id of the instance
    :param amount: new lifespan value
    :return: raw API response
    """
    return maccli.dao.api_instance.update(instance_id, amount)
def update_configuration(cookbook_tag, bootstrap, instance_id, new_metadata=None):
    """Update the server's configuration with the given cookbook.

    :param cookbook_tag: configuration cookbook to apply
    :param bootstrap: bootstrap script/settings
    :param instance_id: id of the instance to reconfigure
    :param new_metadata: optional replacement metadata
    :return: raw API response
    """
    return maccli.dao.api_instance.update_configuration(cookbook_tag, bootstrap, instance_id, new_metadata)
def create_instances_for_role(root, infrastructure, roles, infrastructure_key, quiet):
    """Create all the instances for the given tier.

    Returns a dict mapping the infrastructure's role name to the list of
    instances created for it.
    """
    maccli.logger.debug("Processing infrastructure %s" % infrastructure_key)
    maccli.logger.debug("Type role")
    role_name = infrastructure['role']
    role_raw = roles[role_name]["instance create"]
    metadata = maccli.helper.metadata.metadata_instance(
        root, infrastructure_key, role_name, role_raw, infrastructure)
    created = create_tier(role_raw, infrastructure, metadata, quiet)
    return {role_name: created}
def create_tier(role, infrastructure, metadata, quiet):
    """Create the instances that represent a role in a given infrastructure.

    Optional settings are read from `role` and `infrastructure`; a
    missing key simply yields the default (idiom fix: the original used
    twelve separate try/except-KeyError-pass blocks where ``dict.get``
    gives the same result).

    :param role: the role definition (its "instance create" section)
    :param infrastructure: the infrastructure/tier definition
    :param metadata: metadata attached to every created instance
    :param quiet: accepted for interface compatibility (not used here)
    :return: list of raw instance responses
    """
    lifespan = infrastructure.get('lifespan')
    hardware = infrastructure.get("hardware", "")
    environment = maccli.helper.metadata.get_environment(role, infrastructure)
    hd = role.get("hd")
    configuration = role.get("configuration")
    bootstrap = role.get("bootstrap bash")
    port = infrastructure.get("port")
    net = infrastructure.get("net")
    deployment = infrastructure.get("deployment")
    release = infrastructure.get("release")
    release_version = infrastructure.get("release_version")
    branch = infrastructure.get("branch")
    provider = infrastructure.get("provider")
    amount = infrastructure.get('amount', 1)
    instances = []
    for _ in range(amount):
        # applyChanges=False: instances are created but changes are not
        # applied here (mirrors the original call's final argument).
        instance = maccli.dao.api_instance.create(
            configuration, bootstrap, deployment,
            infrastructure["location"], infrastructure["name"],
            provider, release, release_version, branch, hardware, lifespan,
            environment, hd, port, net, metadata, False)
        instances.append(instance)
        maccli.logger.info("Creating instance '%s'" % (instance['id']))
    return instances
| 29.973236
| 119
| 0.583976
| 1,277
| 12,319
| 5.51527
| 0.175411
| 0.031237
| 0.023853
| 0.039756
| 0.39983
| 0.348147
| 0.314213
| 0.302996
| 0.299872
| 0.250887
| 0
| 0.0038
| 0.316341
| 12,319
| 410
| 120
| 30.046341
| 0.832463
| 0.092215
| 0
| 0.403101
| 0
| 0
| 0.082694
| 0.006382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054264
| false
| 0.085271
| 0.042636
| 0
| 0.151163
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fdad3ab7cf57f8eea008adff9a2f0ea59bd908a4
| 1,553
|
py
|
Python
|
signup_instagram.py
|
cnfreitax/all_scrapers
|
35597cd3845c64b589cb2937ea7ea70ea4cd3286
|
[
"Apache-2.0"
] | null | null | null |
signup_instagram.py
|
cnfreitax/all_scrapers
|
35597cd3845c64b589cb2937ea7ea70ea4cd3286
|
[
"Apache-2.0"
] | null | null | null |
signup_instagram.py
|
cnfreitax/all_scrapers
|
35597cd3845c64b589cb2937ea7ea70ea4cd3286
|
[
"Apache-2.0"
] | null | null | null |
"""
use of script to create the login method
use a fake account!
"""
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup as bs
class Login:
    """Automates Instagram login and profile scraping with Selenium.

    NOTE(review): intended for throwaway/test accounts only (per the module
    docstring). The XPaths target Instagram's obfuscated CSS class names and
    will break whenever Instagram ships a new frontend build.
    """

    def __init__(self, email, password, idPage, driver_path='/home/ekoar/chromedriver'):
        """Start a Chrome session.

        :param email: account e-mail/username typed into the login form.
        :param password: account password.
        :param idPage: profile/page handle searched for after login.
        :param driver_path: path to the chromedriver binary. Parameterized so
            the class works outside the original author's machine; the old
            hard-coded path is kept as the default for backward compatibility.
        """
        self.browser = webdriver.Chrome(driver_path)
        self.email = email
        self.password = password
        self.idPage = idPage

    def signup(self, url):
        """Open *url*, submit the login form and dismiss the follow-up popup."""
        self.browser.get(url)
        sleep(0.5)
        emailInput = self.browser.find_element_by_xpath('//input[@class = "_2hvTZ pexuQ zyHYP"]')
        passwordInput = self.browser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/article/div/div[1]/div/form/div[3]/div/label/input')
        emailInput.send_keys(self.email)
        sleep(0.2)
        passwordInput.send_keys(self.password)
        # click() returns None, so there is no point binding its result.
        self.browser.find_element_by_xpath('//button[@class = "sqdOP L3NKy y3zKF "]').click()
        sleep(4)
        self.browser.find_element_by_xpath('//button[@class = "aOOlW HoLwm "]').click()

    def searchPage(self):
        """Type ``self.idPage`` into the search box and open the first hit."""
        buttonFind = self.browser.find_element_by_xpath('//input[@class = "XTCLo x3qfX "]')
        buttonFind.send_keys(self.idPage)
        sleep(0.9)
        self.browser.find_element_by_xpath('//a[@class = "yCE8d "]').click()

    def scraper(self):
        """Collect post containers from the current page.

        NOTE(review): ``lista_links`` is local and never returned, so callers
        currently get no data back — kept as-is to preserve the interface,
        but this method looks unfinished.
        """
        lista_links = []
        bsObj = bs(self.browser.page_source, 'html.parser')
        sleep(3)
        publicacoes = bsObj.find_all('div', {'class': 'Nnq7C weEfm'})
        lista_links.append(publicacoes)
| 35.295455
| 151
| 0.647778
| 197
| 1,553
| 4.954315
| 0.461929
| 0.101434
| 0.092213
| 0.135246
| 0.221311
| 0.221311
| 0.161885
| 0.161885
| 0
| 0
| 0
| 0.013923
| 0.21378
| 1,553
| 43
| 152
| 36.116279
| 0.785422
| 0.038635
| 0
| 0
| 0
| 0.032258
| 0.213468
| 0.076094
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0.129032
| 0.096774
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fdae4d589a0bfe5706d084ebb885895cfa2070d3
| 1,479
|
py
|
Python
|
setup.py
|
polishmatt/sputr
|
7611d40090c8115dff69912725efc506414ac47a
|
[
"MIT"
] | 1
|
2017-02-13T23:09:18.000Z
|
2017-02-13T23:09:18.000Z
|
setup.py
|
polishmatt/sputr
|
7611d40090c8115dff69912725efc506414ac47a
|
[
"MIT"
] | 6
|
2017-02-18T20:14:32.000Z
|
2017-09-27T19:07:06.000Z
|
setup.py
|
polishmatt/sputr
|
7611d40090c8115dff69912725efc506414ac47a
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import importlib
# The version string lives in the package itself so it is defined in one place.
version = importlib.import_module('sputr.config').version

# All project metadata is collected in one mapping and expanded into setup();
# behaviour is identical to passing the keyword arguments inline.
_metadata = dict(
    name='sputr',
    version=version,
    description='Simple Python Unit Test Runner',
    long_description="An intuitive command line and Python package interface for Python's unit testing framework.",
    author='Matt Wisniewski',
    author_email='[email protected]',
    license='MIT',
    url='https://github.com/polishmatt/sputr',
    keywords=['testing'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Testing',
        'Topic :: Utilities',
    ],
    platforms=['unix', 'linux'],
    packages=['sputr'],
    install_requires=['click==6.7'],
    entry_points={
        'console_scripts': [
            'sputr = sputr.cli:cli'
        ],
    },
)

setup(**_metadata)
| 31.468085
| 115
| 0.597025
| 143
| 1,479
| 6.132867
| 0.566434
| 0.173318
| 0.22805
| 0.148233
| 0.061574
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013749
| 0.262339
| 1,479
| 46
| 116
| 32.152174
| 0.790101
| 0
| 0
| 0.090909
| 0
| 0
| 0.563895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068182
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fdb1f945768c5695ac9336664448e0864ebfec52
| 4,587
|
py
|
Python
|
oy/models/mixins/polymorphic_prop.py
|
mush42/oy-cms
|
66f2490be7eab9a692a68bb635099ba21d5944ae
|
[
"MIT"
] | 5
|
2019-02-12T08:54:46.000Z
|
2021-03-15T09:22:44.000Z
|
oy/models/mixins/polymorphic_prop.py
|
mush42/oy-cms
|
66f2490be7eab9a692a68bb635099ba21d5944ae
|
[
"MIT"
] | 2
|
2020-04-30T01:27:08.000Z
|
2020-07-16T18:04:16.000Z
|
oy/models/mixins/polymorphic_prop.py
|
mush42/oy-cms
|
66f2490be7eab9a692a68bb635099ba21d5944ae
|
[
"MIT"
] | 3
|
2019-10-16T05:53:31.000Z
|
2021-10-11T09:37:16.000Z
|
# -*- coding: utf-8 -*-
"""
oy.models.mixins.polymorphic_prop
~~~~~~~~~~
Provides helper mixin classes for special sqlalchemy models
:copyright: (c) 2018 by Musharraf Omer.
:license: MIT, see LICENSE for more details.
"""
import sqlalchemy.types as types
from sqlalchemy import literal_column, event
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.ext.declarative import declared_attr
from oy.boot.sqla import db
class ProxiedDictMixin(object):
    """Adds obj[key] access to a mapped class.

    This class basically proxies dictionary access to an attribute
    called ``_proxied``. The class which inherits this class
    should have an attribute called ``_proxied`` which points to a dictionary.
    """

    def __len__(self):
        return len(self._proxied)

    def __iter__(self):
        return iter(self._proxied)

    def __getitem__(self, key):
        return self._proxied[key]

    def __contains__(self, key):
        return key in self._proxied

    def get(self, key, default=None):
        """Return the value for *key*, or *default* when absent.

        The *default* parameter generalizes the old single-argument form
        (which always fell back to ``None``) to match ``dict.get``;
        existing callers are unaffected.
        """
        return self._proxied.get(key, default)

    def __setitem__(self, key, value):
        self._proxied[key] = value

    def __delitem__(self, key):
        del self._proxied[key]
class ImmutableProxiedDictMixin(ProxiedDictMixin):
    """Like :class:`ProxiedDictMixin` but disables the addition of
    new keys and deletion of existing ones
    """

    def __setitem__(self, key, value):
        # Only keys that already exist may be overwritten.
        if key not in self._proxied:
            # Fixed the misspelled error message (was "Cann't Set Attribute");
            # the exception type is unchanged so callers are unaffected.
            raise AttributeError("Can't set attribute")
        self._proxied[key] = value

    def __delitem__(self, key):
        raise AttributeError("Deleting is not allowed")
class PolymorphicVerticalProperty(object):
    """A key/value pair with polymorphic value storage.

    The class which is mapped should indicate typing information
    within the "info" dictionary of mapped Column objects.
    """

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value

    @hybrid_property
    def value(self):
        # ``type_map`` maps python type / discriminator -> (column name,
        # discriminator); it is populated by the ``mapper_configured``
        # listener defined later in this module.
        fieldname, discriminator = self.type_map[self.type]
        if fieldname is None:
            return None
        else:
            return getattr(self, fieldname)

    @value.setter
    def value(self, value):
        # Route the value to the typed column matching its python type and
        # record which column was used in ``self.type``.
        py_type = type(value)
        fieldname, discriminator = self.type_map[py_type]
        self.type = discriminator
        if fieldname is not None:
            setattr(self, fieldname, value)

    @value.deleter
    def value(self):
        # NOTE(review): ``_set_value`` is not defined on this class or in
        # this module as shown — deleting ``value`` would raise
        # AttributeError. Confirm where ``_set_value`` comes from, or
        # whether this should be ``self.value = None``.
        self._set_value(None)

    @value.comparator
    class value(PropComparator):
        """A comparator for .value, builds a polymorphic comparison via CASE.
        """

        def __init__(self, cls):
            self.cls = cls

        def _case(self):
            # NOTE(review): ``cast``, ``String``, ``case`` and ``null`` are
            # not imported in this module as shown — verify they are provided
            # elsewhere or add
            # ``from sqlalchemy import cast, case, null, String``.
            pairs = set(self.cls.type_map.values())
            whens = [
                (
                    literal_column("'%s'" % discriminator),
                    cast(getattr(self.cls, attribute), String),
                )
                for attribute, discriminator in pairs
                if attribute is not None
            ]
            return case(whens, self.cls.type, null())

        def __eq__(self, other):
            return self._case() == cast(other, String)

        def __ne__(self, other):
            return self._case() != cast(other, String)

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.value)
@event.listens_for(PolymorphicVerticalProperty, "mapper_configured", propagate=True)
def on_new_class(mapper, cls_):
    """Look for Column objects with type info in them, and work up
    a lookup table.
    """
    # Seed the table so untyped / None values resolve to the "none"
    # discriminator with no backing column.
    lookup = {type(None): (None, "none"), "none": (None, "none")}
    for name in mapper.c.keys():
        column = mapper.c[name]
        if "type" not in column.info:
            continue
        python_type, discriminator = column.info["type"]
        # A column may declare either one python type or several for the
        # same discriminator.
        if type(python_type) in (list, tuple):
            for single_type in python_type:
                lookup[single_type] = (name, discriminator)
        else:
            lookup[python_type] = (name, discriminator)
        lookup[discriminator] = (name, discriminator)
    cls_.type_map = lookup
class DynamicProp(PolymorphicVerticalProperty):
    """Concrete key/value row with one typed ``*_value`` column per supported type.

    Each column's ``info={"type": (python_type, discriminator)}`` tuple feeds
    the ``mapper_configured`` listener, which builds ``type_map`` from it.
    """

    # Property name; required.
    key = db.Column(db.String(128), nullable=False)
    # Discriminator recording which *_value column holds the payload.
    type = db.Column(db.String(64))
    int_value = db.Column(db.Integer, info={"type": (int, "integer")})
    str_value = db.Column(db.Unicode(5120), info={"type": (str, "string")})
    bool_value = db.Column(db.Boolean, info={"type": (bool, "boolean")})
| 29.785714
| 84
| 0.625681
| 549
| 4,587
| 5.030965
| 0.320583
| 0.035844
| 0.018103
| 0.016293
| 0.112962
| 0.055757
| 0.055757
| 0.055757
| 0.029689
| 0
| 0
| 0.004168
| 0.267713
| 4,587
| 153
| 85
| 29.980392
| 0.818101
| 0.189448
| 0
| 0.108696
| 0
| 0
| 0.034015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206522
| false
| 0
| 0.065217
| 0.086957
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fdbbcdb74fb497d41cae48ac2a6300801085c4fc
| 2,053
|
py
|
Python
|
tag.py
|
tom-choi/NLPforPIXIV
|
77e905793c792ec97f196a4da7144456018577c7
|
[
"MIT"
] | 1
|
2022-03-18T09:10:59.000Z
|
2022-03-18T09:10:59.000Z
|
tag.py
|
tom-choi/NLPforPIXIV
|
77e905793c792ec97f196a4da7144456018577c7
|
[
"MIT"
] | null | null | null |
tag.py
|
tom-choi/NLPforPIXIV
|
77e905793c792ec97f196a4da7144456018577c7
|
[
"MIT"
] | null | null | null |
import json
import pandas as pd
import csv

# NOTE(review): removed the unused ``from msilib.schema import Directory`` —
# msilib is a Windows-only stdlib module and the dead import made this script
# crash on any non-Windows machine.

# Load the scraped pixiv metadata dump (a JSON list, one entry per artwork).
with open('./result-#succubus Drawings, Best Fan Art on pixiv, Japan-1647491083659.json', 'r', encoding="utf-8") as f:
    data = json.load(f)

# Count how often every (possibly translated) tag occurs across all artworks.
Tags_Directory = {}
print(f"已經捕捉到{len(data)}條信息,準備將tag新增到字典……")
for entry in data:
    # print(f"idNum:{entry['idNum']} tag數量:{len(entry['tagsWithTransl'])}")
    for tag in entry['tagsWithTransl']:
        Tags_Directory[tag] = Tags_Directory.get(tag, 0) + 1

# print(f"Tags_Directory 字典收錄了以下tags: ")
# for key in Tags_Directory.keys():
#     print(f"{key} : {Tags_Directory[key]}")

# Assign every distinct tag a stable integer id (insertion order) and persist
# the mapping for later runs.
pd_Tags_Directory = []
pd_Tags_Directory_ID = {}
print("製作字典序……")
for i, key in enumerate(Tags_Directory.keys()):
    pd_Tags_Directory.append(key)
    pd_Tags_Directory_ID[key] = i

with open('./pd_Tags_Directory_ID.json', 'w+', encoding="utf-8") as f:
    json.dump(pd_Tags_Directory_ID, f)
print(f"字典序完成(一共 {len(Tags_Directory)} 個tags)")

# The tag-bigram counting / CSV export below is work in progress and was
# already disabled by the author; kept for reference.
# for i in range(0,n):
#     for j in range(0,len(data[i]['tagsWithTransl'])):
#         if (data[i]['tagsWithTransl'][j] in Tags_Directory):
#             Tags_Directory[data[i]['tagsWithTransl'][j]] += 1
#         else:
#             Tags_Directory[data[i]['tagsWithTransl'][j]] = 1
# k = 0
# times = 1
# print(f"進程:統計Tags二元組…… ({k}/{n})")
# Count_Tags = {"kEySSS": pd_Tags_Directory}
# for key in pd_Tags_Directory_ID:
#     Count_Tag = [0 for _ in range(len(pd_Tags_Directory_ID))]
#     for i in range(0,n):
#         if (key not in data[i]['tagsWithTransl']):
#             continue
#         else:
#             for j in range(0,len(data[i]['tagsWithTransl'])):
#                 Count_Tag[pd_Tags_Directory_ID[data[i]['tagsWithTransl'][j]]] += 1
#     Count_Tags[key] = Count_Tag
#     k += 1
#     if (k % (n//100) == 0):
#         print(f"進程:統計Tags二元組…… ({k}/{n}) 已完成{times}%")
#         times += 1
# print(f"統計完成!準備輸出excel檔案")
# #print(pd_Tags_Directory)
# df = pd.DataFrame(Count_Tags)
# df.to_csv('trainning.csv')
# print(df)
# print(df)
| 33.112903
| 117
| 0.649781
| 325
| 2,053
| 4.027692
| 0.236923
| 0.228419
| 0.174179
| 0.106952
| 0.38961
| 0.350649
| 0.282659
| 0.244461
| 0.244461
| 0.218487
| 0
| 0.021326
| 0.154895
| 2,053
| 61
| 118
| 33.655738
| 0.719308
| 0.536775
| 0
| 0
| 0
| 0
| 0.271538
| 0.114504
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.115385
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fdc27871983c6fc23e23bf1a61087b70dff46dd6
| 238
|
py
|
Python
|
test_api/movies/urls.py
|
xm4dn355x/drf_test
|
efdc38afa51d259fcb5781c9f8cc52f93e2fd81b
|
[
"MIT"
] | null | null | null |
test_api/movies/urls.py
|
xm4dn355x/drf_test
|
efdc38afa51d259fcb5781c9f8cc52f93e2fd81b
|
[
"MIT"
] | null | null | null |
test_api/movies/urls.py
|
xm4dn355x/drf_test
|
efdc38afa51d259fcb5781c9f8cc52f93e2fd81b
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
# URL table for the movies app; all endpoints are class-based DRF views.
urlpatterns = [
    path('movie/', views.MovieListView.as_view()),  # list all movies
    path('movie/<int:pk>/', views.MovieDetailView.as_view()),  # one movie by primary key
    path('review/', views.ReviewCreateView.as_view()),  # create a new review
]
| 21.636364
| 61
| 0.684874
| 29
| 238
| 5.517241
| 0.551724
| 0.1125
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138655
| 238
| 10
| 62
| 23.8
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fdc8a16130fbd3365668c8adfa036d8311ee21c2
| 2,205
|
py
|
Python
|
DataBase/Postgres/PostgresTest.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
DataBase/Postgres/PostgresTest.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
DataBase/Postgres/PostgresTest.py
|
InverseLina/python-practice
|
496d2020916d8096a32131cdedd25a4da7b7735e
|
[
"Apache-2.0"
] | null | null | null |
import psycopg2
# encoding=utf-8
__author__ = 'Hinsteny'
def get_conn():
    """Open and return a fresh connection to the local hello_db instance."""
    return psycopg2.connect(database="hello_db", user="hinsteny", password="welcome", host="127.0.0.1", port="5432")
def create_table(conn):
    """Create the COMPANY table if it does not exist yet, then close *conn*."""
    cursor = conn.cursor()
    cursor.execute('''CREATE TABLE if not exists COMPANY
        (ID INT PRIMARY KEY NOT NULL,
        NAME TEXT NOT NULL,
        AGE INT NOT NULL,
        ADDRESS CHAR(50),
        SALARY REAL);''')
    conn.commit()
    conn.close()
def insert_data(conn):
    """Insert sample rows into COMPANY.

    NOTE(review): every INSERT (and the commit) is commented out, so this
    currently only prints a success message and closes the connection —
    confirm whether the inserts should be re-enabled.
    """
    cur = conn.cursor()
    # cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
    #     VALUES (1, 'Paul', 32, 'California', 20000.00 )")
    #
    # cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
    #     VALUES (2, 'Allen', 25, 'Texas', 15000.00 )")
    #
    # cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
    #     VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )")
    #
    # cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
    #     VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )")
    # conn.commit()
    print("Records created successfully")
    conn.close()
def select_data(conn):
    """Print id, name, address and salary for every COMPANY row, then close *conn*."""
    cursor = conn.cursor()
    cursor.execute("SELECT id, name, address, salary from COMPANY ORDER BY id ASC;")
    for row in cursor.fetchall():
        print("ID = ", row[0])
        print("NAME = ", row[1])
        print("ADDRESS = ", row[2])
        print("SALARY = ", row[3], "\n")
    print("Operation done successfully")
    conn.close()
def update_data(conn):
    """Raise employee #1's salary to 50000.00, then echo the table contents."""
    cursor = conn.cursor()
    cursor.execute("UPDATE COMPANY set SALARY = 50000.00 where ID=1;")
    conn.commit()
    conn.close()
    # Re-read on a fresh connection to show the effect of the update.
    select_data(get_conn())
def delete_data(conn):
    """Delete employee #4, then echo the remaining table contents."""
    cursor = conn.cursor()
    cursor.execute("DELETE from COMPANY where ID=4;")
    conn.commit()
    conn.close()
    # Re-read on a fresh connection to show the effect of the delete.
    select_data(get_conn())
# Do test
if __name__ == "__main__":
    # Exercise the full CRUD cycle; each helper opens (and closes) its own
    # connection via get_conn().
    create_table(get_conn())
    insert_data(get_conn())
    select_data(get_conn())
    update_data(get_conn())
    delete_data(get_conn())
    pass
| 24.230769
| 116
| 0.579592
| 282
| 2,205
| 4.414894
| 0.347518
| 0.04498
| 0.053012
| 0.064257
| 0.360643
| 0.342169
| 0.320482
| 0.253815
| 0.253815
| 0.189558
| 0
| 0.042079
| 0.26712
| 2,205
| 90
| 117
| 24.5
| 0.728342
| 0.239456
| 0
| 0.384615
| 0
| 0
| 0.315277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0.096154
| 0.019231
| 0
| 0.153846
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fdd740c29776fedc99f0ae3abe771a8f8e0fe2b3
| 2,781
|
py
|
Python
|
app/routes.py
|
volvo007/flask_microblog
|
b826bcaa4fec6703a66f757e39fdf1fcced031f9
|
[
"Apache-2.0"
] | null | null | null |
app/routes.py
|
volvo007/flask_microblog
|
b826bcaa4fec6703a66f757e39fdf1fcced031f9
|
[
"Apache-2.0"
] | null | null | null |
app/routes.py
|
volvo007/flask_microblog
|
b826bcaa4fec6703a66f757e39fdf1fcced031f9
|
[
"Apache-2.0"
] | null | null | null |
import time
from flask import render_template, flash, redirect, url_for
from flask.globals import request
from flask_login.utils import logout_user
from werkzeug.urls import url_parse
# from flask.helpers import flash # 闪动消息
from app import app, db # 第二个 app 是 init文件里的app 对象
from app.forms import LoginForm, RegistrationForm
from flask_login import current_user, login_user, login_required
from app.models import User
@app.route('/')
@app.route('/index')
@login_required  # login protection: just add this decorator and the view requires an authenticated user
def index():
    """Home page: renders a hard-coded list of sample posts."""
    # user = {'username': 'Miguel'}
    posts = [
        {
            'author': {'username': 'John'},
            'body': 'b day'
        },
        {
            'author': {'username': 'Lucy'},
            'body': 'bbb day'
        }
    ]
    return render_template('index.html', title='Home', posts=posts)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate the user and send them to the page they asked for.

    Failed logins flash a message and return to the login form.
    """
    if current_user.is_authenticated:  # flask-login exposes the session user as current_user
        return redirect(url_for('index'))  # already signed in: straight to the home page
    form = LoginForm()
    if form.validate_on_submit():
        # Look the user up by username and verify the password hash.
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        # remember_me keeps the login alive via a cookie.
        login_user(user, remember=form.remember_me.data)
        # Honour the ?next= parameter, but only for relative (on-site) URLs:
        #   1. no next parameter           -> fall back to /index
        #   2. next is a relative path     -> redirect there
        #   3. next contains a full domain -> ignore it and use /index
        # url_parse() enforces this so the redirect can never leave the site.
        next_page = request.args.get('next')
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        # BUG FIX: this previously redirected to url_for('index')
        # unconditionally, making the next_page logic above dead code.
        return redirect(next_page)
    # NOTE(review): 'Sing In' looks like a typo for 'Sign In' — left unchanged
    # in case templates/tests depend on the exact title.
    return render_template('login.html', title='Sing In', form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account, then send the visitor to the login page."""
    # Logged-in visitors have no business registering again.
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    reg_form = RegistrationForm()
    if reg_form.validate_on_submit():
        new_user = User(username=reg_form.username.data, email=reg_form.email.data)
        new_user.set_password(reg_form.password.data)
        db.session.add(new_user)
        db.session.commit()
        flash('Congrates, you now registered a new user')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=reg_form)
| 37.08
| 82
| 0.647609
| 348
| 2,781
| 5.054598
| 0.382184
| 0.027288
| 0.055713
| 0.068221
| 0.158044
| 0.067084
| 0.03411
| 0
| 0
| 0
| 0
| 0.003274
| 0.231212
| 2,781
| 75
| 83
| 37.08
| 0.819457
| 0.188781
| 0
| 0.175439
| 0
| 0
| 0.115282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0.052632
| 0.157895
| 0
| 0.385965
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fddb07c49fca7dc8739e0a7542a2263412a966ed
| 179
|
py
|
Python
|
ao2j/lt1300/046/B.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/046/B.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
ao2j/lt1300/046/B.py
|
neshdev/competitive-prog
|
f406a85d62e83c3dbd3ad41f42ae121ebefd0fda
|
[
"MIT"
] | null | null | null |
# First line: n (array size) and k (how many of the smallest values to scan).
n, k = map(int, input().split())
values = [int(token) for token in input().split()]
values.sort()
# Among the k smallest values, add up the magnitudes of the negative ones.
total = sum(-v for v in values[:k] if v < 0)
print(total)
| 16.272727
| 39
| 0.536313
| 35
| 179
| 2.742857
| 0.457143
| 0.083333
| 0.145833
| 0.166667
| 0.479167
| 0.479167
| 0.479167
| 0.479167
| 0
| 0
| 0
| 0.014815
| 0.24581
| 179
| 10
| 40
| 17.9
| 0.696296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
fde56cf34b5e2c673b4f1db2a825702a705cb407
| 1,257
|
py
|
Python
|
dsRenamingTool/dialogBase.py
|
S0nic014/dsRenamingTool
|
97efcd660af820ab6b5f1918222ba31a95d47788
|
[
"MIT"
] | 2
|
2020-07-15T16:59:31.000Z
|
2021-08-17T14:04:15.000Z
|
dsRenamingTool/dialogBase.py
|
S0nic014/dsRenamingTool
|
97efcd660af820ab6b5f1918222ba31a95d47788
|
[
"MIT"
] | null | null | null |
dsRenamingTool/dialogBase.py
|
S0nic014/dsRenamingTool
|
97efcd660af820ab6b5f1918222ba31a95d47788
|
[
"MIT"
] | null | null | null |
import sys
from PySide2 import QtCore
from PySide2 import QtWidgets
from shiboken2 import wrapInstance
import maya.OpenMayaUI as omui
def mayaMainWindow():
    """
    Get maya main window as QWidget
    :return: Maya main window as QWidget
    :rtype: PySide2.QtWidgets.QWidget
    """
    mainWindowPtr = omui.MQtUtil.mainWindow()
    if mainWindowPtr:
        # Python 2 wraps the pointer as ``long``; Python 3 uses ``int``.
        if sys.version_info[0] < 3:
            return wrapInstance(long(mainWindowPtr), QtWidgets.QWidget)  # noqa: F821
        else:
            return wrapInstance(int(mainWindowPtr), QtWidgets.QWidget)
    else:
        # NOTE(review): the result of this retry is not returned, so the
        # caller gets None here, and if the main window never appears this
        # recurses until RecursionError. Presumably this should be
        # ``return mayaMainWindow()`` (or return None) — confirm the intent.
        mayaMainWindow()
class _modalDialog(QtWidgets.QDialog):
    """Base modal dialog parented to the Maya main window.

    Subclasses build their UI by overriding the ``create*`` hook methods.
    """

    def __init__(self, parent=mayaMainWindow()):
        # NOTE(review): the default ``parent=mayaMainWindow()`` is evaluated
        # once, at class-definition (import) time — confirm that is intended;
        # the usual pattern is ``parent=None`` resolved inside __init__.
        super(_modalDialog, self).__init__(parent)
        # Disable question mark for windows
        self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)
        # MacOSX window stay on top
        self.setProperty("saveWindowPref", True)
        self.createActions()
        self.createWidgets()
        self.createLayouts()
        self.createConnections()

    def createActions(self):
        # Hook: subclasses create their QActions here.
        pass

    def createWidgets(self):
        # Hook: subclasses create their child widgets here.
        pass

    def createLayouts(self):
        # Hook: subclasses arrange widgets in layouts here.
        pass

    def createConnections(self):
        # Hook: subclasses wire signals to slots here.
        pass
| 24.647059
| 87
| 0.668258
| 125
| 1,257
| 6.632
| 0.488
| 0.038601
| 0.039807
| 0.038601
| 0.055489
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009544
| 0.249801
| 1,257
| 50
| 88
| 25.14
| 0.869565
| 0.13922
| 0
| 0.193548
| 0
| 0
| 0.013258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0.129032
| 0.16129
| 0
| 0.451613
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
fdfb78f1a782871b71fcd4058e86788874102e55
| 582
|
py
|
Python
|
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
iiif_prezi3/loader.py
|
rbturnbull/iiif-prezi3
|
0e66bc41438772c75e064c20964ed01aff1f3709
|
[
"Apache-2.0"
] | null | null | null |
import json
def load_extensions_from_json():
    """Import every extension listed in ``./extensions.json``.

    The file is expected to hold a JSON array of extension module names;
    each one is imported from ``iiif_prezi3.extensions``. Returns None in
    all cases and silently does nothing when the file is absent.
    """
    try:
        # ``with`` guarantees the handle is closed; the original called
        # open() bare and leaked the file object.
        with open("extensions.json") as config_file:
            extensions = json.load(config_file)
    except FileNotFoundError:
        return
    for ext in extensions:
        __import__(f"iiif_prezi3.extensions.{ext}")
def load_extension(path):
    """Load a single extension from *path*. Placeholder — not implemented yet."""
    pass
def monkeypatch_schema(schema_class, patch_classes):
    """Append *patch_classes* to ``schema_class.__bases__`` at runtime.

    :param schema_class: class whose base-class tuple is extended in place.
    :param patch_classes: a single class or a list of classes to mix in.
    """
    schema_bases = list(schema_class.__bases__)
    # isinstance (not ``type(...) == list``) is the idiomatic check and also
    # accepts list subclasses.
    if isinstance(patch_classes, list):
        schema_bases.extend(patch_classes)
    else:
        schema_bases.append(patch_classes)
    schema_class.__bases__ = tuple(schema_bases)
| 22.384615
| 55
| 0.689003
| 71
| 582
| 5.239437
| 0.464789
| 0.129032
| 0.145161
| 0.123656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002212
| 0.223368
| 582
| 25
| 56
| 23.28
| 0.820796
| 0
| 0
| 0
| 0
| 0
| 0.073883
| 0.04811
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.055556
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
a9039f8421d00114c0ba14dfaca35466584a7fcb
| 1,543
|
py
|
Python
|
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
server/main_node/create_tables.py
|
noderod/DARLMID
|
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
|
[
"MIT"
] | null | null | null |
"""
BASICS
Creates the necessary tables and users.
"""
import os
import psycopg2
con = psycopg2.connect (host = os.environ["POSTGRES_URL"], database = os.environ["POSTGRES_DB"], user = os.environ["POSTGRES_USER"], password = os.environ["POSTGRES_PASSWORD"])
cur = con.cursor()
# Creates main user table
cur.execute("""
CREATE TABLE IF NOT EXISTS user_data (
user_id serial PRIMARY KEY,
username VARCHAR (256) UNIQUE NOT NULL,
password VARCHAR (256) NOT NULL,
salt VARCHAR (256) NOT NULL,
date_creation TIMESTAMP NOT NULL,
last_action TIMESTAMP NOT NULL,
last_login TIMESTAMP NOT NULL,
last_logout TIMESTAMP NOT NULL
)""")
# Creates a read only user (SELECT)
# Query is done in an unsafe way because it is the only way, sanitizing it will cause issues
# No user input
read_only_postgres_user = os.environ["R_USERNAME"]
cur.execute("CREATE USER "+ read_only_postgres_user + " WITH ENCRYPTED PASSWORD %s", (os.environ["R_PASSWORD"],))
cur.execute("GRANT SELECT ON ALL TABLES IN SCHEMA public TO " + read_only_postgres_user)
# Creates a write user (SELECT, INSERT, UPDATE)
write_postgres_user = os.environ["RW_USERNAME"]
cur.execute("CREATE USER "+ write_postgres_user + " WITH ENCRYPTED PASSWORD %s", (os.environ["RW_PASSWORD"],))
cur.execute("GRANT SELECT, INSERT, DELETE, UPDATE ON ALL TABLES IN SCHEMA public TO " + write_postgres_user)
cur.execute("GRANT SELECT, USAGE ON ALL SEQUENCES IN SCHEMA public TO " + write_postgres_user)
con.commit()
con.close ()
| 32.829787
| 176
| 0.720674
| 222
| 1,543
| 4.869369
| 0.382883
| 0.066605
| 0.062905
| 0.055504
| 0.281221
| 0.175763
| 0.175763
| 0.079556
| 0
| 0
| 0
| 0.008675
| 0.178224
| 1,543
| 46
| 177
| 33.543478
| 0.843849
| 0.166559
| 0
| 0
| 0
| 0
| 0.564364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
8bf15c081cf1ec7e2805d8cdda039957d68c5367
| 454
|
py
|
Python
|
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
Exercicios/script030.py
|
jacksonmoreira/Curso-em-video-mundo1-
|
84b09bd3b61417fab483acf9f1a38e0cf6b95a80
|
[
"MIT"
] | null | null | null |
frase = str(input('Digite o seu nome completo para a análise ser feita:')).strip()
print('-' * 50)
print('Analisando nome...')
print('O seu nome em maiúsculas é {}.'.format(frase.upper()))
print('O seu nome em minúsculas é {}.'.format(frase.lower()))
print('O seu nome tem ao todo {} letras.'.format(len(frase) - frase.count(' ')))
print('O seu primeiro nome tem {} letras.'.format(frase.find(' ')))
print('Nome analisado com sucesso!')
print('-' * 50)
| 41.272727
| 82
| 0.665198
| 69
| 454
| 4.376812
| 0.507246
| 0.066225
| 0.10596
| 0.129139
| 0.099338
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.127753
| 454
| 10
| 83
| 45.4
| 0.752525
| 0
| 0
| 0.222222
| 0
| 0
| 0.503311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.888889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
8bfbe25b3704f8131128b16676dbbc1e54dcc6b4
| 446
|
py
|
Python
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 55
|
2016-11-20T17:08:19.000Z
|
2022-03-11T22:19:43.000Z
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 17
|
2017-09-20T07:52:17.000Z
|
2021-12-03T10:03:00.000Z
|
bin/Notifier/NotificationLoader.py
|
juergenhoetzel/craft
|
9d3fe6dc07f2307e8f8212c8981b980a9d2d28fd
|
[
"BSD-2-Clause"
] | 29
|
2016-12-10T15:00:11.000Z
|
2021-12-02T12:54:05.000Z
|
import importlib
# Cache of instantiated backends keyed by backend name; filled lazily by load().
_NOTIFICATION_BACKENDS = None


def load(modules):
    """Instantiate and cache the notification backends named in *modules*.

    :param modules: iterable of backend class names; each must live in a
        module of the same name under ``Notifier.Backends``.
    :return: dict mapping ``backend.name`` to the backend instance. The dict
        is built once; later calls return the cached mapping regardless of
        *modules*.
    """
    global _NOTIFICATION_BACKENDS
    # ``is None`` (identity) instead of ``== None``; also dropped the stray
    # semicolon after the global statement.
    if _NOTIFICATION_BACKENDS is None:
        _NOTIFICATION_BACKENDS = dict()
        for name in modules:
            name = name.strip()
            # Module Notifier.Backends.<name> must define a class <name>.
            backend = getattr(importlib.import_module("Notifier.Backends.%s" % name), name)()
            _NOTIFICATION_BACKENDS[backend.name] = backend
    return _NOTIFICATION_BACKENDS
| 29.733333
| 99
| 0.695067
| 43
| 446
| 6.906977
| 0.488372
| 0.40404
| 0.161616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.226457
| 446
| 14
| 100
| 31.857143
| 0.86087
| 0
| 0
| 0
| 0
| 0
| 0.044843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e307995e7666610653ffb5c496c1cf1dfe8feab6
| 897
|
py
|
Python
|
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | 1
|
2021-04-01T21:21:23.000Z
|
2021-04-01T21:21:23.000Z
|
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
machin/frame/algorithms/__init__.py
|
ikamensh/machin
|
af7b423c47bc1412530cf6c96c11bd3af9b3e239
|
[
"MIT"
] | null | null | null |
import warnings
from .base import TorchFramework
from .dqn import DQN
from .dqn_per import DQNPer
from .rainbow import RAINBOW
from .ddpg import DDPG
from .hddpg import HDDPG
from .td3 import TD3
from .ddpg_per import DDPGPer
from .a2c import A2C
from .a3c import A3C
from .ppo import PPO
from .sac import SAC
from .maddpg import MADDPG
try:
from .apex import DQNApex, DDPGApex
from .impala import IMPALA
from .ars import ARS
except ImportError as _:
warnings.warn(
"Failed to import algorithms relying on torch.distributed." " Set them to None."
)
DQNApex = None
DDPGApex = None
IMPALA = None
ARS = None
__all__ = [
"TorchFramework",
"DQN",
"DQNPer",
"RAINBOW",
"DDPG",
"HDDPG",
"TD3",
"DDPGPer",
"A2C",
"A3C",
"PPO",
"SAC",
"DQNApex",
"DDPGApex",
"IMPALA",
"ARS",
"MADDPG",
]
| 16.924528
| 88
| 0.637681
| 113
| 897
| 5
| 0.345133
| 0.024779
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013657
| 0.265329
| 897
| 52
| 89
| 17.25
| 0.843703
| 0
| 0
| 0
| 0
| 0
| 0.185061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.422222
| 0
| 0.422222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
e308a4fb297dc8f9348bbe1730683c0c197aa336
| 2,925
|
py
|
Python
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 1,253
|
2015-01-02T13:58:02.000Z
|
2022-03-31T08:43:39.000Z
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 3,388
|
2015-01-02T11:17:58.000Z
|
2022-03-30T10:21:45.000Z
|
plaso/cli/helpers/hashers.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | 376
|
2015-01-20T07:04:54.000Z
|
2022-03-04T23:53:00.000Z
|
# -*- coding: utf-8 -*-
"""The hashers CLI arguments helper."""
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class HashersArgumentsHelper(interface.ArgumentsHelper):
"""Hashers CLI arguments helper."""
NAME = 'hashers'
DESCRIPTION = 'Hashers command line arguments.'
_DEFAULT_HASHER_STRING = 'sha256'
  @classmethod
  def AddArguments(cls, argument_group):
    """Adds command line arguments to an argument group.

    This function takes an argument parser or an argument group object and adds
    to it all the command line arguments this helper supports.

    Args:
      argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
          argparse group.
    """
    # Both flag spellings are accepted; 0 (the default) means no size limit.
    argument_group.add_argument(
        '--hasher_file_size_limit', '--hasher-file-size-limit',
        dest='hasher_file_size_limit', type=int, action='store', default=0,
        metavar='SIZE', help=(
            'Define the maximum file size in bytes that hashers should '
            'process. Any larger file will be skipped. A size of 0 represents '
            'no limit.'))

    # Comma-separated hasher list; defaults to _DEFAULT_HASHER_STRING.
    argument_group.add_argument(
        '--hashers', dest='hashers', type=str, action='store',
        default=cls._DEFAULT_HASHER_STRING, metavar='HASHER_LIST', help=(
            'Define a list of hashers to use by the tool. This is a comma '
            'separated list where each entry is the name of a hasher, such as '
            '"md5,sha256". "all" indicates that all hashers should be '
            'enabled. "none" disables all hashers. Use "--hashers list" or '
            '"--info" to list the available hashers.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
hashers = cls._ParseStringOption(
options, 'hashers', default_value=cls._DEFAULT_HASHER_STRING)
hasher_file_size_limit = cls._ParseNumericOption(
options, 'hasher_file_size_limit', default_value=0)
# TODO: validate hasher names.
if hasher_file_size_limit < 0:
raise errors.BadConfigOption(
'Invalid hasher file size limit value cannot be negative.')
setattr(configuration_object, '_hasher_names_string', hashers)
setattr(
configuration_object, '_hasher_file_size_limit', hasher_file_size_limit)
# Self-register this helper with the CLI argument helper manager on import.
manager.ArgumentHelperManager.RegisterHelper(HashersArgumentsHelper)
| 36.111111
| 80
| 0.699487
| 349
| 2,925
| 5.716332
| 0.378224
| 0.0401
| 0.063158
| 0.085714
| 0.063158
| 0.038095
| 0.038095
| 0.038095
| 0
| 0
| 0
| 0.005224
| 0.214701
| 2,925
| 80
| 81
| 36.5625
| 0.8633
| 0.245812
| 0
| 0.097561
| 0
| 0
| 0.352305
| 0.054092
| 0
| 0
| 0
| 0.0125
| 0
| 1
| 0.04878
| false
| 0
| 0.097561
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e321f4353a25d31bcaa64e339213294f5626c9c9
| 480
|
py
|
Python
|
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
src/default/ellipse/index.py
|
mikeludemann/python-data-visualization
|
e5317505d41ae79389f6eec61cefeca1690935b0
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Ellipse

# Number of random ellipses to draw.
NUM = 250


def _random_ellipse():
    """Return an Ellipse with random position, size and orientation."""
    return Ellipse(xy=np.random.rand(2) * 10,
                   width=np.random.rand(), height=np.random.rand(),
                   angle=np.random.rand() * 360)


ells = [_random_ellipse() for _ in range(NUM)]

fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
for patch in ells:
    ax.add_artist(patch)
    patch.set_clip_box(ax.bbox)
    patch.set_alpha(np.random.rand())
    patch.set_facecolor(np.random.rand(3))

ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
plt.show()
| 20.869565
| 54
| 0.708333
| 89
| 480
| 3.730337
| 0.52809
| 0.144578
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038005
| 0.122917
| 480
| 22
| 55
| 21.818182
| 0.750594
| 0
| 0
| 0
| 0
| 0
| 0.022917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e323376f728d32ac2cbf19f89a6bf1e46c450382
| 638
|
py
|
Python
|
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | 3
|
2018-03-27T14:34:48.000Z
|
2021-10-04T16:28:19.000Z
|
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | null | null | null |
_/chapter5-OpenStack/IdentityService/createproject.py
|
paullewallencom/hybrid-cloud-978-1-7888-3087-4
|
d101553fd342f420b581b87c58c7219f2b04a7c6
|
[
"Apache-2.0"
] | 1
|
2021-08-27T23:51:28.000Z
|
2021-08-27T23:51:28.000Z
|
# import OpenStack connection class from the SDK
from openstack import connection

# Create a connection object by calling the constructor and pass the security information
# NOTE(review): credentials and endpoint are hard-coded for this book demo;
# do not use this pattern in production code.
conn = connection.Connection(auth_url="http://192.168.0.106/identity",
                             project_name="demo",
                             username="admin",
                             password="manoj",
                             user_domain_id="default",
                             project_domain_id="default")
def create_project(conn):
    """Create the demo project via the Identity service.

    Args:
        conn: an openstack.connection.Connection (or compatible) object
            exposing ``identity.create_project``.

    Returns:
        The project object returned by the Identity service.
    """
    project_desc = {
        "description": "This project is for packtpub readers",
        "isenabled": True,
        "name": "packtpub_readers"
    }
    # Bug fix: the created project was previously assigned to a local and
    # discarded; return it so callers can inspect the result.
    project = conn.identity.create_project(**project_desc)
    return project
# Run the demo: create the project using the connection configured above.
create_project(conn)
| 30.380952
| 89
| 0.714734
| 78
| 638
| 5.692308
| 0.602564
| 0.087838
| 0.067568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019157
| 0.181818
| 638
| 20
| 90
| 31.9
| 0.831418
| 0.210031
| 0
| 0
| 0
| 0
| 0.26494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.066667
| 0.066667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
e3278fc449a9b7f42367d6c094639616a86c1514
| 353
|
py
|
Python
|
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | 1
|
2017-01-18T11:19:24.000Z
|
2017-01-18T11:19:24.000Z
|
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | null | null | null |
setup.py
|
markus61/selfstoredict
|
c770fd0dd4976e66299f51f71a71ad9c1875d699
|
[
"MIT"
] | 1
|
2018-02-23T06:23:43.000Z
|
2018-02-23T06:23:43.000Z
|
from setuptools import setup, find_packages

# Packaging metadata for the `selfstoredict` distribution (setuptools-based).
setup(
    name='selfstoredict',
    version='0.6',
    packages=find_packages(),
    url='https://github.com/markus61/selfstoredict',
    license='MIT',
    author='markus',
    author_email='[email protected]',
    description='a python class delivering a dict that stores itself into a JSON file or a redis db',
)
| 29.416667
| 101
| 0.696884
| 48
| 353
| 5.0625
| 0.8125
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.17847
| 353
| 11
| 102
| 32.090909
| 0.824138
| 0
| 0
| 0
| 0
| 0
| 0.444759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e33e7075c79b3b47f743f64502284119cdb5e862
| 2,094
|
py
|
Python
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 2
|
2019-12-01T16:12:24.000Z
|
2021-05-18T22:10:12.000Z
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 10
|
2019-09-19T17:08:46.000Z
|
2021-02-17T21:42:10.000Z
|
konbata/Formats/xml_format.py
|
jzeuner/konbata
|
41c5ec9ce4c84e82e09daaa106ceed9de38c437b
|
[
"MIT"
] | 3
|
2019-11-27T18:39:12.000Z
|
2021-02-10T15:11:58.000Z
|
"""
Loader and Parser for the xml format.
Version: 0.01-alpha
"""
from xml.dom import minidom
from konbata.Data.Data import DataNode, DataTree
from konbata.Formats.Format import Format
def xml_toTree(file, delimiter, options=None):
    """Parse an xml file into a DataTree.

    Parameters
    ----------
    file: file
        open input file in at least read mode
    delimiter: unused for xml input
    options: list, optional

    Returns
    -------
    tree: DataTree
    """
    # TODO: Second Parser with the import xml.etree.ElementTree as ET class
    document = minidom.parse(file)
    document.normalize()

    tree = DataTree(tree_type='xml')
    if document.hasChildNodes():
        for child in document.childNodes:
            tree.root.add(help_xml_toTree(child))
    return tree
def help_xml_toTree(xml_node):
    """Helps xml_toTree, recursively converting a minidom node to a DataNode.

    Parameters
    ----------
    xml_node: ElementType1

    Returns
    -------
    node: DataNode
    """
    # Inner node: convert every child recursively and attach it.
    if xml_node.hasChildNodes():
        # TODO Add Attributes
        branch = DataNode(xml_node.localName)
        for child in xml_node.childNodes:
            branch.add(help_xml_toTree(child))
        return branch

    if xml_node.nodeType == xml_node.TEXT_NODE:
        # TODO: guess xml_node.nodeValue == xml_node.data
        return DataNode(xml_node.nodeValue.replace('\n ', ''))
    if xml_node.nodeType == xml_node.ELEMENT_NODE:
        # TODO: guess xml_node.tagName == xml_node.localName
        return DataNode(xml_node.localName)

    # TODO: Implement the other nodeTypes
    print('Warning: NodeType not supported yet')
    return DataNode(xml_node.localName)
def xml_fromTree(tree, file, options=None):
    """Function transforms a DataTree into a xml file.

    Not implemented yet.

    Parameters
    ----------
    tree: DataTree
    file: file
        open output file in at least write mode
    options: list, optional
    """
    # TODO: implement xml serialization.
    pass
# Register the xml format: name, delimiter list, reader and writer callables.
# NOTE(review): the delimiter '/n' looks like a typo for '\n' -- confirm
# against how Format consumes delimiters before changing it.
xml_format = Format('xml', ['/n'], xml_toTree, xml_fromTree)
| 22.516129
| 75
| 0.637536
| 261
| 2,094
| 4.969349
| 0.35249
| 0.086353
| 0.046261
| 0.058597
| 0.17579
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002579
| 0.259312
| 2,094
| 92
| 76
| 22.76087
| 0.833656
| 0.354823
| 0
| 0.066667
| 0
| 0
| 0.038079
| 0
| 0
| 0
| 0
| 0.032609
| 0
| 1
| 0.1
| false
| 0.033333
| 0.1
| 0
| 0.3
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e348be446d860ef514d588759be2dbd6de2b4764
| 651
|
py
|
Python
|
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | null | null | null |
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | 2
|
2021-09-07T07:06:00.000Z
|
2021-09-07T07:24:26.000Z
|
essentials_kit_management/interactors/get_pay_through_details_interactor.py
|
RajeshKumar1490/iB_hubs_mini_project
|
f7126092400fb9a62fb4bff643dae7cda3a8d9d2
|
[
"MIT"
] | null | null | null |
from essentials_kit_management.interactors.storages.storage_interface \
import StorageInterface
from essentials_kit_management.interactors.presenters.presenter_interface \
import PresenterInterface
class GetPayThroughDetailsInteractor:
    """Fetch the UPI id from storage and build the pay-through response."""

    def __init__(
            self, storage: StorageInterface, presenter: PresenterInterface):
        self.storage = storage
        self.presenter = presenter

    def get_pay_through_details(self):
        """Return the presenter's response for the stored UPI id."""
        upi_id = self.storage.get_upi_id()
        return self.presenter.get_pay_through_details_response(upi_id=upi_id)
| 34.263158
| 76
| 0.761905
| 68
| 651
| 6.882353
| 0.382353
| 0.08547
| 0.145299
| 0.160256
| 0.162393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184332
| 651
| 18
| 77
| 36.166667
| 0.881356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e34b1404822d471e120b2d87a3f5be2a57d14434
| 1,323
|
py
|
Python
|
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
from argparse import Namespace
from functools import partial
from typing import Any
import molotov
from .formatters import DefaultFormatter
from .record_table import RecordTable
from .recorder import Recorder
from .reporter import Reporter
from .scenario import Scenario
# Public names exported by this package.
__all__ = ("Reporter", "register_reporter", "scenario", "recorder")

# Single shared recorder instance that accumulates request/scenario records.
recorder = Recorder(RecordTable())
# Scenario factory pre-wired to notify the shared recorder on scenario start.
scenario = partial(Scenario, recorder.on_starting_scenario)
@molotov.events()
async def event_listener(event: str, **info: Any) -> None:
    """Dispatch molotov runtime events to the shared recorder."""
    # Lazy lambdas: only the matching handler touches its `info` keys, so
    # unrelated events never raise KeyError (same behavior as an elif chain).
    handlers = {
        "sending_request": lambda: recorder.on_sending_request(info["session"], info["request"]),
        "response_received": lambda: recorder.on_response_received(info["session"], info["response"], info["request"]),
        "scenario_success": lambda: recorder.on_scenario_success(info["scenario"]["name"], info["wid"]),
        "scenario_failure": lambda: recorder.on_scenario_failure(info["scenario"]["name"], info["wid"], info['exception']),
        "current_workers": lambda: recorder.on_current_workers(info["workers"]),
    }
    handler = handlers.get(event)
    if handler is not None:
        handler()
def register_reporter(args: Namespace) -> Reporter:
    """Build a Reporter bound to the shared recorder.

    Raises NotImplementedError when molotov is configured with more than one
    process, which this extension does not support.
    """
    if args.processes > 1:
        raise NotImplementedError('Возможность работы с несколькими процессами не поддерживается!')
    formatter = DefaultFormatter()
    return Reporter(recorder, formatter)
| 32.268293
| 99
| 0.73167
| 149
| 1,323
| 6.328859
| 0.375839
| 0.063627
| 0.031813
| 0.042418
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000887
| 0.147392
| 1,323
| 40
| 100
| 33.075
| 0.835106
| 0
| 0
| 0
| 0
| 0
| 0.199546
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.321429
| 0
| 0.392857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
e36ead0127bc40a1f4670d0eba027d0736c82d0a
| 781
|
py
|
Python
|
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
from kafka import KafkaProducer
import json
import random
from time import sleep
from datetime import datetime

# NOTE(review): random, sleep and datetime are not referenced in this script --
# possibly leftovers from an earlier version; confirm before removing.

# Create an instance of the Kafka producer
producer = KafkaProducer(bootstrap_servers='kafka-server:9092',
                         value_serializer=lambda m: json.dumps(
                             m).encode('utf-8'),
                         api_version=(0, 11, 5))

# Message describing a scikit-learn job: the import statement, estimator name,
# optional parameters, and the training/test CSV locations.
stream_algorithm_str = {"id":"1","import_str": "from sklearn.tree import DecisionTreeClassifier",
                       "alg_str": "DecisionTreeClassifier", "parameters_str": None,
                       "db_training_path": "test_training.csv","db_test_path":"test_test.csv"}

producer.send('sk-individual-topic', stream_algorithm_str)

# block until all async messages are sent
producer.flush()
| 37.190476
| 97
| 0.658131
| 92
| 781
| 5.413043
| 0.652174
| 0.060241
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016892
| 0.241997
| 781
| 20
| 98
| 39.05
| 0.824324
| 0.102433
| 0
| 0
| 0
| 0
| 0.289398
| 0.063037
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
e3773931c3c2274119d47a9e56c7b5427c5ed618
| 241
|
py
|
Python
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 21
|
2015-02-09T18:08:38.000Z
|
2021-11-08T15:00:48.000Z
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 7
|
2020-04-12T23:00:19.000Z
|
2021-01-30T23:44:24.000Z
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 27
|
2015-07-22T18:08:12.000Z
|
2022-02-28T19:50:26.000Z
|
#!/bin/python3
from collections import Counter
def pairs(socks):
    """Return how many complete pairs the list of sock colors contains.

    Args:
        socks: iterable of hashable color identifiers.

    Returns:
        int: total number of pairs summed over all colors.
    """
    # Each color contributes count // 2 pairs; the intermediate list() around
    # map() in the original was unnecessary -- sum() consumes any iterable.
    return sum(count // 2 for count in Counter(socks).values())
# First input line is the sock count; unused because pairs() derives
# everything from the list itself.
_ = int(input().strip())
socks = list(map(int, input().strip().split(' ')))
print(pairs(socks))
| 18.538462
| 74
| 0.659751
| 33
| 241
| 4.787879
| 0.666667
| 0.126582
| 0.164557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009569
| 0.13278
| 241
| 12
| 75
| 20.083333
| 0.746411
| 0.053942
| 0
| 0
| 0
| 0
| 0.004405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0.166667
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
8b4cf66930d071ee4505d81a0c0281d51346de46
| 384
|
py
|
Python
|
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
# Compute the square, cube and fifth roots of 2, 3 and 5 respectively, then
# report which value is the largest and which is the smallest.
x = 2 ** (1/2)
y = 3 ** (1/3)
z = 5 ** (1/5)

for root in (x, y, z):
    print(root)
print()

if y < x > z:
    print(x, 'jest największa')
elif x < y > z:
    print(y, 'jest największa')
elif x < z > y:
    print(z, 'jest największa')
print()

if y > x < z:
    print(x, 'jest najmniejsza')
elif x > y < z:
    print(y, 'jest najmniejsza')
elif x > z < y:
    print(z, 'jest najmniejsza')
| 16
| 31
| 0.585938
| 80
| 384
| 2.8125
| 0.175
| 0.133333
| 0.093333
| 0.08
| 0.586667
| 0.586667
| 0.586667
| 0.586667
| 0.586667
| 0.213333
| 0
| 0.0299
| 0.216146
| 384
| 24
| 32
| 16
| 0.717608
| 0
| 0
| 0.1
| 0
| 0
| 0.241558
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.55
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
8b4e779744d51e5ebec4b797f93c9f1ab0c716a1
| 555
|
py
|
Python
|
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages

# Packaging metadata for the `picker-my-sticker` distribution
# (setuptools-based; no console entry points declared yet).
setup(
    name='picker-my-sticker',
    version='0.0.1',
    description='Stickers for Slack',
    long_description='S t i c k e r s',
    url='https://github.com/kennydo/pick-my-stick',
    author='Kenny Do',
    author_email='[email protected]',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
    packages=find_packages(),
    entry_points={
    },
)
| 25.227273
| 51
| 0.610811
| 67
| 555
| 4.985075
| 0.791045
| 0.071856
| 0.149701
| 0.155689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016548
| 0.237838
| 555
| 21
| 52
| 26.428571
| 0.77305
| 0
| 0
| 0
| 0
| 0
| 0.448649
| 0.03964
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b66b9f64668e1a15163413263d5b63cdc824a7c
| 1,435
|
py
|
Python
|
Scripts/ExplicitInstantation.py
|
fbudin69500/calatk
|
3cee90488feab7e3ef2ade1f791106aa7f11e404
|
[
"Apache-2.0"
] | 2
|
2019-09-15T12:51:02.000Z
|
2020-04-08T14:03:58.000Z
|
Scripts/ExplicitInstantation.py
|
cpatrick/calatk
|
849c17919ac5084b5b067c7631bc2aa1efd650df
|
[
"Apache-2.0"
] | null | null | null |
Scripts/ExplicitInstantation.py
|
cpatrick/calatk
|
849c17919ac5084b5b067c7631bc2aa1efd650df
|
[
"Apache-2.0"
] | 1
|
2018-10-20T16:38:28.000Z
|
2018-10-20T16:38:28.000Z
|
#!/usr/bin/env python
"""Create a .cxx file that performs explicit instantiation over float/double and
dimensions 1, 2, and 3. Writes the file to the current directory."""

usage = "ExplicitInstantiation.py <class_name>"

import sys

# Require a class name argument; also honor -h/--help.
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
    print(usage)
    sys.exit(1)

copyright_header = """/*
*
*  Copyright 2011 by the CALATK development team
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*
*/
"""

# The template contains literal C++ braces, so str.format() would choke on it;
# plain .replace() of the {0} placeholder is used instead.
content = """
#include "{0}.txx"
namespace CALATK
{
template class {0}< float, 1 >;
template class {0}< float, 2 >;
template class {0}< float, 3 >;
template class {0}< double, 1 >;
template class {0}< double, 2 >;
template class {0}< double, 3 >;
} // namespace CALATK
""".replace('{0}', sys.argv[1])

# Improvement: use a context manager so the output file is closed even if a
# write fails (the original left the file open on error).
with open(sys.argv[1] + '.cxx', 'w') as explicit_file:
    explicit_file.write(copyright_header)
    explicit_file.write(content)
| 26.090909
| 80
| 0.694774
| 213
| 1,435
| 4.647887
| 0.507042
| 0.060606
| 0.084848
| 0.057576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026182
| 0.174913
| 1,435
| 54
| 81
| 26.574074
| 0.809966
| 0.114983
| 0
| 0
| 0
| 0
| 0.739715
| 0.018987
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.025
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b67d69e37e542f410bab436a641c536c8c9539f
| 3,231
|
py
|
Python
|
aiopogo/auth_google.py
|
DennyLoko/aiopogo
|
55a9efe13c51261c68ab2abe8efc4ac69e04eb01
|
[
"MIT"
] | 14
|
2017-03-28T16:32:24.000Z
|
2021-03-13T23:03:57.000Z
|
aiopogo/auth_google.py
|
ultrafunkamsterdam/aiopogo
|
43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe
|
[
"MIT"
] | 8
|
2017-03-01T07:56:09.000Z
|
2017-08-15T07:37:12.000Z
|
aiopogo/auth_google.py
|
ultrafunkamsterdam/aiopogo
|
43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe
|
[
"MIT"
] | 14
|
2017-04-08T20:01:50.000Z
|
2017-08-19T04:23:57.000Z
|
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from time import time
# gpsoauth is an optional dependency; without it, Google login fails with a
# clear error at call time instead of breaking this module's import.
try:
    from gpsoauth import perform_master_login, perform_oauth
except ImportError:
    def perform_master_login(*args, **kwargs):
        # Deferred failure: only raised when a Google login is attempted.
        raise ImportError('Must install gpsoauth to use Google accounts')

    perform_oauth = perform_master_login
from .auth import Auth
from .exceptions import AuthException, InvalidCredentialsException
class AuthGoogle(Auth):
    """Google-account authentication provider.

    Runs the blocking ``gpsoauth`` master-login / OAuth calls in a one-off
    worker thread so the event loop is not blocked.
    """

    # Constants identifying the client to Google's login service.
    GOOGLE_LOGIN_ANDROID_ID = '9774d56d682e549c'
    GOOGLE_LOGIN_SERVICE = 'audience:server:client_id:848232511240-7so421jotr2609rmqakceuu1luuq0ptb.apps.googleusercontent.com'
    GOOGLE_LOGIN_APP = 'com.nianticlabs.pokemongo'
    GOOGLE_LOGIN_CLIENT_SIG = '321187995bc7cdc2b5fc91b11a96e2baa8602c62'

    def __init__(self, proxy=None, refresh_token=None):
        """Initialize the provider; a saved refresh token may be supplied."""
        Auth.__init__(self)
        self.provider = 'google'
        self._refresh_token = refresh_token
        self._proxy = proxy

    async def user_login(self, username, password):
        """Perform the Google master login, then fetch an access token.

        Raises:
            InvalidCredentialsException: username or password is not a str.
            AuthException: Google rejected the credentials (no 'Token' key
                in the master-login response).
        """
        # NOTE(review): `self.log` and `self.loop` are presumably provided by
        # the Auth base class (not visible here) -- confirm.
        self.log.info('Google User Login for: %s', username)
        try:
            assert (isinstance(username, str)
                    and isinstance(password, str))
        except AssertionError:
            raise InvalidCredentialsException(
                "Username/password not correctly specified")
        # gpsoauth is synchronous; run it off the event loop.
        login = partial(
            perform_master_login,
            username,
            password,
            self.GOOGLE_LOGIN_ANDROID_ID,
            proxy=self._proxy)
        with ThreadPoolExecutor(max_workers=1) as executor:
            user_login = await self.loop.run_in_executor(executor, login)
        try:
            self._refresh_token = user_login['Token']
        except KeyError:
            # A missing 'Token' key means the master login failed.
            raise AuthException("Invalid Google Username/password")
        await self.get_access_token()

    async def get_access_token(self, force_refresh=False):
        """Return a Google access token, reusing a valid cached one.

        Args:
            force_refresh: request a fresh token even if the cached one is
                still valid.

        Raises:
            AuthException: no 'Auth' token in Google's OAuth response.
        """
        if not force_refresh and self.check_access_token():
            self.log.debug('Using cached Google access token')
            return self._access_token
        # Invalidate state before the network round-trip.
        self._access_token = None
        self.authenticated = False
        self.log.info('Requesting Google access token...')
        oauth = partial(perform_oauth, None, self._refresh_token,
                        self.GOOGLE_LOGIN_ANDROID_ID, self.GOOGLE_LOGIN_SERVICE,
                        self.GOOGLE_LOGIN_APP, self.GOOGLE_LOGIN_CLIENT_SIG,
                        proxy=self._proxy)
        with ThreadPoolExecutor(max_workers=1) as executor:
            token_data = await self.loop.run_in_executor(executor, oauth)
        try:
            self._access_token = token_data['Auth']
        except KeyError:
            self._access_token = None
            self.authenticated = False
            raise AuthException("Could not receive a Google Access Token")
        try:
            self._access_token_expiry = float(token_data['Expiry'])
        except KeyError:
            # Fall back to a 2 hour lifetime when Google omits 'Expiry'.
            self._access_token_expiry = time() + 7200.0
        self.authenticated = True
        self.log.info('Google Access Token successfully received.')
        self.log.debug('Google Access Token: %s...',
                       self._access_token[:25])
        return self._access_token
| 36.715909
| 127
| 0.662953
| 351
| 3,231
| 5.840456
| 0.307692
| 0.085854
| 0.058537
| 0.029268
| 0.173171
| 0.12878
| 0.12878
| 0.05561
| 0.05561
| 0.05561
| 0
| 0.028547
| 0.262767
| 3,231
| 87
| 128
| 37.137931
| 0.832074
| 0
| 0
| 0.257143
| 0
| 0
| 0.159084
| 0.050449
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.028571
| false
| 0.071429
| 0.114286
| 0
| 0.242857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
8b6d14070d60a3432471d3e5b7787427ad3b6a3d
| 565
|
py
|
Python
|
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
# Python exercise 076: build a single tuple holding product names and their
# respective prices in sequence.
# At the end, print a price listing with the data laid out in tabular form.
produtos = ('LÁPIS', 1.75,
            'BORRACHA', 2,
            'CADERNO', 20,
            'CANETAS', 7,
            'MOCHILA', 120)

print('-'*40)
print(f'{"PRODUTOS":^40}')
print('-'*40)
# Names sit at even indexes, prices at the following odd indexes; walk the
# tuple two items at a time instead of testing index parity.
for nome, preco in zip(produtos[::2], produtos[1::2]):
    print(f'{nome:.<30}', end='R$')
    print(f'{preco:>7.2f}')
print('-'*40)
| 31.388889
| 129
| 0.580531
| 81
| 565
| 4.049383
| 0.703704
| 0.064024
| 0.128049
| 0.091463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065882
| 0.247788
| 565
| 17
| 130
| 33.235294
| 0.705882
| 0.366372
| 0
| 0.214286
| 0
| 0
| 0.259155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
8b6dc47fa5a53a344b6d3a7e96adce1b89de4411
| 521
|
py
|
Python
|
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
from golem import actions
description = 'Verify wait_for_element_enabled action'


def test(data):
    """Verify wait_for_element_enabled both succeeds and times out correctly."""
    # The page enables the button after ~3 seconds; a 10 second timeout
    # must be enough for the wait to succeed.
    actions.navigate(data.env.url+'dynamic-elements/?delay=3')
    actions.wait_for_element_enabled('#button-three', 10)
    actions.verify_element_enabled('#button-three')
    # With a 5 second delay but only a 3 second timeout, the wait must fail.
    actions.navigate(data.env.url + 'dynamic-elements/?delay=5')
    try:
        actions.wait_for_element_enabled('#button-three', 3)
    except Exception as e:
        assert "Timeout waiting for element #button-three to be enabled" in e.args[0]
    else:
        # Bug fix: previously a missing timeout went unnoticed (the try body
        # succeeding silently passed the test); fail loudly instead.
        raise AssertionError('wait_for_element_enabled did not time out')
| 34.733333
| 85
| 0.729367
| 73
| 521
| 5.054795
| 0.520548
| 0.108401
| 0.113821
| 0.170732
| 0.455285
| 0.455285
| 0.455285
| 0.243902
| 0
| 0
| 0
| 0.013575
| 0.151631
| 521
| 14
| 86
| 37.214286
| 0.821267
| 0
| 0
| 0
| 0
| 0
| 0.349328
| 0.142035
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b73af8b167c0c808ac06e682936f0020d7644ea
| 2,104
|
py
|
Python
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 23
|
2020-05-17T04:22:17.000Z
|
2022-02-22T02:09:34.000Z
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 1
|
2020-10-22T11:47:54.000Z
|
2020-10-22T11:47:54.000Z
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 11
|
2020-07-11T07:12:19.000Z
|
2022-03-23T08:24:15.000Z
|
import collections
from cluster import Cluster
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)

# Result of a vote request: the voter's current term, whether the vote was
# granted, and the voter's node id.
VoteResult = collections.namedtuple('VoteResult', ['term', 'vote_granted', 'id'])


class NodeState:
    """Base state of a raft node: term bookkeeping and vote handling."""

    def __init__(self, node=None):
        self.cluster = Cluster()
        self.node = node
        self.id = node.id
        self.current_term = 0
        self.vote_for = None  # node.id of the voted candidate

    # input: candidate (id, current_term, lastLogIndex, lastLogTerm)
    # output: vote_granted (true/false), term (current_term, for candidate to update itself)
    # rule:
    # 1. return false if candidate.term < current_term
    # 2. return true if (voteFor is None or voteFor==candidate.id) and candidate's log is newer than receiver's
    def vote(self, vote_request):
        """Handle a RequestVote RPC and return a VoteResult.

        Bug fix: VoteResult's fields are (term, vote_granted, id), but the
        original code constructed it as (vote_granted, term, id), so `.term`
        held the boolean and `.vote_granted` held the term number. All three
        constructor calls now match the declared field order.
        """
        term = vote_request['term']
        candidate_id = vote_request['candidate_id']
        if term > self.current_term:
            # A higher term always wins our vote and advances our term.
            logging.info(f'{self} approves vote request since term: {term} > {self.current_term}')
            self.vote_for = candidate_id
            self.current_term = term
            return VoteResult(self.current_term, True, self.id)
        if term < self.current_term:
            # Stale candidate: reject and report our newer term.
            logging.info(f'{self} rejects vote request since term: {term} < {self.current_term}')
            return VoteResult(self.current_term, False, self.id)
        # vote_request.term == self.current_term
        if self.vote_for is None or self.vote_for == candidate_id:
            # TODO check if the candidate's log is newer than receiver's
            self.vote_for = candidate_id
            return VoteResult(self.current_term, True, self.id)
        logging.info(f'{self} rejects vote request since vote_for: {self.vote_for} != {candidate_id}')
        return VoteResult(self.current_term, False, self.id)

    # another thread might change the state into Follower when got heartbeat
    # only candidate could return True
    # it returns False for both Leader and Follower
    def win(self):
        return False
| 42.08
| 113
| 0.66635
| 283
| 2,104
| 4.823322
| 0.289753
| 0.112821
| 0.120879
| 0.069597
| 0.380952
| 0.348718
| 0.348718
| 0.316484
| 0.057143
| 0.057143
| 0
| 0.001861
| 0.23384
| 2,104
| 49
| 114
| 42.938776
| 0.844913
| 0.281844
| 0
| 0.2
| 0
| 0
| 0.204136
| 0
| 0
| 0
| 0
| 0.020408
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0.033333
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b772e552dd2f4d89f3edbd1233977b33bf49895
| 542
|
py
|
Python
|
solutions/593_valid_square.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/593_valid_square.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/593_valid_square.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
class Solution:
    def validSquare(self, p1: List[int], p2: List[int], p3: List[int], p4: List[int]) -> bool:
        """Return True iff the four points form a square with positive area.

        Four points form a square exactly when their multiset of 6 pairwise
        squared distances is [s, s, s, s, 2s, 2s] with s > 0: four equal
        sides plus two equal diagonals of twice the squared side length.

        Bug fix: the previous check only required the two largest distances
        to be equal, non-zero overall, and perpendicular, which also accepts
        some non-square quadrilaterals (e.g. (0,0), (2,0), (1,1.5), (1,-0.5)
        has equal perpendicular "diagonals" that do not bisect each other).

        Running time: O(1) -- a fixed 6 pairs.
        """
        points = [p1, p2, p3, p4]
        dists = sorted(
            (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
            for i, a in enumerate(points)
            for b in points[i + 1:]
        )
        side, diagonal = dists[0], dists[-1]
        return (
            side > 0
            and dists[0] == dists[1] == dists[2] == dists[3]
            and dists[4] == dists[5]
            and diagonal == 2 * side
        )
| 36.133333
| 102
| 0.389299
| 96
| 542
| 2.197917
| 0.364583
| 0.132701
| 0.113744
| 0.056872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106017
| 0.356089
| 542
| 15
| 103
| 36.133333
| 0.498567
| 0.046125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b827211c6d78c4e03e51a44190d8e3f1cffc3db
| 2,443
|
py
|
Python
|
legacy/models/GAT.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | null | null | null |
legacy/models/GAT.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | null | null | null |
legacy/models/GAT.py
|
astrockragh/IceCube
|
eba09e9f9a3c351dbf05496821bcd7d29ac0261c
|
[
"MIT"
] | 2
|
2021-03-03T20:39:38.000Z
|
2021-06-09T11:58:00.000Z
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dropout, Input
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.random import set_seed
from spektral.transforms.layer_preprocess import LayerPreprocess
from spektral.layers import GATConv
from spektral.layers.pooling.global_pool import GlobalMaxPool, GlobalAvgPool, GlobalSumPool
from tensorflow.keras.layers import Dense, LeakyReLU, BatchNormalization
from tensorflow.keras.activations import tanh
from tensorflow.sparse import SparseTensor
# Width of the hidden node representations shared by all GAT layers below.
hidden_states = 16
# Shared leaky-ReLU activation applied between the dense decoder layers.
activation = LeakyReLU(alpha = 0.1)
class model(Model):
    """Graph attention network: three stacked GATConv layers, three global
    poolings (avg/sum/max) concatenated, then a dense decoder with batch
    normalization producing ``n_out`` values per graph.

    NOTE(review): the exact meaning of the n_out=4 target is not visible
    here — confirm with the training code.
    """
    def __init__(self, n_out = 4):
        super().__init__()
        # Define layers of the model
        self.att1 = GATConv(hidden_states, attn_heads=2, dropout_rate=0.4, activation = "relu", return_attn_coef=False)  # required keyword is channels/hidden states
        self.att2 = GATConv(hidden_states//2, attn_heads=3, dropout_rate=0.1, activation = "relu")  # attn_heads is the time-limiting keyword, watch out with it
        self.att3 = GATConv(hidden_states*2, attn_heads=4, dropout_rate=0.7, activation = "relu")  # hidden_states has to be pretty low as well
        self.Pool1 = GlobalAvgPool()  # good results with all three poolings
        self.Pool2 = GlobalSumPool()
        self.Pool3 = GlobalMaxPool()  # important for angle fitting
        # Decoder widths: 256, 128, 64 units (size * hidden_states).
        self.decode = [Dense(size * hidden_states) for size in [16, 8, 4]]
        self.norm_layers = [BatchNormalization() for i in range(len(self.decode))]
        self.d2 = Dense(n_out)
    def call(self, inputs, training = False):
        """Forward pass. inputs = (node features x, adjacency a, graph index i)
        — presumably Spektral's disjoint mode; confirm with the data loader."""
        x, a, i = inputs
        # a=sp_matrix_to_sp_tensor(a)
        # NOTE(review): the LayerPreprocess(...) return values are discarded,
        # so these two calls have no effect on x or a — likely dead code; confirm.
        LayerPreprocess(self.att1)
        LayerPreprocess(self.att2)
        # x, alpha = self.att1([x,a])
        x = self.att1([x,a])
        x = self.att2([x, a])
        x = self.att3([x,a])
        # Graph-level readout: concatenate all three poolings per graph.
        x1 = self.Pool1([x, i])
        x2 = self.Pool2([x, i])
        x3 = self.Pool3([x,i])
        x = tf.concat([x1, x2, x3], axis = 1)
        # x = tf.concat([x1, x2], axis = 1)
        # x = tf.concat([x2, x3], axis = 1)
        for decode_layer, norm_layer in zip(self.decode, self.norm_layers):
            x = activation(decode_layer(x))
            x = norm_layer(x, training = training)
        x = self.d2(x)
        # return x, alpha
        # tf.print(tf.shape(x))
        return x
| 46.09434
| 164
| 0.669668
| 342
| 2,443
| 4.675439
| 0.365497
| 0.070044
| 0.071295
| 0.013133
| 0.11945
| 0.052533
| 0
| 0
| 0
| 0
| 0
| 0.026842
| 0.222268
| 2,443
| 53
| 165
| 46.09434
| 0.814737
| 0.158821
| 0
| 0
| 0
| 0
| 0.005874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.302326
| 0
| 0.395349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8b83777c4cdb8551be6cb2f0840e3a838be9ce71
| 792
|
py
|
Python
|
pyleecan/Methods/Slot/SlotUD2/get_surface_active.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 95
|
2019-01-23T04:19:45.000Z
|
2022-03-17T18:22:10.000Z
|
pyleecan/Methods/Slot/SlotUD2/get_surface_active.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 366
|
2019-02-20T07:15:08.000Z
|
2022-03-31T13:37:23.000Z
|
pyleecan/Methods/Slot/SlotUD2/get_surface_active.py
|
IrakozeFD/pyleecan
|
5a93bd98755d880176c1ce8ac90f36ca1b907055
|
[
"Apache-2.0"
] | 74
|
2019-01-24T01:47:31.000Z
|
2022-02-25T05:44:42.000Z
|
from numpy import arcsin, exp
from ....Classes.Segment import Segment
from ....Classes.Arc1 import Arc1
from ....Classes.SurfLine import SurfLine
def get_surface_active(self, alpha=0, delta=0):
    """Return the full winding surface.

    Parameters
    ----------
    self : SlotUD2
        A SlotUD2 object
    alpha : float
        float number for rotation (Default value = 0) [rad]
    delta : complex
        complex number for translation (Default value = 0)

    Returns
    -------
    surf_wind: Surface
        Surface corresponding to the Winding Area
    """
    lam_name = self.get_name_lam()
    # Work on a copy so the stored active surface is left untouched.
    surf_wind = self.active_surf.copy()
    surf_wind.label = "Wind_" + lam_name + "_R0_T0_S0"
    # Apply the requested placement: rotation first, then translation.
    surf_wind.rotate(alpha)
    surf_wind.translate(delta)
    return surf_wind
| 22.628571
| 59
| 0.64899
| 96
| 792
| 5.25
| 0.541667
| 0.065476
| 0.051587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018456
| 0.247475
| 792
| 34
| 60
| 23.294118
| 0.827181
| 0.42298
| 0
| 0
| 0
| 0
| 0.035176
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.363636
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8b8fdfbcf9706a09172ee59fba63e07828a65599
| 2,492
|
py
|
Python
|
tests/array_creation.py
|
manopapad/legate.numpy
|
896f4fd9b32db445da6cdabf7b78d523fca96936
|
[
"Apache-2.0"
] | null | null | null |
tests/array_creation.py
|
manopapad/legate.numpy
|
896f4fd9b32db445da6cdabf7b78d523fca96936
|
[
"Apache-2.0"
] | null | null | null |
tests/array_creation.py
|
manopapad/legate.numpy
|
896f4fd9b32db445da6cdabf7b78d523fca96936
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import legate.numpy as lg
def test():
    """Exercise legate.numpy array-creation routines and compare each result
    against the NumPy reference (values where defined, otherwise shape)."""

    def _same(a, b):
        # contents and dtype must match
        assert np.array_equal(a, b)
        assert a.dtype == b.dtype

    def _same_shape(a, b):
        # empty()/empty_like() contents are uninitialized: compare shape only
        assert lg.shape(a) == np.shape(b)
        assert a.dtype == b.dtype

    x = lg.array([1, 2, 3])
    y = np.array([1, 2, 3])
    # Round-trip a NumPy array through lg.array.
    _same(x, lg.array(y))
    _same_shape(lg.empty((2, 3)), np.empty((2, 3)))
    _same(lg.zeros((2, 3)), np.zeros((2, 3)))
    _same(lg.ones((2, 3)), np.ones((2, 3)))
    _same(lg.full((2, 3), 3), np.full((2, 3), 3))
    _same_shape(lg.empty_like(x), np.empty_like(y))
    _same(lg.zeros_like(x), np.zeros_like(y))
    _same(lg.ones_like(x), np.ones_like(y))
    _same(lg.full_like(x, 3), np.full_like(y, 3))
    # arange with its various call signatures and dtypes.
    arange_cases = [((10,), {}),
                    ((10,), {'dtype': np.int32}),
                    ((2.0, 10.0), {}),
                    ((2, 30, 3), {})]
    for args, kwargs in arange_cases:
        _same(lg.arange(*args, **kwargs), np.arange(*args, **kwargs))
    # Kept disabled from the original (presumably unsupported str dtype):
    # xfls = lg.full_like(x, '3', dtype=np.str_)
    # yfls = np.full_like(y, '3', dtype=np.str_)
    # assert(lg.array_equal(xfls, yfls))
    # assert(xfls.dtype == yfls.dtype)
    return
if __name__ == "__main__":
    test()
| 25.428571
| 74
| 0.620385
| 428
| 2,492
| 3.537383
| 0.261682
| 0.055482
| 0.094452
| 0.130779
| 0.2893
| 0.176354
| 0.119551
| 0.119551
| 0.119551
| 0.119551
| 0
| 0.034682
| 0.236356
| 2,492
| 97
| 75
| 25.690722
| 0.760904
| 0.28451
| 0
| 0.135593
| 0
| 0
| 0.004535
| 0
| 0
| 0
| 0
| 0
| 0.440678
| 1
| 0.016949
| false
| 0
| 0.033898
| 0
| 0.067797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8b9488221b16161dd9b114f9406bbcd8771818fb
| 11,341
|
py
|
Python
|
nilmtk/simulate/gaussianstatemachines.py
|
BaluJr/energytk
|
e9b06bcb43a40010ccc40a534a7067ee520fb3a7
|
[
"Apache-2.0"
] | 3
|
2018-11-09T16:13:32.000Z
|
2019-11-10T20:10:10.000Z
|
nilmtk/simulate/gaussianstatemachines.py
|
BaluJr/energytk
|
e9b06bcb43a40010ccc40a534a7067ee520fb3a7
|
[
"Apache-2.0"
] | null | null | null |
nilmtk/simulate/gaussianstatemachines.py
|
BaluJr/energytk
|
e9b06bcb43a40010ccc40a534a7067ee520fb3a7
|
[
"Apache-2.0"
] | 1
|
2018-10-24T10:28:52.000Z
|
2018-10-24T10:28:52.000Z
|
import numpy as np
import pandas as pd
from nilmtk import TimeFrame
import datetime
import matplotlib.pyplot as plt
# transients['active transition'].cumsum().resample('2s', how='ffill').plot()
class GaussianStateMachines(object):
    """
    This class is a basic simulator, which creates sample loads by randomizing signatures
    of some predefined statemachine appliances.
    The randomization is performed by a perfect gaussian distribution, whose stddev can be
    defined.
    The signatures of all the appliances are superimposed to yield a final load profile.
    """
    def simulate(self, output_datastore, appliance_specs = None, duration = 8640000, seed = None):
        '''
        Performs the simulation of a defined interval of load profile.
        The style of the output is heavily linked to the EventbasedCombination
        disaggregator.
        The target file is filled with submeters for each appliance and a single
        site_meter.

        NOTE(review): the `appliance_specs` parameter is documented but never
        read — the hard-coded `specs` below are always used; confirm intent.

        Parameters
        ----------
        appliance_specs:
            The specification of the appliances. See the default appliances
            created in the constructor to have a description of the default
            format
        target_file: str
            The path to the file where the powerflow shall be created.
        duration: pd.Timedelta
            Circa duration of the created load profile.
            Default 100 days
        seed: int
            The seed number used within the random generation. This allows to
            generate the same load profile multiple times. If kept None,
            random seed is used.
        Returns
        -------
        transients: pd.DataFrame
            The transients of the load profile
        steady_states: pd.DataFrame
            The steady states of the load profile
        '''
        np.random.seed(seed=seed)
        # Per appliance: list of flags, each flag = ((mean power step, mean
        # spike, mean duration), (their stddevs)).
        specs =[[((2000, 20, 15), (20, 6, 10)), ((-2000, 10, 15), (10, 3, 10))], # Heater 1
                [((1500, 40, 15), (10, 6, 10)), ((-1500, 10, 15), (10, 2, 10))], # Heater 2
                [((130, 10, 90), (10, 5, 30)), ((-130, 10, 300), (10, 6, 50))], # Fridge
                [((80, 0, 4*60*60),(10, 0.01, 60*60*2)), ((-80, 0.0, 10),(10, 0.01, 10))], # Lamp
                [((40, 0, 50), (6, 2, 10)), ((120, 0, 40), (15, 2, 10)), ((-160, 10, 200), (10, 1, 30))], # Complex1
                [((100, 0, 10*60), (10, 0.1, 80)), ((-26, 0, 180), (5, 0.1, 50)), ((-74,5, 480), (15,1,50))], # Complex2
                [((320, 0, 60*2), (10, 3, 10)), ((-40, 0, 180), (5, 0.1, 50)), ((-100,5, 480), (15,1,50)), ((-180,5, 480), (15,1,50))]] # Complex3
        # Breaks as appearances, break duration in Minutes, stddev
        break_spec = [[5, 300, 10], [4, 600, 10], [7, 2*60,10], [1, 60*12, 180], [4, 60, 10], [2, 60, 10], [2, 60*6, 60*60]]
        #for i, bs in enumerate(break_spec):
        #    bs[0] = bs[0]*len(specs[i])
        appliance_names = ['Heater1', 'Heater2', 'Fridge', 'Lamp', 'Complex 1', 'Complex 2', "Complex 3"]
        # Generate powerflow for each appliance
        appliances = []
        for i, spec in enumerate(specs):
            # Estimate how many activation batches fit into `duration`.
            avg_activation_duration = sum(map(lambda e: e[0][-1], spec))
            avg_batch_duration = avg_activation_duration * break_spec[i][0] + (break_spec[i][1]*60)
            num_batches = duration // avg_batch_duration
            activations_per_batch = break_spec[i][0]
            events_per_batch = len(spec) * activations_per_batch
            # Draw gaussian (transition, spike, duration) triples per flag.
            flags = []
            for flag_spec in spec:
                flags.append(np.random.normal(flag_spec[0], flag_spec[1], (num_batches * activations_per_batch, 3)))
            flags = np.hstack(flags)
            # Take care that fits exactly to 5
            cumsum = flags[:,:-3:3].cumsum(axis=1) # assumes 2d; np.add.accumulate
            flags[:,:-3:3][cumsum < 5] += 5 - cumsum[cumsum < 5]
            # Last transition closes the activation back to zero power.
            flags[:,-3] = -flags[:,:-3:3].sum(axis=1) # assumes 2d
            flags = flags.reshape((-1,3))
            # Put appliance to the input format
            appliance = pd.DataFrame(flags, columns=['active transition', 'spike', 'starts'])
            num_batches_exact = len(appliance)//events_per_batch
            breaks = np.random.normal(break_spec[i][1],break_spec[i][2], num_batches_exact)
            # Insert the pause after the last event of each batch (minutes -> s).
            appliance.loc[events_per_batch-1::events_per_batch,'starts'] += (breaks * 60)#num_breaks*break_spec[i][0]
            # Cumulated start offsets (seconds) become a nanosecond DatetimeIndex.
            appliance.index = pd.DatetimeIndex(appliance['starts'].clip(lower=5).shift().fillna(i).cumsum()*1e9, tz='utc')
            appliance['ends'] = appliance.index + pd.Timedelta('1s') # not used anyway; must be shorter than the clip
            appliance.drop(columns=['starts'], inplace=True)
            # Signature = transition plus/minus spike depending on direction.
            appliance.loc[appliance['active transition'] < 0, 'signature'] = appliance['active transition'] - appliance['spike']
            appliance.loc[appliance['active transition'] >= 0, 'signature'] = appliance['active transition'] + appliance['spike']
            appliance['original_appliance'] = i
            appliances.append(appliance[:])
        # Create the overall powerflow as mixture of single appliances
        transients = pd.concat(appliances, verify_integrity = True)
        transients = transients.sort_index()
        # Write into file
        building_path = '/building{}'.format(1)
        for appliance in range(len(appliances)):
            key = '{}/elec/meter{:d}'.format(building_path, appliance + 2)
            # Pair each transition with a zero sample 0.5s earlier so the
            # cumulative sum renders as step functions.
            data = appliances[appliance]['active transition'].append(pd.Series(0, name='power active', index=appliances[appliance]['active transition'].index - pd.Timedelta('0.5sec')))
            data = pd.DataFrame(data.sort_index().cumsum())
            data.columns = pd.MultiIndex.from_tuples([('power', 'active')], names=['physical_quantity', 'type'])
            output_datastore.append(key, data)
        # Site meter: same construction over the combined transients.
        overall = transients['active transition'].append(pd.Series(0, name='power active', index=transients['active transition'].index - pd.Timedelta('0.5sec')))
        overall = pd.DataFrame(overall.sort_index().cumsum())
        overall.columns = pd.MultiIndex.from_tuples([('power', 'active')], names=['physical_quantity', 'type'])
        output_datastore.append('{}/elec/meter{:d}'.format(building_path, 1), overall)
        num_meters = len(appliances) + 1
        # Write the metadata
        timeframe = TimeFrame(start = transients.index[0], end = transients.index[-1])
        self._save_metadata_for_disaggregation(output_datastore, timeframe, num_meters, appliance_names)
        # The immediate result
        steady_states = transients[['active transition']].cumsum().rename(columns={'active transition':'active average'})
        steady_states[['active average']] += 60
        transients = transients[['active transition', 'spike', 'signature', 'ends']]
        return transients, steady_states
    def _save_metadata_for_disaggregation(self, output_datastore, timeframe, num_meters, appliancetypes):
        """
        Stores the metadata within the storage.
        REMINDER: Originally I wanted to do this differently and also store the
        metadata alongside; for lack of time I left it the way it was before.
        This function first checks whether there are already metainformation in the file.
        If yes, it extends them and otherwise it removes them.
        Note that `self.MODEL_NAME` needs to be set to a string before
        calling this method. For example, we use `self.MODEL_NAME = 'CO'`
        for Combinatorial Optimisation.
        TODO:`preprocessing_applied` for all meters
        TODO: submeter measurement should probably be the mains
        measurement we used to train on, not the mains measurement.
        Parameters
        ----------
        output_datastore : nilmtk.DataStore subclass object
            The datastore to write metadata into.
        timeframe : list of nilmtk.TimeFrames or nilmtk.TimeFrameGroup
            The TimeFrames over which this data is valid for.
        num_meters : [int]
            Required if `supervised=False`, Gives for each phase amount of meters
        appliancetypes: [str]
            The names for the different appliances. Is used in plots and error metric
            tables.
        """
        # Global metadata
        meter_devices = {
            'synthetic' : {
                'model': "Synth",
                'sample_period': 0, # Makes it possible to use special load functionality
                'max_sample_period': 1,
                'measurements': [{
                    'physical_quantity': 'power',
                    'type': 'active'
                }]
            }}
        date_now = datetime.datetime.now().isoformat().split('.')[0]
        dataset_metadata = {
            'name': "Synthetic Gaussian Statemachine",
            'date': date_now,
            'meter_devices': meter_devices,
            'timeframe': timeframe.to_dict()
        }
        output_datastore.save_metadata('/', dataset_metadata)
        # Building metadata always stored for the new buildings
        phase_building = 1
        building_path = '/building{}'.format(phase_building)
        mains_data_location = building_path + '/elec/meter1'
        # Main meter is sum of all single appliances:
        elec_meters = {}
        elec_meters[1] = {
            'device_model': 'synthetic',
            'site_meter': True,
            'data_location': mains_data_location,
            'preprocessing_applied': {}, # TODO
            'statistics': {
                'timeframe': timeframe.to_dict()
            }
        }
        def update_elec_meters(meter_instance):
            # Register one submeter entry pointing at its datastore key.
            elec_meters.update({
                meter_instance: {
                    'device_model': 'synthetic', # self.MODEL_NAME,
                    'submeter_of': 1,
                    'data_location': (
                        '{}/elec/meter{}'.format(
                            building_path, meter_instance)),
                    'preprocessing_applied': {}, # TODO
                    'statistics': {
                        'timeframe': timeframe.to_dict()
                    }
                }
            })
        # Appliances and submeters:
        appliances = []
        # Submeters (Starts at 2 because meter 1 is mains and 0 not existing)
        for chan in range(2, num_meters):
            update_elec_meters(meter_instance=chan)
            appliance = {
                'original_name': appliancetypes[chan-2],
                'meters': [chan],
                'type': appliancetypes[chan-2],
                'instance': chan - 1
            }
            appliances.append(appliance)
        building_metadata = {
            'instance': (phase_building),
            'elec_meters': elec_meters,
            'appliances': appliances,
        }
        output_datastore.save_metadata(building_path, building_metadata)
| 49.308696
| 184
| 0.571907
| 1,276
| 11,341
| 4.974922
| 0.30721
| 0.032766
| 0.009452
| 0.003308
| 0.137051
| 0.113264
| 0.101607
| 0.08995
| 0.071676
| 0.071676
| 0
| 0.045798
| 0.310731
| 11,341
| 229
| 185
| 49.524017
| 0.766279
| 0.288687
| 0
| 0.088
| 0
| 0
| 0.125825
| 0.005545
| 0
| 0
| 0
| 0.0131
| 0
| 1
| 0.024
| false
| 0
| 0.04
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ba05404268d52ee6b9dcf341a6f596fdbe8cb19
| 453
|
py
|
Python
|
test/unit/test_wrapper.py
|
Bowowzahoya/cordis_search
|
8766717fdd785b2768785b9147e63cac62dfbd43
|
[
"MIT"
] | null | null | null |
test/unit/test_wrapper.py
|
Bowowzahoya/cordis_search
|
8766717fdd785b2768785b9147e63cac62dfbd43
|
[
"MIT"
] | null | null | null |
test/unit/test_wrapper.py
|
Bowowzahoya/cordis_search
|
8766717fdd785b2768785b9147e63cac62dfbd43
|
[
"MIT"
] | null | null | null |
import pandas as pd
from context import *
from cordis_search import wrapper as wr
# Fixture: the FP7 test-project table; RESOURCES_FOLDER is provided by the
# `context` star-import above.
TEST_PROJECTS_FILE = pd.read_csv(RESOURCES_FOLDER+"fp7_test_projects.csv", sep=";")
# NOTE(review): leftover debug print at import time — consider removing.
print(TEST_PROJECTS_FILE)
def test_search():
    """The query must select exactly the two known matching FP7 projects."""
    query = "multiculturalism"
    hits = wr.search(TEST_PROJECTS_FILE, query)
    # Compare as sets: row order is irrelevant.
    assert set(hits.index.to_list()) == {267583, 287711}
def test_summary():
    """Smoke test: summary() must run without raising on the fixture table."""
    wr.summary(TEST_PROJECTS_FILE)
# NOTE(review): called at import time, not only under pytest — confirm intent.
test_summary()
| 25.166667
| 83
| 0.766004
| 64
| 453
| 5.125
| 0.5
| 0.182927
| 0.195122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032911
| 0.128035
| 453
| 18
| 84
| 25.166667
| 0.797468
| 0
| 0
| 0
| 0
| 0
| 0.0837
| 0.046256
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.166667
| false
| 0
| 0.25
| 0
| 0.416667
| 0.083333
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ba29d9fb01d60102507bf5db6c09210143677fa
| 3,471
|
py
|
Python
|
release/util/splunkbase_releaser.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 9
|
2017-07-31T16:13:51.000Z
|
2021-01-06T15:02:36.000Z
|
release/util/splunkbase_releaser.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 51
|
2017-10-24T17:25:44.000Z
|
2022-03-31T16:47:58.000Z
|
release/util/splunkbase_releaser.py
|
xynazog/amp4e_splunk_events_input
|
a5bb57cf82ca3e96fa9a444e5e5e9789eb16b70b
|
[
"BSD-2-Clause"
] | 12
|
2017-08-01T08:59:39.000Z
|
2021-02-24T21:10:46.000Z
|
import os
from distutils.dir_util import copy_tree
from invoke import task, run
import shutil
class SplunkbaseReleaser:
    """Build a Splunkbase release archive (.spl) for the amp4e events app.

    Stages the app directories under /tmp, bundles the required third-party
    Python libraries, fixes permissions, swaps in the Splunkbase README and
    LICENSE, and tars everything into ``<app_dir>/release/<APP_NAME>.spl``.
    """

    # App directories included in the archive.
    DIRS_TO_ARCHIVE = ['appserver', 'bin', 'certs', 'default', 'metadata', 'README', 'static']
    APP_NAME = 'amp4e_events_input'
    # Splunk's bundled site-packages, source of the vendored libraries.
    PATH_TO_PYTHON_LIBS = '/opt/splunk/lib/python3.7/site-packages'
    PYTHON_LIBS_TO_ARCHIVE = ['splunklib', 'pika']
    # Patterns excluded from the tarball.
    EXCLUDED_FILES = ['local.meta', 'requirements-splunk.txt', '*.pyc', '*.pyo']
    SPLUNKBASE_README = 'README_SPLUNKBASE.md'
    LICENSE = 'LICENSE'

    def __init__(self, app_dir):
        """app_dir: root directory of the app checkout."""
        print(app_dir)
        self.app_dir = app_dir

    @property
    def _tmp_dir(self):
        # Root of the staging area.
        return os.path.join('/tmp')

    @property
    def _tmp_app_dir(self):
        # Staging directory the archive is built from.
        return os.path.join(self._tmp_dir, self.APP_NAME)

    @property
    def _readme_splunkbase_path(self):
        return os.path.join(self.app_dir, self.SPLUNKBASE_README)

    @property
    def _license_path(self):
        return os.path.join(self.app_dir, self.LICENSE)

    @property
    def _excluded_files_arguments(self):
        # One --exclude='...' tar argument per pattern, space separated.
        return ' '.join(map(lambda f: "--exclude='{}'".format(f), self.EXCLUDED_FILES))

    @property
    def _release_file_path(self):
        return os.path.join(self.app_dir, 'release', '{}.spl'.format(self.APP_NAME))

    def __call__(self):
        """Run the full release pipeline, then clean up the staging dir."""
        self.prepare()
        self.copy_dirs()
        self.copy_python_libs()
        self.make_bin_dir_executable()
        self.copy_splunk_readme()
        self.copy_license()
        self.create_archive()
        self._remove_tmp_app_dir()

    def prepare(self):
        """Start from a clean staging directory and drop any stale archive."""
        self._remove_tmp_app_dir()
        self._remove_release_file()
        self._create_tmp_app_dir()

    def copy_python_libs(self):
        """Copy the vendored third-party libraries into the staged bin/ dir.

        Raises
        ------
        invoke.Exit
            When a destination directory already exists (name conflict).
        """
        for lib in self.PYTHON_LIBS_TO_ARCHIVE:
            dest_dir = os.path.join(self._tmp_app_dir, 'bin', lib)
            if os.path.isdir(dest_dir):
                # BUG FIX: the original evaluated `invoke.Exit(...)` — `invoke`
                # itself was never imported (only `task, run` were), causing a
                # NameError, and even with the name fixed the exception was
                # constructed but never raised. Import the name locally and
                # actually raise it so the conflict aborts the release.
                from invoke import Exit
                raise Exit('The directory {} already exists and conflicts with a native Python package. '
                           'Please rename or delete it.'.format(dest_dir))
            else:
                copy_tree(os.path.join(self.PATH_TO_PYTHON_LIBS, lib), dest_dir)

    def make_bin_dir_executable(self):
        """chmod 755 every file and directory under the staged bin/ tree."""
        for root, dirs, files in os.walk(os.path.join(self._tmp_app_dir, 'bin')):
            for f in files:
                os.chmod(os.path.join(root, f), 0o755)
            for d in dirs:
                os.chmod(os.path.join(root, d), 0o755)

    def create_archive(self):
        """Tar the staged app directory into the .spl release file."""
        print("CREATING FILE")
        run("tar -czf {} {} -C {} {}"
            .format(self._release_file_path, self._excluded_files_arguments, self._tmp_dir, self.APP_NAME))

    def _remove_release_file(self):
        # Best effort: only remove when present.
        if os.path.exists(self._release_file_path):
            os.remove(self._release_file_path)

    def copy_dirs(self):
        """Copy every release directory from the checkout into staging."""
        for d in self.DIRS_TO_ARCHIVE:
            copy_tree(os.path.join(self.app_dir, d), os.path.join(self._tmp_app_dir, d))

    def copy_splunk_readme(self, dest_file='README.md'):
        """Install the Splunkbase README as the archive's README.md."""
        shutil.copyfile(self._readme_splunkbase_path, os.path.join(self._tmp_app_dir, dest_file))

    def copy_license(self):
        """Copy the LICENSE file into the staging directory."""
        shutil.copyfile(self._license_path, os.path.join(self._tmp_app_dir, self.LICENSE))

    def _remove_tmp_app_dir(self):
        if os.path.isdir(self._tmp_app_dir):
            shutil.rmtree(self._tmp_app_dir)

    def _create_tmp_app_dir(self):
        if not os.path.isdir(self._tmp_app_dir):
            os.makedirs(self._tmp_app_dir)
| 34.71
| 109
| 0.645923
| 486
| 3,471
| 4.277778
| 0.228395
| 0.066378
| 0.064935
| 0.074074
| 0.285233
| 0.224627
| 0.138047
| 0.103896
| 0.05291
| 0.036556
| 0
| 0.004131
| 0.232786
| 3,471
| 99
| 110
| 35.060606
| 0.776568
| 0
| 0
| 0.1
| 0
| 0
| 0.106598
| 0.017862
| 0
| 0
| 0
| 0
| 0
| 1
| 0.225
| false
| 0
| 0.05
| 0.075
| 0.45
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8ba8ed599cf94b78021c68a977f2d000df6dcd16
| 2,476
|
py
|
Python
|
aiida_jutools/__init__.py
|
PhilippRue/aiida-jutools
|
66070e7077cb454bcfd70dd3327b335499556a16
|
[
"MIT"
] | 5
|
2020-12-11T13:32:09.000Z
|
2022-01-19T08:36:40.000Z
|
aiida_jutools/__init__.py
|
PhilippRue/aiida-jutools
|
66070e7077cb454bcfd70dd3327b335499556a16
|
[
"MIT"
] | 7
|
2021-01-28T10:24:13.000Z
|
2021-08-18T13:42:47.000Z
|
aiida_jutools/__init__.py
|
PhilippRue/aiida-jutools
|
66070e7077cb454bcfd70dd3327b335499556a16
|
[
"MIT"
] | 5
|
2020-12-07T17:13:38.000Z
|
2021-11-25T09:58:48.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=unused-import
###############################################################################
# Copyright (c), Forschungszentrum Jülich GmbH, IAS-1/PGI-1, Germany. #
# All rights reserved. #
# This file is part of the aiida-jutools package. #
# (AiiDA JuDFT tools) #
# #
# The code is hosted on GitHub at https://github.com/judftteam/aiida-jutools. #
# For further information on the license, see the LICENSE.txt file. #
# For further information please visit http://judft.de/. #
# #
###############################################################################
"""AiiDA JuTools.
We recommend using this library with the import statement ``import aiida_jutools as jutools``. In your code,
you can then call all available tools like so: ``jutools.package.tool()``.
"""
__version__ = "0.1.0-dev1"
# Import all of the library's user packages.
from . import code
from . import computer
from . import group
from . import io
from . import logging
from . import meta
from . import node
from . import plugins
from . import process
from . import process_functions
from . import submit
from . import structure
# # import all of the library's developer packages.
from . import _dev
# Potentially problematic imports:
# - kkr: As soon as aiida-kkr becomes dependent on aiida-jutools, this import MIGHT introduce a circular
# dependency. A simple test (made aiida-kkr import aiida-jutools) had no such effect. But if it
# occurs, here a few potential solutions:
# - Decouple the kkr package = remove from import list above. Then all code using it must be updated
# to import it separately, like from aiida_jutools import kkr as _jutools_kkr. Might break external code.
# - Hide all aiida-kkr imports = in resp. module, move them inside the tools that use them. If it works,
# this might be a solution that does not break external code.
#
# The potential problem and the solution stated above, if it becomes one, applies to other JuDFTTeam plugins as well,
# should they start using aiida-jutools as common codebase (aiida-fleur, aiida-spirit, aiida-spex, ...).
| 51.583333
| 117
| 0.586026
| 298
| 2,476
| 4.83557
| 0.503356
| 0.097155
| 0.029146
| 0.019431
| 0.030534
| 0.030534
| 0
| 0
| 0
| 0
| 0
| 0.003944
| 0.283118
| 2,476
| 47
| 118
| 52.680851
| 0.807887
| 0.784733
| 0
| 0
| 0
| 0
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.928571
| 0
| 0.928571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
8baf2837359bef97c791a3fa5aa72048d1181a43
| 590
|
py
|
Python
|
examples/parallel_spectra.py
|
zhaonat/py-maxwell-fd3d
|
bfa4fb826401b98371fdd9306c5fee2e74e7e545
|
[
"MIT"
] | 3
|
2022-01-21T03:53:25.000Z
|
2022-01-23T04:54:43.000Z
|
examples/parallel_spectra.py
|
Guowu-Mcgill/py-maxwell-fd3d
|
bfa4fb826401b98371fdd9306c5fee2e74e7e545
|
[
"MIT"
] | null | null | null |
examples/parallel_spectra.py
|
Guowu-Mcgill/py-maxwell-fd3d
|
bfa4fb826401b98371fdd9306c5fee2e74e7e545
|
[
"MIT"
] | 1
|
2022-01-23T04:54:47.000Z
|
2022-01-23T04:54:47.000Z
|
import os,sys
Nthread = 1
# Pin every common BLAS / threading backend to a single thread so that
# parallelism comes from MPI ranks rather than intra-process threads.
for _env_var in ("OMP_NUM_THREADS",
                 "OPENBLAS_NUM_THREADS",
                 "MKL_NUM_THREADS",
                 "VECLIB_MAXIMUM_THREADS",
                 "NUMEXPR_NUM_THREADS"):
    os.environ[_env_var] = str(Nthread)
## generate spectrum, which requires several simulations... using mpi4py
# test system will initially be a fabry-perot slab, since the spectrum is analytically determinable
| 59
| 99
| 0.798305
| 88
| 590
| 5.125
| 0.431818
| 0.177384
| 0.110865
| 0.254989
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.098305
| 590
| 10
| 99
| 59
| 0.834586
| 0.520339
| 0
| 0
| 0
| 0
| 0.330909
| 0.08
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8bb228a7e4380e3d2e78dff77a6e5e75257a88f3
| 1,109
|
py
|
Python
|
main.py
|
Zlobin/wp-updater
|
e10ebbb7ddd2a2398c7a660d134ce8598738fe7d
|
[
"MIT"
] | null | null | null |
main.py
|
Zlobin/wp-updater
|
e10ebbb7ddd2a2398c7a660d134ce8598738fe7d
|
[
"MIT"
] | null | null | null |
main.py
|
Zlobin/wp-updater
|
e10ebbb7ddd2a2398c7a660d134ce8598738fe7d
|
[
"MIT"
] | null | null | null |
# python3 main.py
import urllib.request
import zipfile
import os
import shutil
# @TODO change it
# eg.: /var/www/blog
# Path of the live WordPress installation to upgrade.
OLD_WP_PATH = ''
# Scratch directory into which the fresh WordPress release is extracted.
NEW_WP_PATH_TMP = ''
# Abort when either path is unset or missing.
# NOTE(review): os._exit(0) reports success (status 0) even on this
# misconfiguration — a nonzero exit would be clearer; confirm intent.
if not (os.path.exists(OLD_WP_PATH)) or not (os.path.exists(NEW_WP_PATH_TMP)):
    os._exit(0)
WP_URL = 'http://wordpress.org/latest.zip'
EXTRACTED_NAME = 'wordpress'
NEW_WP_PATH = os.path.join(NEW_WP_PATH_TMP, EXTRACTED_NAME)
# Download the file from url, save it in a temporary directory and get the
# path to it (e.g. '/tmp/tmpb43hma') in the `wp_archive` variable:
wp_archive, headers = urllib.request.urlretrieve(WP_URL)
with zipfile.ZipFile(wp_archive, 'r') as zf:
    zf.extractall(NEW_WP_PATH_TMP)
os.remove(wp_archive)
# Remove new files that must not overwrite the site's own content.
shutil.rmtree(os.path.join(NEW_WP_PATH, 'wp-content'))
os.remove(os.path.join(NEW_WP_PATH, 'readme.html'))
# Copy content to the new WP
shutil.copy2(os.path.join(OLD_WP_PATH, 'wp-config.php'), NEW_WP_PATH)
shutil.copytree(os.path.join(OLD_WP_PATH, 'wp-content'), os.path.join(NEW_WP_PATH, 'wp-content'))
# Swap the upgraded tree into place of the old installation.
shutil.rmtree(OLD_WP_PATH)
shutil.copytree(NEW_WP_PATH, OLD_WP_PATH)
shutil.rmtree(NEW_WP_PATH)
| 27.04878
| 97
| 0.757439
| 199
| 1,109
| 3.984925
| 0.361809
| 0.128625
| 0.124842
| 0.06053
| 0.218159
| 0.171501
| 0.123581
| 0.070618
| 0
| 0
| 0
| 0.005045
| 0.106402
| 1,109
| 40
| 98
| 27.725
| 0.795156
| 0.208296
| 0
| 0
| 0
| 0
| 0.109195
| 0
| 0
| 0
| 0
| 0.025
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8bb63310aa365a92b778a469e0b31fbf329b889f
| 7,711
|
py
|
Python
|
v0/aia_eis_v0/circuits/circuit_pack_01.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | 1
|
2022-03-02T12:57:19.000Z
|
2022-03-02T12:57:19.000Z
|
v0/aia_eis_v0/circuits/circuit_pack_01.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | null | null | null |
v0/aia_eis_v0/circuits/circuit_pack_01.py
|
DreamBoatOve/aia_eis
|
458b4d29846669b10db4da1b3e86c0b394614ceb
|
[
"MIT"
] | null | null | null |
from circuits.elements import ele_C as C
from circuits.elements import ele_L as L
from circuits.elements import ele_Warburg as WB
from circuits.elements import ele_Q as Q
"""
支惠在原始的几个电路基础上新加了很多简单的单路,之后有空再去合并
"""
"""
Define all the circuits used in this project
Python 电路模型函数名 命名规则:
‘a’ == ‘(’; ‘b’ == ‘)’,直接用字母a替代左括号,用字母b替代右括号
Circuit(ECM) No. CDC Function
0 R(CR) RaCRb, Simplified Randles Cell
0 R0aC0R1b
1 R(QR) RaQRb
1 R(QR) R0aQ0R1b
2 R(QR)(QR) RaQRbaQRb
2 R(QR)(QR) R0aQ0R1baQ1R2b
3 R(QR(LR)) RaQRaLRbb
3 R(QR(LR)) R0aQ0R1aL0R2bb
4 R(Q(RW)) RaQaRWbb
4 R(Q(RW)) R0aQ0aR1W0bb
5 R(QR)(QR)W RaQRbaQRbW
5 R(QR)(QR)W R0aQ0R1baQ1R2bW0
6 R(QR)(Q(RW)) RaQRbaQaRWbb
6 R(QR)(Q(RW)) R0aQ0R1baQ1aR2W0bb
7 R(QR)W RaQRbW
7 R(QR)W R0aQ0R1bW0
8 R(Q(RW))Q RaQaRWbbQ
8 R(Q(RW))Q R0aQ0aR1W0bbQ1
9 R(Q(R(QR))) RaQaRaQRbbb
9 R(Q(R(QR))) R0aQ0aR1aQ1R2bbb
Q_pair = (q, n) or [q, n]
q: CPE coefficient, Constant phase element [s^n/ohm]
n: Constant phase elelment exponent [-]
WB_sigma: warburg coefficient
"""
# ECM-0 R(CR)
# ECM-0 R0(C0R1)
def RaCRb(w, R0, R1, C0):
    """Impedance of R0 in series with the parallel pair (C0 || R1).

    `w` is the angular frequency; returns a complex impedance.
    """
    # RaCRb == R0aC0R1b, Simplified Randles Cell
    z = R0 + 1 / (1 / R1 + 1j * w * C0)
    return z

# ECM-1 R(QR), already include ECM-0, when n = 1
def RaQRb(w, R0, Q0_pair, R1):
    """R0 in series with (Q0 || R1); Q0_pair = (q, n) of the CPE."""
    z = R0 + 1 / ((1 / R1) + (1 / Q(w, q = Q0_pair[0], n = Q0_pair[1])))
    return z

# ECM-2 R(QR)(QR)
def RaQRbaQRb(w, R0, Q0_pair, R1, Q1_pair, R2):
    """R0 + (Q0 || R1) + (Q1 || R2), all in series."""
    z = R0 \
        + 1 / ((1 / R1) + (1 / Q(w, q = Q0_pair[0], n = Q0_pair[1])))\
        + 1 / ((1 / R2) + (1 / Q(w, q = Q1_pair[0], n = Q1_pair[1])))
    return z

# ECM-3 R(QR(LR))
def RaQRaLRbb(w, R0, Q0_pair, R1, L0, R2):
    """R0 in series with Q0 || R1 || (L0 + R2)."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0], n=Q0_pair[1])) + (1 / R1) + (1 / (L(w, L0) + R2)))
    return z

# ECM-4 R(Q(RW))
def RaQaRWbb(w, R0, Q0_pair, R1, W0):
    """R0 in series with Q0 || (R1 + Warburg(W0))."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0], n=Q0_pair[1])) + (1 / (R1 + WB(w, sigma=W0))))
    return z

# ECM-5 R(QR)(QR)W
def RaQRbaQRbW(w, R0, Q0_pair, R1, Q1_pair, R2, W0):
    """R0 + (Q0 || R1) + (Q1 || R2) + Warburg(W0), all in series."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0], n=Q0_pair[1])) + (1/R1)) \
        + 1 / ((1 / Q(w, q=Q1_pair[0], n=Q1_pair[1])) + (1/R2)) \
        + WB(w, sigma=W0)
    return z

# ECM-6 R(QR)(Q(RW))
def RaQRbaQaRWbb(w, R0, Q0_pair, R1, Q1_pair, R2, W0):
    """R0 + (Q0 || R1) + (Q1 || (R2 + Warburg(W0))), all in series."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0],n =Q0_pair[1])) + (1/R1)) \
        + 1 / ((1 / Q(w, q=Q1_pair[0],n=Q1_pair[1])) + (1/(R2 + WB(w, sigma=W0))))
    return z

# ECM-7 R(QR)W
def RaQRbW(w, R0, Q0_pair, R1, W0):
    """R0 + (Q0 || R1) + Warburg(W0), all in series."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0], n=Q0_pair[1])) + (1/R1)) + WB(w, sigma=W0)
    return z

# ECM-8 R(Q(RW))Q
def RaQaRWbbQ(w, R0, Q0_pair, R1, W0, Q1_pair):
    """R0 + (Q0 || (R1 + Warburg(W0))) + Q1, all in series."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0],n=Q0_pair[1])) + (1 / (R1 + WB(w, sigma=W0)))) \
        + Q(w, q=Q1_pair[0], n=Q1_pair[1])
    return z

# ECM-9 R(Q(R(QR)))
def RaQaRaQRbbb(w, R0, Q0_pair, R1, Q1_pair, R2):
    """R0 in series with Q0 || (R1 + (Q1 || R2))."""
    z = R0 + 1 / ((1 / Q(w, q=Q0_pair[0], n=Q0_pair[1])) + ( 1 / ( R1 + ( 1 / ( 1/Q(w, q=Q1_pair[0],n=Q1_pair[1]) + 1/R2)))) )
    return z
# ------------------ ECMs are not numbered ------------------
# DPFC: ECM-10 R0(C0R1)(C1(R2W0))
def RaCRbaCaRWbb(w, R0, C0, R1, C1, R2, W0):
    """R0 + (C0 || R1) + (C1 || (R2 + Warburg(W0))), all in series."""
    z = R0 + 1/(1/R1 + 1j * w * C0) + 1 / (1j * w * C1 + 1/(R2 + WB(w, sigma=W0)))
    return z

# DPFC: ECM-11 R0(C0R1(R2W0))(Q0R3)
def RaCRaRWbbaQRb(w, R0, C0, R1, R2, W0, Q0_pair, R3):
    """R0 + (C0 || R1 || (R2 + Warburg(W0))) + (Q0 || R3), all in series."""
    z = R0 + 1 / (1j * w * C0 + 1/R1 + 1 / (R2 + WB(w, sigma=W0)) ) + 1 / (1 / Q(w, q=Q0_pair[0], n=Q0_pair[1]) + 1/R3)
    return z
# ------------------ ECMs are not numbered ------------------
#NEW RULE: ecm_2(two element)_001(Sequence)
#ecm_2_001 R0R1
def RR(R0,R1):
    """Two resistors in series."""
    z = R0 + R1
    return z
#ecm_2_002 (R0R1)
def aRRb(R0,R1):
    """Two resistors in parallel."""
    z = 1 / (1 / R0 + 1 / R1)
    return z
#ecm_2_003 R0L0
def RL(w,R0,L0):
    """Resistor R0 in series with inductor L0."""
    z = R0 + L(w, L0)
    return z
#ecm_2_004 (R0L0)
def aRLb(w,R0,L0):
    """Resistor R0 in parallel with inductor L0."""
    z = 1 / (1 / R0 + 1/(L(w, L0) ))
    return z
#ecm_2_005 R0C0
def RC(w, R0, C0):
    """Impedance of resistor R0 in series with capacitor C0: R0 + 1/(j*w*C0).

    Bug fix: the original `1 / 1j * w * C0` parses as `(1/1j) * w * C0`
    (= -1j*w*C0), which is not the capacitive impedance 1/(1j*w*C0);
    compare the correct parallel forms in `RaCRb`/`aRCb`.
    """
    z = R0 + 1 / (1j * w * C0)
    return z
#ecm_2_006 (R0C0)
def aRCb(w, R0, C0):
    """Impedance of resistor R0 in parallel with capacitor C0."""
    admittance = 1 / R0 + 1j * w * C0
    return 1 / admittance
#ecm_3_001 R0R1R2
def RRR(R0,R1,R2):
    """Three resistors in series."""
    z = R0 + R1 + R2
    return z
#ecm_3_002 R0(R1R2)
def RaRRb(R0,R1,R2):
    """R0 in series with (R1 || R2)."""
    z = 1 / (1 / R1 + 1 / R2) + R0
    return z
#ecm_3_003 (R0R1R2)
def aRRRb(R0,R1,R2):
    """Three resistors in parallel."""
    z = 1 / (1 / R0 + 1 / R1 + 1 / R2)
    return z
#ecm_3_004 R0R1L0
def RRL(w, R0,R1,L0):
    """R0, R1 and inductor L0 in series."""
    z = R0 + R1 + L(w, L0)
    return z
#ecm_3_005 R0(R1L0)
def RaRLb(w,R0,R1,L0):
    """R0 in series with (R1 || L0)."""
    z = R0 + 1 / (1 / L(w, L0) + 1 / R1)
    return z
#ecm_3_006 (R0R1)L0
def aRRbL(w,R0,R1,L0):
    """(R0 || R1) in series with inductor L0."""
    z = 1 / (1 / R0 + 1 / R1) + L(w, L0)
    return z
#ecm_3_007 (R0R1L0)
def aRRLb(w,R0,R1,L0):
    """R0, R1 and L0 all in parallel."""
    z = 1 / (1 / R0 + 1 / R1 + 1 / L(w, L0))
    return z
#ecm_3_008 R0L0L1
def RLL(w,R0,L0,L1):
    """R0 and two inductors in series."""
    z = R0 + L(w, L0) + L(w, L1)
    return z
#ecm_3_009 R0(L0L1)
def RaLLb(w,R0,L0,L1):
    """R0 in series with (L0 || L1)."""
    z = R0 + 1/(1 / L(w, L0) + 1 / L(w, L1))
    return z
#ecm_3_010 (R0L0L1)
def aRLLb(w,R0,L0,L1):
    """R0, L0 and L1 all in parallel."""
    z = 1 / (1 / L(w, L0) + 1 / L(w, L1) + 1 / R0)
    return z
#ecm_3_011 (R0L0)L1
def aRLbL(w,R0,L0,L1):
    """(R0 || L0) in series with inductor L1."""
    z = 1 / (1 / L(w, L0) + 1 / R0) + L(w, L1)
    return z
#ecm_3_012 R0R1C0
def RRC(w, R0,R1,C0):
    """R0, R1 and capacitor C0 in series: R0 + R1 + 1/(j*w*C0).

    Bug fix: `1 / 1j * w * C0` parses as `(1/1j) * w * C0`, not the
    capacitive impedance 1/(1j*w*C0).
    """
    z = R0 + R1 + 1 / (1j * w * C0)
    return z
#ecm_3_013 (R0R1)C0
def aRRbC(w, R0,R1,C0):
    """(R0 || R1) in series with capacitor C0.

    Bug fix: `1 / 1j * w * C0` parses as `(1/1j) * w * C0`, not the
    capacitive impedance 1/(1j*w*C0).
    """
    z = 1 / (1 / R0 + 1 / R1) + 1 / (1j * w * C0)
    return z
#ecm_3_014 R0(R1C0)
def RaRCb(w, R0,R1,C0):
    """R0 in series with the parallel pair (R1 || C0)."""
    parallel_admittance = 1 / R1 + 1j * w * C0
    return R0 + 1 / parallel_admittance
#ecm_3_015 (R0R1C0)
def aRRCb(w, R0,R1,C0):
    """R0, R1 and capacitor C0 all in parallel."""
    admittance = 1 / R0 + 1 / R1 + 1j * w * C0
    return 1 / admittance
#ecm_3_016 R0C0C1
def RCC(w, R0,C0,C1):
    """R0 and two capacitors in series: R0 + 1/(j*w*C1) + 1/(j*w*C0).

    Bug fix: `1 / 1j * w * C` parses as `(1/1j) * w * C`, not the
    capacitive impedance 1/(1j*w*C); both capacitor terms are corrected.
    """
    z = R0 + 1 / (1j * w * C1) + 1 / (1j * w * C0)
    return z
#ecm_3_017 (R0C0)C1
def aRCbC(w, R0,C0,C1):
    """(R0 || C0) in series with capacitor C1.

    Bug fix: the series term `1 / 1j * w * C1` parsed as `(1/1j) * w * C1`,
    not the capacitive impedance 1/(1j*w*C1).
    """
    z = 1 / (1 / R0 + 1j * w * C0) + 1 / (1j * w * C1)
    return z
#ecm_3_018 R0(C0C1)
def RaCCb(w, R0,C0,C1):
    """R0 in series with two parallel capacitors (C0 || C1)."""
    parallel_admittance = 1j * w * C0 + 1j * w * C1
    return R0 + 1 / parallel_admittance
#ecm_3_019 (R0C0C1)
def aRCCb(w, R0,C0,C1):
    """R0, C0 and C1 all in parallel."""
    admittance = 1 / R0 + 1j * w * C0 + 1j * w * C1
    return 1 / admittance
#ecm_3_020 R0R1Q0
def RRQ(w, R0,R1,Q0_pair):
    """R0, R1 and CPE Q0 in series; Q0_pair = (q, n)."""
    z = R0 + R1 + Q(w, q = Q0_pair[0], n = Q0_pair[1])
    return z
#ecm_3_021 (R0R1)Q0
def aRRbQ(w, R0, R1, Q0_pair):
    """(R0 || R1) in series with CPE Q0."""
    z = 1 / (1 / R0 + 1 / R1) + Q(w, q = Q0_pair[0], n = Q0_pair[1])
    return z
#ecm_3_022 R0(R1Q0)
def RaRQb(w, R0,R1,Q0_pair):
    """R0 in series with (R1 || Q0)."""
    z = R0 + 1 / (1 / R1 + 1 / Q(w, q = Q0_pair[0], n = Q0_pair[1]))
    return z
#ecm_3_023 (R0R1Q0)
def aRRQb(w, R0,R1,Q0_pair):
    """R0, R1 and Q0 all in parallel."""
    z = 1 / (1 / R0 + 1 / R1 + 1 / Q(w, q = Q0_pair[0], n = Q0_pair[1]))
    return z
#ecm_3_024 RQ0Q1
def RQQ(w, R0,Q0_pair,Q1_pair):
    """R0 and two CPEs in series."""
    z = R0 + Q(w, q = Q0_pair[0], n = Q0_pair[1]) + Q(w, q = Q1_pair[0], n = Q1_pair[1])
    return z
#ecm_3_025 (R0Q0)Q1
def aRQbQ(w, R0, Q0_pair, Q1_pair):
    """(R0 || Q0) in series with CPE Q1."""
    z = 1 / (1 / R0 + 1 / Q(w, q = Q0_pair[0], n = Q0_pair[1])) + Q(w, q = Q1_pair[0], n = Q1_pair[1])
    return z
#ecm_3_026 R(Q0Q1)
def RaQQb(w, R0, Q0_pair, Q1_pair):
    """R0 in series with (Q0 || Q1)."""
    z = R0 + 1 / (1 / Q(w, q = Q1_pair[0], n = Q1_pair[1]) + 1 / Q(w, q = Q0_pair[0], n = Q0_pair[1]))
    return z
# The following ECMs are declared but not implemented yet:
#ecm_3_027 CCQ
#ecm_3_028 C(CQ)
#ecm_3_029 (CCQ)
#ecm_3_030 (CC)Q
#ecm_3_031 CQQ
#ecm_3_032 C(QQ)
#ecm_3_033 Q(CQ)
#ecm_3_034 (CQQ)
# ------------------ ECMs are not numbered ------------------
| 26.317406
| 126
| 0.489171
| 1,479
| 7,711
| 2.440162
| 0.139959
| 0.084788
| 0.113605
| 0.085342
| 0.561929
| 0.468828
| 0.421446
| 0.363536
| 0.319756
| 0.294541
| 0
| 0.164704
| 0.317339
| 7,711
| 293
| 127
| 26.317406
| 0.520897
| 0.155492
| 0
| 0.323944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.309859
| false
| 0
| 0.028169
| 0
| 0.647887
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8bb882a476b7069df7988169620b13ea93893010
| 2,204
|
py
|
Python
|
dj_vercereg/client/vercereg_client.py
|
davidath/dj-vercereg
|
d1ae1dba21cab93c759ecf79346bc60c2d88d7a8
|
[
"Apache-2.0"
] | null | null | null |
dj_vercereg/client/vercereg_client.py
|
davidath/dj-vercereg
|
d1ae1dba21cab93c759ecf79346bc60c2d88d7a8
|
[
"Apache-2.0"
] | null | null | null |
dj_vercereg/client/vercereg_client.py
|
davidath/dj-vercereg
|
d1ae1dba21cab93c759ecf79346bc60c2d88d7a8
|
[
"Apache-2.0"
] | 1
|
2022-03-14T13:33:19.000Z
|
2022-03-14T13:33:19.000Z
|
#!/usr/bin/env python
# Copyright 2014 The University of Edinburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import curses
import collections
import argparse
import datetime
import logging
logging.basicConfig()
logger = logging.getLogger('DJREG_CLIENT')
logger.setLevel(logging.INFO)
from vercereg_lib import VerceRegManager
class VerceRegClient:
    """Client holding a bounded command history and a registry manager."""

    # Maximum number of entries retained in the command history deque.
    HISTORY_LENGTH = 5000
    history = None
    manager = None

    def __init__(self):
        # Bounded deque: entries beyond HISTORY_LENGTH discard the oldest.
        self.history = collections.deque(maxlen=self.HISTORY_LENGTH)
        self.manager = VerceRegManager()
def main():
    """Ad-hoc driver exercising the VERCE registry manager.

    NOTE(review): credentials and object ids are hard-coded and `new_conn`
    is never used - this reads as a scratch/debug entry point rather than
    a finished CLI; confirm before shipping.
    """
    # TODO: Define and implement commands for the client
    # parser = argparse.ArgumentParser(description='Client for the VERCE Registry.')
    # parser.add_argument('command', metavar='Command', type=str,
    #                     help='a VERCE Registry command')
    manager = VerceRegManager()
    manager.login('iraklis', 'iraklis')
    logger.info(manager.get_auth_token())
    # manager.login('admin', 'admin')
    # logger.info(manager.get_auth_token())
    # manager.clone(1, 'cloned_wspc'+'@'.join(str(datetime.datetime.now()).split()))
    # logger.info(manager.get_pe_spec(1, 'pes', 'MyPE'))
    # logger.info(manager.get_pe_spec(1, 'fns', 'Fn1'))  # should raise an exception
    # Delete then re-register the same PE spec, and attach one IN and one OUT
    # connection to the freshly created spec.
    manager.delete_pe_spec(1, 'libpck', 'LibPE11')
    new_pe = manager.register_pe_spec(1, 'libpck', 'LibPE11', descr='Some description for a test PE')
    new_conn = manager.add_pe_connection(str(new_pe['id']), kind='IN', name='CnName', stype='str', dtype='DTYPE', comment='My comment', is_array=True, modifiers='one:two')
    manager.add_pe_connection(str(new_pe['id']), kind='OUT', name='outconn')
if __name__ == '__main__':
main()
| 32.895522
| 169
| 0.720054
| 302
| 2,204
| 5.125828
| 0.536424
| 0.03876
| 0.043928
| 0.05168
| 0.153747
| 0.127907
| 0.127907
| 0.046512
| 0.046512
| 0
| 0
| 0.011841
| 0.156987
| 2,204
| 67
| 170
| 32.895522
| 0.821313
| 0.501815
| 0
| 0
| 0
| 0
| 0.127442
| 0
| 0
| 0
| 0
| 0.014925
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8bbe4c37ae3a9b71342799d82ae3b600239ac59b
| 2,638
|
py
|
Python
|
src/backends/example_mongodb/database.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
src/backends/example_mongodb/database.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
src/backends/example_mongodb/database.py
|
rartino/python-optimade-server
|
84457091c7ec0db52a7e034bb6a7cd4bcbdd4e57
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2019 Rickard Armiento
#
# This file is part of a Python candidate reference implementation of
# the optimade API [https://www.optimade.org/]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pymongo
import threading
class Database(object):
    """Thin wrapper around a MongoDB client for the `optimade_test` database."""

    def __init__(self):
        self.client = pymongo.MongoClient()
        self.db = self.client.optimade_test

    def empty_database(self):
        """Drop the whole test database and re-acquire a handle to it."""
        self.client.drop_database("optimade_test")
        self.db = self.client.optimade_test

    def collection_destroy_if_exists(self, coll):
        """Remove all documents from collection `coll` and drop it if present."""
        # NOTE(review): Collection.remove() is deprecated in modern pymongo
        # (delete_many is the replacement); kept as-is to preserve behaviour
        # on whatever driver version this targets - confirm before upgrading.
        self.db[coll].remove({})
        if coll in self.db.list_collection_names():
            self.db[coll].drop()

    def insert(self, coll, data):
        """Insert a single document into collection `coll`."""
        self.db[coll].insert_one(data)

    def insert_many(self, coll, datas):
        """Insert many documents; print the details and re-raise on bulk errors."""
        try:
            # Dropped the unused `x = ...` binding of the InsertManyResult.
            self.db[coll].insert_many(datas)
        except pymongo.errors.BulkWriteError as e:
            print(e.details)
            raise

    def find(self, coll, query, projection=None, limit=None):
        """Run `query` on collection `coll` and return a cursor.

        `projection` is a list of field names to include (None/[] means all
        fields); `limit` caps the number of returned documents when given.
        Refactored from four duplicated branches into one cursor pipeline;
        the `dict([...])` construction is now a dict comprehension.
        """
        if projection is None or projection == []:
            cursor = self.db[coll].find(query)
        else:
            cursor = self.db[coll].find(query, {field: 1 for field in projection})
        if limit is None:
            return cursor
        return cursor.limit(limit)

    def find_one(self, coll, query):
        """Return the first document matching `query`, or None."""
        return self.db[coll].find_one(query)

    def close(self):
        """Close the underlying MongoDB client connection."""
        self.client.close()
| 35.173333
| 97
| 0.674375
| 370
| 2,638
| 4.756757
| 0.418919
| 0.040909
| 0.051136
| 0.045455
| 0.147727
| 0.136364
| 0.136364
| 0.082386
| 0.082386
| 0.082386
| 0
| 0.002979
| 0.236543
| 2,638
| 74
| 98
| 35.648649
| 0.870904
| 0.449962
| 0
| 0.194444
| 0
| 0
| 0.009129
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.055556
| 0.027778
| 0.444444
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8be59cb6314fe38c5f76d083bb061587be243807
| 2,043
|
py
|
Python
|
trend_analyze/test/test_fecth_data_from_api.py
|
popper2710/Trend_Analyze
|
0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f
|
[
"MIT"
] | null | null | null |
trend_analyze/test/test_fecth_data_from_api.py
|
popper2710/Trend_Analyze
|
0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f
|
[
"MIT"
] | 2
|
2020-09-26T14:58:33.000Z
|
2021-03-31T20:01:40.000Z
|
trend_analyze/test/test_fecth_data_from_api.py
|
popper2710/Trend_Analyze
|
0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f
|
[
"MIT"
] | null | null | null |
import unittest
from trend_analyze.config import *
from trend_analyze.src.model import *
from trend_analyze.src.fetch_data_from_api import ApiTwitterFetcher
class TestFetchDataFromApi(unittest.TestCase):
    """
    test class for fetch_data_from_api.py
    """

    def __init__(self, *args, **kwargs):
        super(TestFetchDataFromApi, self).__init__(*args, **kwargs)
        # Shared fetcher instance; quiet=True per its constructor argument.
        self.atf = ApiTwitterFetcher(quiet=True)

    def setUp(self) -> None:
        # Every test runs against the 'test' environment.
        os.environ['TREND_ANALYZE_ENV'] = 'test'

    def tearDown(self) -> None:
        # Restore the configured environment after each test.
        os.environ['TREND_ANALYZE_ENV'] = TREND_ANALYZE_ENV

    def test_fetch_followed_list(self):
        follower = self.atf._fetch_followed_list(TEST_USERNAME)
        self.assertIsInstance(follower[0], User)

    def test_fetch_following_list(self):
        # NOTE(review): this calls _fetch_followed_list, the same method as
        # the test above - presumably it should call _fetch_following_list;
        # confirm against ApiTwitterFetcher before changing.
        following = self.atf._fetch_followed_list(TEST_USERNAME)
        self.assertIsInstance(following[0], User)

    def test_fetch_user_relations(self):
        user_relations = self.atf.fetch_user_relations(TEST_USERNAME)
        self.assertIsInstance(user_relations[0], UserRelation)

    def test_fetch_user_info(self):
        user = self.atf.fetch_user_info(TEST_USER_ID)
        self.assertEqual(user.user_id, TEST_USER_ID)

    def test_fetch_user_tweet(self):
        user_tweet = self.atf.fetch_user_tweet(TEST_USER_ID)
        # The fetcher yields pages; only the first page is checked.
        for i in user_tweet:
            self.assertEqual(i[0].user.user_id, TEST_USER_ID)
            break

    def test_fetch_tweet_including_target(self):
        tweet = self.atf.fetch_tweet_including_target("TEST", is_RT=True, is_name=True)
        for i in tweet:
            self.assertIn("test", i[0].text.lower())
            break

    def test_fetch_trend_availables(self):
        trend_availables = self.atf.fetch_trends_available()
        self.assertEqual(trend_availables[0]['name'], "Worldwide")

    def test_fetch_current_trends(self):
        trends = self.atf.fetch_current_trends(JAPAN_WOEID)
        self.assertNotEqual(trends[0]['trends'], [])
if __name__ == '__main__':
unittest.main()
| 32.951613
| 87
| 0.698972
| 264
| 2,043
| 5.041667
| 0.257576
| 0.047333
| 0.072126
| 0.072126
| 0.225394
| 0.162284
| 0.132231
| 0.084147
| 0.084147
| 0
| 0
| 0.004268
| 0.197259
| 2,043
| 61
| 88
| 33.491803
| 0.807317
| 0.018111
| 0
| 0.047619
| 0
| 0
| 0.036683
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.261905
| false
| 0
| 0.095238
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8be7a3c80ff1ca3e87607f2cacd6a9420c5e831e
| 7,305
|
py
|
Python
|
logging_with_arcpy.py
|
Kortforsyningen/logging-with-arcpy
|
b227aea9c377ca6e98b2c398d294c08bce506f84
|
[
"MIT"
] | 1
|
2022-03-03T13:20:12.000Z
|
2022-03-03T13:20:12.000Z
|
logging_with_arcpy.py
|
Kortforsyningen/logging-with-arcpy
|
b227aea9c377ca6e98b2c398d294c08bce506f84
|
[
"MIT"
] | null | null | null |
logging_with_arcpy.py
|
Kortforsyningen/logging-with-arcpy
|
b227aea9c377ca6e98b2c398d294c08bce506f84
|
[
"MIT"
] | 1
|
2022-03-30T11:33:43.000Z
|
2022-03-30T11:33:43.000Z
|
# -*- coding: utf-8 -*-
"""
This module allows Python's logging module and Esri's ArcMap tools to play nicely together.
Everything here works with the root logger, there is currently no functionality to work with multiple loggers.
The standard logging.basicConfig() doesn't work out of the box in ArcMap tools, because the logging session lives
throughout the ArcMap session, and isn't restarted with every tool invocation. init_logging() can be used instead of
basicConfig(), and takes care of this issue by performing the necessary (re)initialisations.
Furthermore, flush_and_close_logger() should be called at the end of each script, to ensure that all output is flushed
when the script terminates. For the same reason mentioned above, some logging output may be delayed otherwise.
Finally, the ArcPyLogHandler class (mostly adopted from
http://gis.stackexchange.com/questions/135920/arcpy-logging-error-messages) allows the logging module to send output to
ArcMap's tool output window, using arcpy.AddMessage(), etc.
TODO:
- ArcPyLogHandler currently creates an empty file as given in input. If it isn't used, it shouldn't be created.
Created by: Hanne L. Petersen <[email protected]>
Created on: 2016-08-26
"""
import os
import socket
import logging
import logging.handlers
import arcpy
def init_logging(filename="log.txt", level=logging.INFO, fmt="", datefmt='%d-%m-%Y %H:%M', mode='a'):
"""
Initialise a useful logging session. For ArcMap tools, logging.basicConfig probably won't do what you want... (details below)
Use fmt="%(asctime)s %(message)s" to log without user and computer name.
If filename is a relative path, it will be relative to C:\Windows\System32 for tools called from an ArcMap toolbox.
So just use absolute paths...
Note that if you're using the logging module from inside ArcMap, e.g. from a tool in a toolbox, your logging session
will survive within the ArcMap session! In addition, the logging.basicConfig() function is intended to be run only
once ("only the first call will actually do anything: subsequent calls are effectively no-ops", from
https://docs.python.org/2/howto/logging.html#logging-advanced-tutorial)
I.e., you may have two tools that write to different log files - this won't work if you run both tools from the same
ArcMap session, and you do it the naive way.
Or if you run a tool several times inside the same ArcMap session, calling basicConfig WILL DO NOTHING. I.e.
debugging sucks big time.
In ArcMap you probably want to run flush_and_close_logger() at the end of your script, otherwise output can
sometimes be delayed.
Other format placeholders can be found in https://docs.python.org/2/library/logging.html#logrecord-attributes
TODO: The proper way for this module might be something with inheritance or subclassing...
"""
# Some useful snippets for copy-pasting when debugging:
# import logging
# root_logger = logging.getLogger()
# h = root_logger.handlers[0]
# root_logger.removeHandler(h)
# print([h.baseFilename for h in root_logger.handlers])
if fmt == '':
# Default format prepend user name and computer name.
# http://stackoverflow.com/questions/799767/getting-name-of-windows-computer-running-python-script?answertab=active#tab-top
fmt = "%(asctime)s {} {} %(message)s".format(os.getenv('USERNAME'), socket.gethostname().upper())
root_logger = logging.getLogger()
# Need to run regular basicConfig first - seems like it does something we need...
# Whatever logging level is set to a restrictive level here, it will persist throughout (looks like a bug).
# If it's set to a low level here (or NOTSET), it seems to work fine, respecting what's set later.
# The filename is replaced properly later.
logging.basicConfig(level=logging.NOTSET)
# Start by removing all existing handlers from the root logger
# Remove from the back, to avoid the indexes going haywire
for i in range(len(root_logger.handlers)-1, -1, -1):
root_logger.removeHandler(root_logger.handlers[i])
# Then set up the new handler with appropriate formatter
# https://docs.python.org/2/library/logging.handlers.html#logging.FileHandler
add_handler(logging.FileHandler(filename, mode=mode, encoding=None, delay=False), level=level)
def add_handler(h, level=logging.INFO, fmt="", datefmt='%d-%m-%Y %H:%M'):
"""Add a handler."""
root_logger = logging.getLogger()
if fmt == '':
fmt = "%(asctime)s {} {} %(message)s".format(os.getenv('USERNAME'), socket.gethostname().upper())
# Prep the Formatter, and add it
# https://docs.python.org/2/library/logging.html#logging.Formatter
f = logging.Formatter(fmt, datefmt)
# Add the level and formatter to the handler
# https://docs.python.org/2/library/logging.handlers.html#logging.FileHandler
# https://docs.python.org/2/library/logging.html#handler-objects
h.setLevel(level)
h.setFormatter(f)
root_logger.addHandler(h)
def flush_and_close_logger():
    """Flush every root-logger handler, then shut logging down.

    From ArcMap there seem to be some problems with flushing, and this seems to help...
    """
    root = logging.getLogger()
    for handler in root.handlers:
        handler.flush()
    logging.shutdown()
def _logging_is_active():
"""Check if a logging session has been initiated (e.g. with logging.basicConfig())."""
# http://stackoverflow.com/questions/26017073/how-to-get-filename-from-a-python-logger
return len(logging.getLogger().handlers) > 0
class ArcPyLogHandler(logging.handlers.RotatingFileHandler):
    """
    Custom logging class that passes messages to the arcpy tool window.
    From http://gis.stackexchange.com/questions/135920/arcpy-logging-error-messages
    """
    # TODO: This class is still initting a RotatingFileHandler for the init filename and creating a file
    # - this file should be removed (or the init re-implemented)
    def emit(self, record):
        """Write the log message to the ArcMap tool window, routed by severity."""
        # It shouldn't be necessary to reimport, but it seems to be, otherwise it can crash, when several tools are
        # run inside the same ArcMap session...
        # Perhaps the imports from the first run get cleared, but because the logging session somehow survives, they
        # don't get imported again?
        import logging
        import arcpy
        try:
            my_msg = self.format(record)  # fixed this - the code at stackexchange didn't work for me here
            # msg = record.msg.format(record.args)  # old code
        except:
            # NOTE(review): bare `except:` - deliberate best-effort fallback to
            # the raw message template, but it also swallows KeyboardInterrupt;
            # consider narrowing to Exception.
            my_msg = record.msg
        # Route by severity to the matching arcpy message channel.
        if record.levelno >= logging.ERROR:
            arcpy.AddError(my_msg)
        elif record.levelno >= logging.WARNING:
            arcpy.AddWarning(my_msg)
        else:  # everything else goes here (if you don't want debug, remove it from the handler, if you do want it,
            # there's nowhere else to send it to
            arcpy.AddMessage(my_msg)
        # The following line would send the message to the regular RotatingFileHandler, but we don't want that here:
        # super(ArcPyLogHandler, self).emit(record)
# end class ArcPyLogHandler
| 46.826923
| 132
| 0.699384
| 1,051
| 7,305
| 4.831589
| 0.34824
| 0.023631
| 0.017724
| 0.021268
| 0.127806
| 0.110083
| 0.110083
| 0.110083
| 0.088224
| 0.088224
| 0
| 0.008325
| 0.210678
| 7,305
| 155
| 133
| 47.129032
| 0.872355
| 0.70089
| 0
| 0.243902
| 0
| 0
| 0.058918
| 0
| 0
| 0
| 0
| 0.019355
| 0
| 1
| 0.121951
| false
| 0
| 0.170732
| 0
| 0.341463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8bea72f709ee92f0256045645314ed84137d7a0e
| 6,924
|
py
|
Python
|
tests/test_share_resource/test_master_data.py
|
anhlt59/django_refactor
|
1b1d86af9f732a000e30feb7644f6ca60b6e516a
|
[
"MIT"
] | null | null | null |
tests/test_share_resource/test_master_data.py
|
anhlt59/django_refactor
|
1b1d86af9f732a000e30feb7644f6ca60b6e516a
|
[
"MIT"
] | null | null | null |
tests/test_share_resource/test_master_data.py
|
anhlt59/django_refactor
|
1b1d86af9f732a000e30feb7644f6ca60b6e516a
|
[
"MIT"
] | null | null | null |
import pytest
from app.share_resources.master_data.models import *
# test company_type_category
@pytest.mark.django_db
class TestCompanyTypeCategory:
    """CRUD smoke tests for CompanyTypeCategory via its pytest fixture."""

    def test_create(self, create_company_type_category):
        assert CompanyTypeCategory.objects.count() == 1

    def test_update(self, create_company_type_category):
        category = create_company_type_category
        category.name = "category name update"
        category.save()
        category_test = CompanyTypeCategory.objects.get(id=category.id)
        assert category_test.name == "category name update"

    def test_delete(self, create_company_type_category):
        assert CompanyTypeCategory.objects.count() == 1
        CompanyTypeCategory.objects.get(id=create_company_type_category.id).delete()
        assert CompanyTypeCategory.objects.count() == 0

# test company_type_master
@pytest.mark.django_db
class TestCompanyTypeMaster:
    """CRUD smoke tests for CompanyTypeMaster via its pytest fixture."""

    def test_create(self, create_company_type_master):
        assert CompanyTypeMaster.objects.count() == 1

    def test_update(self, create_company_type_master):
        company_type = create_company_type_master
        company_type.name = "company type update"
        company_type.save()
        company_type_test = CompanyTypeMaster.objects.get(id=company_type.id)
        assert company_type_test.name == "company type update"

    def test_delete(self, create_company_type_master):
        assert CompanyTypeMaster.objects.count() == 1
        create_company_type_master.delete()
        assert CompanyTypeMaster.objects.count() == 0
# test work_place_category
@pytest.mark.django_db
class TestWorkPlaceCategory:
    """CRUD smoke tests for WorkPlaceCategory via its pytest fixture."""

    def test_create(self, create_work_place_category):
        assert WorkPlaceCategory.objects.count() == 1

    def test_update(self, create_work_place_category):
        work_place_category = create_work_place_category
        work_place_category.name = "work place category name update"
        work_place_category.save()
        work_place_category_test = WorkPlaceCategory.objects.get(id=create_work_place_category.id)
        assert work_place_category_test.name == "work place category name update"

    def test_delete(self, create_work_place_category):
        assert WorkPlaceCategory.objects.count() == 1
        WorkPlaceCategory.objects.get(id=create_work_place_category.id).delete()
        assert WorkPlaceCategory.objects.count() == 0

# test work_place_master
@pytest.mark.django_db
class TestWorkPlaceMaster:
    """CRUD smoke tests for WorkPlaceMaster via its pytest fixture."""

    def test_create(self, create_work_place_master):
        assert WorkPlaceMaster.objects.count() == 1

    def test_update(self, create_work_place_master):
        work_place = create_work_place_master
        work_place.name = "work place name"
        work_place.save()
        work_place_test = WorkPlaceMaster.objects.get(id=work_place.id)
        assert work_place_test.name == "work place name"

    def test_delete(self, create_work_place_master):
        assert WorkPlaceMaster.objects.count() == 1
        create_work_place_master.delete()
        assert WorkPlaceMaster.objects.count() == 0

# test job_type_category
@pytest.mark.django_db
class TestJobTypeCategory:
    """CRUD smoke tests for JobTypeCategory via its pytest fixture."""

    def test_create(self, create_job_type_category):
        assert JobTypeCategory.objects.count() == 1

    def test_update(self, create_job_type_category):
        job_type = create_job_type_category
        job_type.name = "job name update"
        job_type.save()
        job_type_test = JobTypeCategory.objects.get(id=job_type.id)
        assert job_type_test.name == "job name update"

    def test_delete(self, create_job_type_category):
        assert JobTypeCategory.objects.count() == 1
        create_job_type_category.delete()
        assert JobTypeCategory.objects.count() == 0
# test job_type_master
@pytest.mark.django_db
class TestJobTypeMaster:
    """CRUD smoke tests for JobTypeMaster via its pytest fixture."""

    def test_create(self, create_job_type_master):
        assert JobTypeMaster.objects.count() == 1

    def test_update(self, create_job_type_master):
        job_type = create_job_type_master
        job_type.name = "job type update"
        job_type.save()
        job_type_test = JobTypeMaster.objects.get(id=job_type.id)
        assert job_type_test.name == "job type update"

    def test_delete(self, create_job_type_master):
        assert JobTypeMaster.objects.count() == 1
        create_job_type_master.delete()
        assert JobTypeMaster.objects.count() == 0

# test school_info_master
@pytest.mark.django_db
class TestSchoolInfoMaster:
    """CRUD smoke tests for SchoolInfoMaster via its pytest fixture."""

    def test_create(self, create_school_info_master):
        assert SchoolInfoMaster.objects.count() == 1

    def test_update(self, create_school_info_master):
        school_info = create_school_info_master
        school_info.name = "name update"
        school_info.save()
        school_info_test = SchoolInfoMaster.objects.get(id=school_info.id)
        assert school_info_test.name == "name update"

    def test_delete(self, create_school_info_master):
        assert SchoolInfoMaster.objects.count() == 1
        create_school_info_master.delete()
        assert SchoolInfoMaster.objects.count() == 0

# test media
@pytest.mark.django_db
class TestMedia:
    """CRUD smoke tests for Media via its pytest fixture."""

    def test_create(self, create_media):
        assert Media.objects.count() == 1

    def test_update(self, create_media):
        media = create_media
        media.name = "name update"
        media.save()
        media_update = Media.objects.get(id=media.id)
        assert media_update.name == "name update"

    def test_delete(self, create_media):
        assert Media.objects.count() == 1
        create_media.delete()
        assert Media.objects.count() == 0

# test Plan
@pytest.mark.django_db
class TestPlan:
    """CRUD smoke tests for Plan via its pytest fixture."""

    def test_create(self, create_plan):
        assert Plan.objects.count() == 1

    def test_update(self, create_plan):
        plan = create_plan
        plan.name = "name update"
        plan.save()
        plan_test = Plan.objects.get(id=plan.id)
        assert plan_test.name == "name update"

    def test_delete(self, create_plan):
        assert Plan.objects.count() == 1
        create_plan.delete()
        assert Plan.objects.count() == 0

# test type_auto_reply_message
@pytest.mark.django_db
class TestTypeAutoReplyMessage:
    """CRUD smoke tests for TypeAutoReplyMessage via its pytest fixture."""

    def test_create(self, create_type_auto_reply_message):
        assert TypeAutoReplyMessage.objects.count() == 1

    def test_update(self, create_type_auto_reply_message):
        type_auto_reply_message = create_type_auto_reply_message
        type_auto_reply_message.name = "type update"
        type_auto_reply_message.save()
        type_auto_reply_message_test = TypeAutoReplyMessage.objects.get(id=type_auto_reply_message.id)
        assert type_auto_reply_message_test.name == "type update"

    def test_delete(self, create_type_auto_reply_message):
        assert TypeAutoReplyMessage.objects.count() == 1
        create_type_auto_reply_message.delete()
        assert TypeAutoReplyMessage.objects.count() == 0
| 33.941176
| 102
| 0.718515
| 869
| 6,924
| 5.408516
| 0.067894
| 0.044681
| 0.055319
| 0.051064
| 0.651064
| 0.593617
| 0.49
| 0.408511
| 0.328936
| 0.271064
| 0
| 0.005385
| 0.195407
| 6,924
| 203
| 103
| 34.108374
| 0.83827
| 0.03134
| 0
| 0.225352
| 0
| 0
| 0.047491
| 0
| 0
| 0
| 0
| 0
| 0.28169
| 1
| 0.211268
| false
| 0
| 0.014085
| 0
| 0.295775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4739439933f695399a93b2006266d661c9ac6918
| 1,521
|
py
|
Python
|
sprint/DS-Unit-3-Sprint-3-Productization-and-Cloud/sprint/aq_dashboard.py
|
ndow33/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
17ebfc34991649580fce24dae5c0a7933f5b3095
|
[
"MIT"
] | null | null | null |
sprint/DS-Unit-3-Sprint-3-Productization-and-Cloud/sprint/aq_dashboard.py
|
ndow33/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
17ebfc34991649580fce24dae5c0a7933f5b3095
|
[
"MIT"
] | null | null | null |
sprint/DS-Unit-3-Sprint-3-Productization-and-Cloud/sprint/aq_dashboard.py
|
ndow33/DS-Unit-3-Sprint-3-Productization-and-Cloud
|
17ebfc34991649580fce24dae5c0a7933f5b3095
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import openaq
import requests

# Client handle for the Open AQ air-quality API.
api = openaq.OpenAQ()
# Module-level Flask application object, as Flask conventions expect.
APP = Flask(__name__)
# Persist data in a local SQLite file next to the app.
APP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
DB = SQLAlchemy(APP)
def tuple_list():
    """Return [utc_datetime, value] pairs for PM2.5 readings in Los Angeles.

    Queries the Open AQ API and extracts the UTC timestamp and measured
    value from each entry of the response body.

    Returns:
        list[list]: two-element [utc_date, value] lists, one per measurement.
    """
    status, body = api.measurements(city='Los Angeles', parameter='pm25')
    # The original looped over the results AND kept a manual counter to
    # index the same list; iterate the results directly instead.
    return [[result['date']['utc'], result['value']]
            for result in body["results"]]
# Base route
@APP.route('/')
def root():
    """Render the raw [datetime, value] measurement pairs as a plain string."""
    return str(tuple_list())
class Record(DB.Model):
    # One persisted air-quality measurement (timestamp string + reading).
    id = DB.Column(DB.Integer, primary_key=True)
    # Timestamp kept as a string, capped at 25 characters — presumably the
    # ISO-8601 UTC string from the API; confirm before changing the length.
    datetime = DB.Column(DB.String(25))
    value = DB.Column(DB.Float, nullable=False)
    def __repr__(self):
        # Placeholder repr left by the original author (TODO).
        return 'TODO - write a nice representation of Records'
@APP.route('/refresh')
def refresh():
    """Pull fresh data from Open AQ and replace existing data."""
    DB.drop_all()
    DB.create_all()
    # Persist each (utc datetime, value) pair fetched from Open AQ.
    # Previously the fetched list was assigned to an unused variable and
    # the commit happened with no pending objects, so /refresh stored nothing.
    for utc_date, value in tuple_list():
        DB.session.add(Record(datetime=utc_date, value=value))
    DB.session.commit()
    return 'Data refreshed!'
| 27.160714
| 76
| 0.663379
| 212
| 1,521
| 4.632075
| 0.495283
| 0.040733
| 0.03055
| 0.026477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005973
| 0.229454
| 1,521
| 56
| 77
| 27.160714
| 0.831911
| 0.224852
| 0
| 0
| 0
| 0
| 0.131177
| 0.020665
| 0
| 0
| 0
| 0.017857
| 0
| 1
| 0.108108
| false
| 0
| 0.108108
| 0.027027
| 0.432432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
474730af1ea1af916a12ede7e8f6ca83e5e468a8
| 952
|
py
|
Python
|
pastila/schema.py
|
harlov/pastila
|
27ebe862bc25d5cc1a14766e5eec8f48853098c3
|
[
"MIT"
] | null | null | null |
pastila/schema.py
|
harlov/pastila
|
27ebe862bc25d5cc1a14766e5eec8f48853098c3
|
[
"MIT"
] | null | null | null |
pastila/schema.py
|
harlov/pastila
|
27ebe862bc25d5cc1a14766e5eec8f48853098c3
|
[
"MIT"
] | null | null | null |
from pastila.fields import Field
class Schema(object):
    """Declarative schema: Field class attributes define the loadable fields."""

    data = None

    def __init__(self):
        self.data = {}

    def load(self, data):
        """Load raw values into the schema, then validate the result."""
        for key, raw in data.items():
            self.load_to_field(key, raw)
        self.validate()

    def validate(self):
        """Run each declared field's validator against its loaded value."""
        for name, field in self.fields.items():
            field.validate(self.data[name], self)

    def dump(self):
        """Serialize every declared field back into a plain dict."""
        return {name: field.dump() for name, field in self.fields.items()}

    def __getattr__(self, item):
        # Fall back to loaded data for attribute access (raises KeyError
        # rather than AttributeError for unknown names, as before).
        return self.data[item]

    @property
    def fields(self):
        """Mapping of attribute name -> Field instance declared on the class."""
        return {
            name: attr
            for name, attr in self.__class__.__dict__.items()
            if isinstance(attr, Field)
        }

    def load_to_field(self, field, value):
        # Keys that do not correspond to a declared field are ignored.
        if field not in self.fields:
            return None
        self.data[field] = self.fields[field].load(value)
| 22.666667
| 86
| 0.570378
| 119
| 952
| 4.394958
| 0.285714
| 0.091778
| 0.068834
| 0.080306
| 0.110899
| 0.110899
| 0.110899
| 0
| 0
| 0
| 0
| 0.001536
| 0.316176
| 952
| 41
| 87
| 23.219512
| 0.801843
| 0
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.241379
| false
| 0
| 0.034483
| 0.068966
| 0.482759
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4749535b6dc42716b629b0a869d4e93214bb93a8
| 3,056
|
py
|
Python
|
assignments/assignment2/model.py
|
RuslanOm/dlcourse_ai
|
f4c85497dc4affb942cacb363f17ce63b39c1bd7
|
[
"MIT"
] | null | null | null |
assignments/assignment2/model.py
|
RuslanOm/dlcourse_ai
|
f4c85497dc4affb942cacb363f17ce63b39c1bd7
|
[
"MIT"
] | null | null | null |
assignments/assignment2/model.py
|
RuslanOm/dlcourse_ai
|
f4c85497dc4affb942cacb363f17ce63b39c1bd7
|
[
"MIT"
] | null | null | null |
import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
    """ Neural network with two fully connected layers """

    def __init__(self, n_input, n_output, hidden_layer_size, reg):
        """
        Initializes the neural network

        Arguments:
        n_input, int - dimension of the model input
        n_output, int - number of classes to predict
        hidden_layer_size, int - number of neurons in the hidden layer
        reg, float - L2 regularization strength
        """
        self.reg = reg
        self.fc1 = FullyConnectedLayer(
            n_input=n_input, n_output=hidden_layer_size)
        self.fc2 = FullyConnectedLayer(
            n_input=hidden_layer_size, n_output=n_output)
        self.relu = ReLULayer()

    def compute_loss_and_gradients(self, X, y):
        """
        Computes total loss and updates parameter gradients
        on a batch of training examples

        Arguments:
        X, np array (batch_size, input_features) - input data
        y, np array of int (batch_size) - classes
        """
        # Clear gradients accumulated during the previous pass.
        for layer in (self.fc1, self.fc2):
            for param in layer.params().values():
                param.grad *= 0

        # Forward pass: fc1 -> ReLU -> fc2.
        hidden = self.relu.forward(self.fc1.forward(X))
        scores = self.fc2.forward(hidden)

        # Data loss plus L2 penalties on both weight matrices.
        ce_loss, dscores = softmax_with_cross_entropy(scores, y)
        reg_loss1, dW1_reg = l2_regularization(self.fc1.W.value, self.reg)
        reg_loss2, dW2_reg = l2_regularization(self.fc2.W.value, self.reg)

        # Backward pass in reverse order; layer.backward fills param grads.
        dhidden = self.fc2.backward(dscores)
        self.fc1.backward(self.relu.backward(dhidden))

        # Add the regularization contribution to the weight gradients.
        self.fc1.W.grad += dW1_reg
        self.fc2.W.grad += dW2_reg

        return ce_loss + reg_loss1 + reg_loss2

    def predict(self, X):
        """
        Produces classifier predictions on the set

        Arguments:
        X, np array (test_samples, num_features)

        Returns:
        y_pred, np.array of int (test_samples)
        """
        hidden = self.relu.forward(self.fc1.forward(X))
        scores = self.fc2.forward(hidden)
        # Class with the highest score wins.
        return np.argmax(scores, axis=1)

    def params(self):
        """Mapping of parameter names to Param objects of both layers."""
        return {
            'W1': self.fc1.W,
            'W2': self.fc2.W,
            'B1': self.fc1.B,
            'B2': self.fc2.B,
        }
| 32.510638
| 96
| 0.612238
| 403
| 3,056
| 4.48139
| 0.332506
| 0.034884
| 0.033223
| 0.025471
| 0.127353
| 0.087486
| 0.087486
| 0.056478
| 0.056478
| 0.056478
| 0
| 0.023865
| 0.30072
| 3,056
| 93
| 97
| 32.860215
| 0.821245
| 0.375
| 0
| 0.153846
| 0
| 0
| 0.004654
| 0
| 0
| 0
| 0
| 0.021505
| 0
| 1
| 0.102564
| false
| 0
| 0.051282
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
474e6ccd3a09db8bd61fb6310dfa022bed136ad4
| 892
|
py
|
Python
|
demo/singleperson.py
|
Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer
|
744bfc636463f24c4f78f25684864c2ce4abb43f
|
[
"MIT"
] | 8
|
2020-10-17T14:54:53.000Z
|
2022-02-09T11:03:01.000Z
|
demo/singleperson.py
|
Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer
|
744bfc636463f24c4f78f25684864c2ce4abb43f
|
[
"MIT"
] | 4
|
2021-01-03T16:02:29.000Z
|
2021-11-23T03:26:01.000Z
|
demo/singleperson.py
|
Neerajj9/Computer-Vision-based-Offside-Detection-in-soccer
|
744bfc636463f24c4f78f25684864c2ce4abb43f
|
[
"MIT"
] | 2
|
2021-04-10T07:05:55.000Z
|
2021-09-19T23:22:18.000Z
|
import os
import sys
sys.path.append(os.path.dirname(__file__) + "/../")

# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this demo
# presumably pins an older SciPy — confirm before upgrading dependencies.
from scipy.misc import imread
from util.config import load_config
from nnet import predict
from util import visualize
from dataset.pose_dataset import data_to_input

cfg = load_config("demo/pose_cfg.yaml")
# Load and setup CNN part detector
sess, inputs, outputs = predict.setup_pose_prediction(cfg)
# Read image from file
file_name = "demo/image.png"
image = imread(file_name, mode='RGB')
image_batch = data_to_input(image)
# Compute prediction with the CNN
outputs_np = sess.run(outputs, feed_dict={inputs: image_batch})
scmap, locref, _ = predict.extract_cnn_output(outputs_np, cfg)
# Extract maximum scoring location from the heatmap, assume 1 person
pose = predict.argmax_pose_predict(scmap, locref, cfg.stride)
# Visualise the heatmaps and the pose, then block until a button press.
visualize.show_heatmaps(cfg, image, scmap, pose)
visualize.waitforbuttonpress()
| 25.485714
| 68
| 0.784753
| 133
| 892
| 5.06015
| 0.488722
| 0.023774
| 0.032689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001276
| 0.121076
| 892
| 34
| 69
| 26.235294
| 0.857143
| 0.181614
| 0
| 0
| 0
| 0
| 0.053867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.388889
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
475e2827e051fef8ddf4351c0f8e2268a7395759
| 599
|
py
|
Python
|
setup.py
|
jcanode/small_nn
|
0e7fa58a52b45b2221b66bd0a67bd7395934133c
|
[
"MIT"
] | 1
|
2020-07-06T20:43:23.000Z
|
2020-07-06T20:43:23.000Z
|
setup.py
|
jcanode/small_nn
|
0e7fa58a52b45b2221b66bd0a67bd7395934133c
|
[
"MIT"
] | null | null | null |
setup.py
|
jcanode/small_nn
|
0e7fa58a52b45b2221b66bd0a67bd7395934133c
|
[
"MIT"
] | 1
|
2020-07-04T18:11:43.000Z
|
2020-07-04T18:11:43.000Z
|
import setuptools
# NOTE(review): find_namespace_packages is imported but never used below
# (setuptools.find_packages() is called instead) — candidate for removal.
from setuptools import setup, find_namespace_packages

# Package metadata for the small_nn neural-network framework.
setuptools.setup(
    name="small_nn-jcanode",
    version="0.0.1",
    author="Justin Canode",
    author_email="[email protected]",
    description="A small Neural Network Framework",
    long_description_content_type="text/markdown",
    url="https://github.com/jcanode/small_nn",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 28.52381
| 53
| 0.677796
| 69
| 599
| 5.73913
| 0.724638
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.188648
| 599
| 20
| 54
| 29.95
| 0.802469
| 0
| 0
| 0
| 0
| 0
| 0.407346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
47604d58582e9e00c559605b1ca161c8c2ddf27a
| 1,212
|
py
|
Python
|
RVFS/account/urls.py
|
cahudson94/Raven-Valley-Forge-Shop
|
52f46381eafa9410d8e9c759366ef7490dcb1de9
|
[
"MIT"
] | 2
|
2018-02-12T01:32:16.000Z
|
2021-08-23T19:29:08.000Z
|
RVFS/account/urls.py
|
cahudson94/Raven-Valley-Forge-Shop
|
52f46381eafa9410d8e9c759366ef7490dcb1de9
|
[
"MIT"
] | 1
|
2018-05-23T03:42:20.000Z
|
2018-05-23T03:42:20.000Z
|
RVFS/account/urls.py
|
cahudson94/Raven-Valley-Forge-Shop
|
52f46381eafa9410d8e9c759366ef7490dcb1de9
|
[
"MIT"
] | null | null | null |
"""."""
from django.urls import path, reverse_lazy
from account.views import (AccountView,
InfoFormView,
EditAccountView,
AddAddressView,
AddressListView,
DeleteAddress)
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', AccountView.as_view(), name='account'),
path('add-address/', AddAddressView.as_view(), name='add_add'),
path('address-list/', AddressListView.as_view(), name='add_list'),
path('delete-address/<int:pk>/', DeleteAddress.as_view(), name='del_add'),
path('edit/<int:pk>/', EditAccountView.as_view(), name='edit_acc'),
path('info-form/<int:pk>/', InfoFormView.as_view(), name='info_reg'),
path('change_password/', auth_views.PasswordChangeView.as_view(
template_name='password_reset/change_password.html',
success_url=reverse_lazy('change_password_done')),
name='change_password'),
path('change_password_done/', auth_views.PasswordChangeDoneView.as_view(
template_name='password_reset/change_password_done.html',
),
name='change_password_done')
]
| 43.285714
| 78
| 0.634488
| 129
| 1,212
| 5.697674
| 0.333333
| 0.065306
| 0.081633
| 0.035374
| 0.122449
| 0.122449
| 0.122449
| 0.122449
| 0
| 0
| 0
| 0
| 0.227723
| 1,212
| 27
| 79
| 44.888889
| 0.785256
| 0.000825
| 0
| 0
| 0
| 0
| 0.243983
| 0.099585
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.291667
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
47615b2e9cbae2e821ca67ff9a73e485fdec4592
| 209
|
wsgi
|
Python
|
index.wsgi
|
webgovernor/dungeonsheet
|
59800249f0394af0fc08d7ca23e68faf9d0d2920
|
[
"MIT"
] | null | null | null |
index.wsgi
|
webgovernor/dungeonsheet
|
59800249f0394af0fc08d7ca23e68faf9d0d2920
|
[
"MIT"
] | null | null | null |
index.wsgi
|
webgovernor/dungeonsheet
|
59800249f0394af0fc08d7ca23e68faf9d0d2920
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys

# Make the application package importable when run under a WSGI server.
sys.path.insert(0, '/home/nullism/web/dnd.nullism.com/')

from main import app

# Apply deployment configuration on top of the app defaults.
conf = {'SECRET_KEY': 'CHANGEME'}
app.config.update(conf)

# WSGI servers look for a module-level callable named `application`.
application = app
| 14.928571
| 56
| 0.712919
| 33
| 209
| 4.484848
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005464
| 0.124402
| 209
| 13
| 57
| 16.076923
| 0.803279
| 0.095694
| 0
| 0
| 0
| 0
| 0.27957
| 0.182796
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
476347b456fac42ec555a12cc44e71eae3a0e9fc
| 236
|
py
|
Python
|
src/file_format/utils/json_util.py
|
tys-hiroshi/test-github-actions-python
|
c47172949fbdd6ddffe889ca0f91eb11a741021d
|
[
"MIT"
] | null | null | null |
src/file_format/utils/json_util.py
|
tys-hiroshi/test-github-actions-python
|
c47172949fbdd6ddffe889ca0f91eb11a741021d
|
[
"MIT"
] | 2
|
2021-05-21T09:36:42.000Z
|
2021-05-28T03:55:44.000Z
|
src/file_format/utils/json_util.py
|
tys-hiroshi/test-github-actions-python
|
c47172949fbdd6ddffe889ca0f91eb11a741021d
|
[
"MIT"
] | 1
|
2020-08-06T06:21:34.000Z
|
2020-08-06T06:21:34.000Z
|
# -*- coding:utf-8 -*-
import json
class JsonUtil(object):
    """Load a JSON document from disk and expose it as ``self.content``."""

    def __init__(self, jsonFilePath):
        # Read the whole file as UTF-8 and parse it eagerly at construction.
        with open(jsonFilePath, 'r', encoding='utf-8') as json_file:
            self.content = json.load(json_file)
# No demo behavior when executed directly; this module is import-only.
if __name__ == '__main__':
    pass
| 21.454545
| 60
| 0.610169
| 30
| 236
| 4.4
| 0.8
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 0.233051
| 236
| 11
| 61
| 21.454545
| 0.718232
| 0.084746
| 0
| 0
| 0
| 0
| 0.065116
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4768b72ec107983342fe43250644f8d66a20e6f5
| 483
|
py
|
Python
|
example/retreive_soil_data.py
|
Smeaol22/ssurgo_provider
|
63bc2251ef031da013af4cf0b252cc48cad4965e
|
[
"BSD-2-Clause"
] | null | null | null |
example/retreive_soil_data.py
|
Smeaol22/ssurgo_provider
|
63bc2251ef031da013af4cf0b252cc48cad4965e
|
[
"BSD-2-Clause"
] | null | null | null |
example/retreive_soil_data.py
|
Smeaol22/ssurgo_provider
|
63bc2251ef031da013af4cf0b252cc48cad4965e
|
[
"BSD-2-Clause"
] | null | null | null |
from pathlib import Path

from src.main import retrieve_soil_composition

# This example is based on a geodatabase obtained from SSURGO for the Ohio area.
ssurgo_folder_path = Path().absolute().parent / 'resources' / 'SSURGO' / 'soils_GSSURGO_oh_3905571_01' \
                     / 'soils' / 'gssurgo_g_oh' / 'gSSURGO_OH.gdb'
# Coordinate pairs — presumably (latitude, longitude); verify against the
# signature of retrieve_soil_composition before reusing.
coordinates = [(40.574234, -83.292448), (40.519224, -82.799437), (40.521048, -82.790174)]
soil_data_list = retrieve_soil_composition(coordinates, ssurgo_folder_path)
| 43.909091
| 104
| 0.732919
| 66
| 483
| 5.106061
| 0.636364
| 0.071217
| 0.136499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139024
| 0.151139
| 483
| 10
| 105
| 48.3
| 0.682927
| 0.138716
| 0
| 0
| 0
| 0
| 0.176329
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
4786a66a40aa4e0d9cb192b3447ac559f77b749b
| 481
|
py
|
Python
|
models/model.py
|
BaoLocPham/hum2song
|
706b7fdf838944e2aabe0ae331c0867cb67f6fbc
|
[
"MIT"
] | null | null | null |
models/model.py
|
BaoLocPham/hum2song
|
706b7fdf838944e2aabe0ae331c0867cb67f6fbc
|
[
"MIT"
] | null | null | null |
models/model.py
|
BaoLocPham/hum2song
|
706b7fdf838944e2aabe0ae331c0867cb67f6fbc
|
[
"MIT"
] | null | null | null |
from models.wrap_mobilenet import *
from models.wrap_resnet import *
from models.wrap_vgg import *
from models.wrap_alexnet import *
def get_model(config="resnet"):
if "resnet" in config.backbone:
model = get_resnet(config=config)
elif "mobilenet" in config.backbone:
model = get_mobilenet(config)
elif "vgg" in config.backbone:
model = get_vgg(config)
elif "alexnet" in config.backbone:
model = get_alexnet(config)
return model
| 32.066667
| 41
| 0.702703
| 64
| 481
| 5.140625
| 0.265625
| 0.121581
| 0.170213
| 0.255319
| 0.291793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2079
| 481
| 15
| 42
| 32.066667
| 0.863517
| 0
| 0
| 0
| 0
| 0
| 0.064315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
478b0e311b234d21a0af4f46a1bbc9e444318807
| 527
|
py
|
Python
|
Lab02_ifelse_and_loops/exercise-17.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab02_ifelse_and_loops/exercise-17.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
Lab02_ifelse_and_loops/exercise-17.py
|
rodrigoc-silva/Python-course
|
327b20738a4b383510faddc0ec26a54be1bbd717
|
[
"MIT"
] | null | null | null |
# This program converts KPH to MPH.

# Conversion constant: miles per kilometre.
CONVERT_FACTOR = 0.6214

# Table header.
print("KPH \t MPH")
print("_" * 20)

# One table row per speed from 60 to 130 KPH in steps of 10.
for kph in range(60, 131, 10):
    print(kph, '\t', format(kph * CONVERT_FACTOR, '.1f'))

input("\nPress any key to quit")

# Expected output (Case 1):
# KPH MPH
# ____________________
# 60 37.3
# 70 43.5
# 80 49.7
# 90 55.9
# 100 62.1
# 110 68.4
# 120 74.6
# 130 80.8
# Press any key to quit
| 15.5
| 52
| 0.588235
| 83
| 527
| 3.39759
| 0.674699
| 0.085106
| 0.099291
| 0.085106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160428
| 0.290323
| 527
| 34
| 53
| 15.5
| 0.593583
| 0.470588
| 0
| 0
| 0
| 0
| 0.149425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
4790692759f37d6994f35811b0b4dd07154a5efb
| 1,664
|
py
|
Python
|
1018.py
|
idarlenearaujo/URI_Python
|
c4517f90f5310894347edcf58a28d3e569a89a2b
|
[
"MIT"
] | null | null | null |
1018.py
|
idarlenearaujo/URI_Python
|
c4517f90f5310894347edcf58a28d3e569a89a2b
|
[
"MIT"
] | null | null | null |
1018.py
|
idarlenearaujo/URI_Python
|
c4517f90f5310894347edcf58a28d3e569a89a2b
|
[
"MIT"
] | null | null | null |
# entrada (input): the amount to break into bank notes
value = int(input())

# Keep the original amount for the report printed at the end.
valueI = value

# Note denominations, largest first; greedy division is exact for this set.
# Replaces ~60 lines of duplicated if/elif blocks (one per denomination)
# and the boolean-sentinel while loop with a single divmod pass.
counts = {}
for note in (100, 50, 20, 10, 5, 2, 1):
    counts[note], value = divmod(value, note)

print(
    '{}\n{} nota(s) de R$ 100,00\n{} nota(s) de R$ 50,00\n{} nota(s) de R$ 20,00\n{} nota(s) de R$ 10,00\n{} nota(s) de R$ 5,00\n{} nota(s) de R$ 2,00\n{} nota(s) de R$ 1,00'.format(
        valueI, counts[100], counts[50], counts[20], counts[10], counts[5], counts[2], counts[1]))
| 20.292683
| 183
| 0.485577
| 204
| 1,664
| 3.960784
| 0.215686
| 0.095297
| 0.155941
| 0.242574
| 0.413366
| 0.402228
| 0
| 0
| 0
| 0
| 0
| 0.107614
| 0.408053
| 1,664
| 82
| 184
| 20.292683
| 0.71269
| 0.103966
| 0
| 0.137255
| 0
| 0.019608
| 0.120172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4793cb0e3e72768ba54484744717ebd0208905a2
| 795
|
py
|
Python
|
backend/appengine/routes/desenhos/edit.py
|
faahbih/projetoolivarts
|
3dfd955fe44d58a38b85b6643440a600b0bde81a
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/desenhos/edit.py
|
faahbih/projetoolivarts
|
3dfd955fe44d58a38b85b6643440a600b0bde81a
|
[
"MIT"
] | null | null | null |
backend/appengine/routes/desenhos/edit.py
|
faahbih/projetoolivarts
|
3dfd955fe44d58a38b85b6643440a600b0bde81a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from desenho.desenho_model import Desenho, DesenhoForm
from gaecookie.decorator import no_csrf
#from pedido.pedido_model import Pedido, PedidoForm
from routes import desenhos
from tekton.gae.middleware.redirect import RedirectResponse
from tekton.router import to_path
@no_csrf
def index(desenho_id):
    """Render the edit form for the Desenho with the given id."""
    # Datastore ids are integers; the URL parameter arrives as a string.
    desenhos = Desenho.get_by_id(int(desenho_id))
    ctx={'desenhos': desenhos,
         'salvar_path':to_path(salvar)}
    return TemplateResponse(ctx,'/desenhos/desenhos_form.html')
def salvar(desenho_id, **kwargs):
    """Persist submitted form data for a Desenho, then redirect."""
    # Datastore ids are integers; convert like index() does — the original
    # passed the raw string id, so get_by_id would not find the entity.
    desenhos = Desenho.get_by_id(int(desenho_id))
    # NOTE(review): the fetched entity is not updated with the form data;
    # a fresh DesenhoForm is put() instead. Looks suspicious — confirm
    # whether the intent was to update `desenhos` in place.
    desenho = DesenhoForm(**kwargs)
    desenho.put()
    return RedirectResponse(desenhos)
| 34.565217
| 63
| 0.783648
| 102
| 795
| 5.882353
| 0.431373
| 0.06
| 0.06
| 0.066667
| 0.073333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001441
| 0.127044
| 795
| 22
| 64
| 36.136364
| 0.863112
| 0.089308
| 0
| 0
| 0
| 0
| 0.065187
| 0.038835
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.388889
| 0
| 0.611111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
479ba82b2ef03b26d431347ee226aa3e2162ff17
| 221
|
py
|
Python
|
PYTHON/Exemplos/Aula 1/Ex004.py
|
B1linha/ADS---Mackenzie
|
0dc53418ff7580836f6a64c5370e204e8841d1a9
|
[
"MIT"
] | null | null | null |
PYTHON/Exemplos/Aula 1/Ex004.py
|
B1linha/ADS---Mackenzie
|
0dc53418ff7580836f6a64c5370e204e8841d1a9
|
[
"MIT"
] | null | null | null |
PYTHON/Exemplos/Aula 1/Ex004.py
|
B1linha/ADS---Mackenzie
|
0dc53418ff7580836f6a64c5370e204e8841d1a9
|
[
"MIT"
] | null | null | null |
""" Faça um programa que receba o salário de um funcionário, calcule e mostre o novo salário, sabende-se que este sofreu um aumento de 25%"""
sal = float(input('Salário:'))
nsal = sal*1.25
print ('novo salário = ', nsal)
| 55.25
| 142
| 0.710407
| 37
| 221
| 4.243243
| 0.675676
| 0.140127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027174
| 0.167421
| 221
| 4
| 143
| 55.25
| 0.826087
| 0.606335
| 0
| 0
| 0
| 0
| 0.283951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
479c652f9d6be7f731af9a0eaf026a8198211ed7
| 1,947
|
py
|
Python
|
src/test/test_cmdparse_qmp.py
|
dougpuob/qemu-tasker
|
58a24090016abebcda8e95c382bceaef453ea981
|
[
"MIT"
] | null | null | null |
src/test/test_cmdparse_qmp.py
|
dougpuob/qemu-tasker
|
58a24090016abebcda8e95c382bceaef453ea981
|
[
"MIT"
] | null | null | null |
src/test/test_cmdparse_qmp.py
|
dougpuob/qemu-tasker
|
58a24090016abebcda8e95c382bceaef453ea981
|
[
"MIT"
] | null | null | null |
import unittest
import sys
import os
# NOTE(review): `import sys` appears twice in this header; harmless,
# but the duplicate could be removed.
import sys
import json

# Make the project root (parent of this test directory) importable.
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.abspath(os.path.join(TEST_DIR, os.pardir))
sys.path.insert(0, PROJECT_DIR)

from module.cmdparse import cmdargs
class test_cmdparse(unittest.TestCase):
    """Argument-parsing tests for the `qmp` sub-command.

    The broken ``__init__(self, methodName: str = ...)`` override was
    removed: its default was the Ellipsis object, which would crash
    unittest if ever used, and the method only forwarded to super().
    The three tests shared an identical argv prefix and assertions,
    now factored into helpers.
    """

    def _parse(self, *extra):
        # Build the common argv prefix plus any extra flags, then parse.
        sys.argv = ['qemu-tasker.py',
                    'qmp',
                    '--taskid', '10010',
                    '--execute', 'human-monitor-command',
                    *extra]
        return cmdargs().get_parsed_args()

    def _assert_common(self, args):
        # Expectations shared by every qmp invocation above.
        self.assertEqual(args.taskid, 10010)
        self.assertEqual(args.command, "qmp")
        self.assertEqual(args.execute, "human-monitor-command")

    def test_qmp_no_arg(self):
        self._assert_common(self._parse())

    def test_qmp_argsjson1(self):
        args = self._parse('--argsjson', '{"command-line" : "info version" }')
        self._assert_common(args)
        self.assertEqual(args.argsjson, '{"command-line" : "info version" }')

    def test_qmp_argsjson2(self):
        argsjson = {"command-line" : "info version" }
        args = self._parse('--argsjson', json.dumps(argsjson))
        self._assert_common(args)
        self.assertEqual(args.argsjson, json.dumps(argsjson))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 31.918033
| 77
| 0.582435
| 208
| 1,947
| 5.269231
| 0.269231
| 0.150547
| 0.190693
| 0.142336
| 0.650547
| 0.583029
| 0.583029
| 0.583029
| 0.583029
| 0.583029
| 0
| 0.023207
| 0.269646
| 1,947
| 60
| 78
| 32.45
| 0.747539
| 0
| 0
| 0.543478
| 0
| 0
| 0.191063
| 0.064715
| 0
| 0
| 0
| 0
| 0.23913
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.23913
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
479ce552b6ca46a4ea68c0270c6749107ab46cb3
| 3,498
|
py
|
Python
|
ebbp/ebb_fit_prior.py
|
uttiyamaji/ebbp
|
d1b7270b7741cab4b18a9f54b47060f90ac1fc2c
|
[
"MIT"
] | null | null | null |
ebbp/ebb_fit_prior.py
|
uttiyamaji/ebbp
|
d1b7270b7741cab4b18a9f54b47060f90ac1fc2c
|
[
"MIT"
] | null | null | null |
ebbp/ebb_fit_prior.py
|
uttiyamaji/ebbp
|
d1b7270b7741cab4b18a9f54b47060f90ac1fc2c
|
[
"MIT"
] | null | null | null |
"""
ebb_fit_prior : fits a Beta prior by estimating the parameters from the data using
method of moments and MLE estimates
augment : given data and prior, computes the shrinked estimate, credible intervals and
augments those in the given dataframe
check_fit : plots the true average and the shrinked average
"""
import numpy as np
import pandas as pd
from scipy.stats import beta as beta_dist
from dataclasses import dataclass
import matplotlib.pyplot as plt
@dataclass
class Beta:
    """Minimal Beta distribution holder, for prior and posterior objects."""
    alpha: float
    beta: float

    def pdf(self, x):
        """Beta density at x (delegates to scipy.stats.beta)."""
        # The original had an unreachable `pass` after this return.
        return beta_dist.pdf(x, self.alpha, self.beta)

    def plot(self, x, n):
        """Plot the Beta pdf over a histogram of the proportions x/n."""
        grid = np.linspace(0, 1, 1000)
        rv = beta_dist(self.alpha, self.beta)
        p = x / n
        plt.hist(p, density=True)
        plt.plot(grid, rv.pdf(grid))
        # NOTE: .round(2) implies alpha/beta are numpy scalars here.
        plt.title(f'Beta({self.alpha.round(2)},{self.beta.round(2)})')
        plt.show()
def ebb_fit_prior(x, n, method='mm', start=(0.5, 0.5)):
    """Fit a Beta prior to success counts ``x`` out of totals ``n``.

    method='mm' uses method-of-moments on the proportions x/n;
    method='mle' uses scipy's maximum-likelihood Beta fit.
    ``start`` is kept for backward compatibility but unused: scipy's
    ``fit`` picks its own starting point.

    Raises:
        ValueError: for an unknown method. (The original returned an
            error *string*, which a caller could mistake for a prior.)
    """
    p = x / n
    if method == 'mm':
        mu, sig = np.mean(p), np.var(p)
        # Method-of-moments inversion of the Beta mean/variance formulas.
        a = ((1 - mu) / sig - 1 / mu) * mu ** 2
        b = a * (1 / mu - 1)
        return Beta(a, b)
    if method == 'mle':
        # The original built an unused (and sign-buggy) likelihood closure
        # plus commented-out minimize() code; beta_dist.fit does the MLE.
        a, b, *_ = beta_dist.fit(p)
        return Beta(a, b)
    raise ValueError("method should be 'mm' or 'mle'")
def augment(prior, data, x, n):
    """Shrink raw proportions toward the prior and attach results to ``data``.

    Returns a copy of ``data`` with posterior alpha/beta, the empirical-
    Bayes point estimate, and a 95% credible interval as new columns.
    (The original had an unreachable ``pass`` after the return and built
    an intermediate Beta object only to read back its two fields.)
    """
    post_alpha = prior.alpha + x
    post_beta = prior.beta + n - x
    # Posterior mean of the Beta-Binomial model: the shrunken estimate.
    eb_est = (x + prior.alpha) / (n + prior.alpha + prior.beta)
    # 95% equal-tailed credible interval from the posterior Beta.
    post_l = beta_dist.ppf(0.025, post_alpha, post_beta)
    post_u = beta_dist.ppf(0.975, post_alpha, post_beta)
    new_cols = pd.DataFrame({'alpha': post_alpha, 'beta': post_beta,
                             'eb_est': eb_est,
                             'cred_lower': post_l, 'cred_upper': post_u})
    return pd.concat([data, new_cols], axis=1)
def check_fit(aug_df):
    """Visual check: plot raw estimates against the shrunken EB estimates."""
    plt.plot(aug_df.est, aug_df.eb_est)
    plt.show()
if __name__ == '__main__':
    # Demo on synthetic data: x successes out of n trials, 20 groups.
    x = np.random.randint(0,50,20)
    n = np.random.randint(50,100, 20)
    p = x/n
    dt = pd.DataFrame({'S':x, 'Tot':n, 'est':p})
    # Method-of-moments fit.
    est1 = ebb_fit_prior(x,n, 'mm')
    print(est1)
    est1.plot(x, n)
    new_dt = augment(est1, dt, dt.S, dt.Tot)
    print(new_dt.head(10))
    check_fit(new_dt)
    print('=============================')
    # Maximum-likelihood fit.
    est2 = ebb_fit_prior(x,n,'mle')
    print(est2)
    est2.plot(x,n)
    new_dt = augment(est2, dt, dt.S, dt.Tot)
    print(new_dt.head(10))
    check_fit(new_dt)
| 26.104478
| 129
| 0.57004
| 515
| 3,498
| 3.739806
| 0.287379
| 0.011423
| 0.028557
| 0.024922
| 0.110073
| 0.096573
| 0.040498
| 0.040498
| 0.040498
| 0.040498
| 0
| 0.022312
| 0.295312
| 3,498
| 133
| 130
| 26.300752
| 0.759026
| 0.241567
| 0
| 0.228571
| 0
| 0
| 0.063025
| 0.029412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0.071429
| 0.071429
| 0.014286
| 0.271429
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
479f58c93d75ceaaa8c1e6a8d36c5a44e2d8377b
| 307
|
py
|
Python
|
methylprep/processing/__init__.py
|
WonyoungCho/methylprep
|
4e34f62be969158453ba9b05b7629433f9bbba8b
|
[
"MIT"
] | 5
|
2019-08-28T08:27:16.000Z
|
2020-03-11T17:20:01.000Z
|
methylprep/processing/__init__.py
|
WonyoungCho/methylprep
|
4e34f62be969158453ba9b05b7629433f9bbba8b
|
[
"MIT"
] | 16
|
2021-04-08T22:02:58.000Z
|
2022-03-18T17:30:50.000Z
|
methylprep/processing/__init__.py
|
WonyoungCho/methylprep
|
4e34f62be969158453ba9b05b7629433f9bbba8b
|
[
"MIT"
] | 3
|
2020-05-21T10:16:24.000Z
|
2020-08-30T09:26:52.000Z
|
from .pipeline import SampleDataContainer, run_pipeline, make_pipeline
from .preprocess import preprocess_noob
from .postprocess import consolidate_values_for_sheet

# Public API of the processing subpackage.
# Fix: the original listed 'make_pipeline,' — a stray comma INSIDE the
# string — so `from ... import *` would fail on the bogus name.
__all__ = [
    'SampleDataContainer',
    'preprocess_noob',
    'run_pipeline',
    'make_pipeline',
    'consolidate_values_for_sheet',
]
| 25.583333
| 70
| 0.781759
| 32
| 307
| 7
| 0.4375
| 0.098214
| 0.133929
| 0.205357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143322
| 307
| 11
| 71
| 27.909091
| 0.851711
| 0
| 0
| 0
| 0
| 0
| 0.286645
| 0.091205
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
47a75c9fd27c1c4e7e7447cf693a765246360655
| 1,446
|
py
|
Python
|
yc164/518.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc164/518.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc164/518.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
def r2i(s):
    """Convert a Roman-numeral string *s* to an integer.

    Raises KeyError on characters that are not Roman numerals.
    """
    # Loop-invariant tables hoisted out of the loop (the original rebuilt
    # the pair dict on every iteration).
    pairs = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
    singles = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    i = 0
    result = 0
    while i < len(s):
        # Subtractive two-letter pairs must be matched before single letters.
        if s[i:i + 2] in pairs:
            result += pairs[s[i:i + 2]]
            i += 2
        else:
            result += singles[s[i]]
            i += 1
    return result
def i2r(i):
    """Convert integer *i* (0..3999) to a Roman-numeral string.

    Values above 3999 cannot be written in plain Roman numerals, so the
    string 'ERROR' is returned instead.
    """
    if i > 3999:
        return 'ERROR'
    # Greedy conversion table, largest value first (subtractive forms included).
    table = (
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    )
    result = ''
    for value, symbol in table:
        while i >= value:
            result += symbol
            i -= value
    return result
# First input line: the count of numerals (unused — split() below already
# yields every token on the second line).
N = int(input())
R = input().split()
# Sum the Roman-numeral values and print the total back in Roman form.
print(i2r(sum(r2i(r) for r in R)))
| 21.58209
| 77
| 0.310512
| 183
| 1,446
| 2.453552
| 0.284153
| 0.13363
| 0.020045
| 0.017817
| 0.066815
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144315
| 0.525588
| 1,446
| 66
| 78
| 21.909091
| 0.510204
| 0
| 0
| 0.032787
| 0
| 0
| 0.029737
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0
| 0
| 0.081967
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
47a795b1d3c2fd7990f98fee6d037bc7e104529b
| 966
|
py
|
Python
|
asynapplicationinsights/channel/abstractions.py
|
RobertoPrevato/aioapplicationinsights
|
c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120
|
[
"MIT"
] | 2
|
2018-08-13T14:26:31.000Z
|
2019-12-01T01:03:10.000Z
|
asynapplicationinsights/channel/abstractions.py
|
RobertoPrevato/aioapplicationinsights
|
c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120
|
[
"MIT"
] | 4
|
2018-10-09T20:32:59.000Z
|
2018-12-09T20:46:09.000Z
|
asynapplicationinsights/channel/abstractions.py
|
RobertoPrevato/aioapplicationinsights
|
c72721c6ed0e64b4e5bfecbcd3dde62f7c6ea120
|
[
"MIT"
] | null | null | null |
from asyncio import Queue, QueueEmpty
from abc import ABC, abstractmethod
from typing import List
class TelemetryChannel(ABC):
    """Buffers telemetry items in an asyncio queue and flushes them in batches.

    Concrete subclasses implement the actual transport in send() and the
    cleanup in dispose().
    """

    def __init__(self):
        self._queue = Queue()
        self._max_length = 500  # batch size that triggers an automatic flush

    def get(self):
        """Pop one buffered item without waiting; None when the queue is empty."""
        try:
            return self._queue.get_nowait()
        except QueueEmpty:
            return None

    async def put(self, item):
        """Enqueue *item* (falsy items are silently dropped), flushing when full."""
        if not item:
            return
        await self._queue.put(item)
        if self.should_flush():
            await self.flush()

    def should_flush(self) -> bool:
        """True once the buffered count has reached the configured maximum."""
        return self._max_length <= self._queue.qsize()

    async def flush(self):
        """Drain the queue and hand the collected batch to send()."""
        batch = []
        item = self.get()
        while item:
            batch.append(item)
            item = self.get()
        if batch:
            await self.send(batch)

    @abstractmethod
    async def send(self, data: List):
        pass

    @abstractmethod
    async def dispose(self):
        pass
| 21
| 54
| 0.555901
| 110
| 966
| 4.745455
| 0.372727
| 0.068966
| 0.049808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004862
| 0.361284
| 966
| 45
| 55
| 21.466667
| 0.841167
| 0
| 0
| 0.171429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0.057143
| 0.085714
| 0.028571
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
47a8d279cbfb373533f6a00a0322f66158d1d281
| 759
|
py
|
Python
|
2021/day3/a.py
|
vinnymaker18/adventofcode
|
92d0a6f5a04e6601b6c82ee323565e7327be36f8
|
[
"MIT"
] | null | null | null |
2021/day3/a.py
|
vinnymaker18/adventofcode
|
92d0a6f5a04e6601b6c82ee323565e7327be36f8
|
[
"MIT"
] | null | null | null |
2021/day3/a.py
|
vinnymaker18/adventofcode
|
92d0a6f5a04e6601b6c82ee323565e7327be36f8
|
[
"MIT"
] | null | null | null |
def processInput(inputFile='input.txt'):
    """Read *inputFile* and return its lines with surrounding whitespace stripped."""
    # Use a context manager — the original left the file handle open.
    with open(inputFile) as fh:
        return [line.strip() for line in fh]
def filter(data, a=1):
    """Compute and print the oxygen-generator × CO2-scrubber rating product.

    AoC 2021 day 3 part 2. ``data`` is a list of equal-length bit strings.
    NOTE(review): the name shadows the builtin ``filter`` and ``a`` is unused;
    both are kept for backward compatibility. Returns the product as well as
    printing it, so callers can use the value.
    """
    setA = set(range(len(data)))  # candidates for the most-common-bit rating
    setB = set(range(len(data)))  # candidates for the least-common-bit rating

    def count(bit, value, dataset):
        """Indices in *dataset* whose reading has *value* at position *bit*."""
        return set(d for d in dataset if data[d][bit] == value)

    # Bug fix: derive the bit width from the input instead of hard-coding 12
    # (the original raised IndexError for readings shorter than 12 bits).
    for bit in range(len(data[0])):
        onesA = count(bit, '1', setA)
        zerosA = count(bit, '0', setA)
        onesB = count(bit, '1', setB)
        zerosB = count(bit, '0', setB)
        # Most common bit wins for A (ties -> '1'); least common for B (ties -> '0').
        setA = onesA if len(onesA) >= len(zerosA) else zerosA
        setB = onesB if len(onesB) < len(zerosB) else zerosB
        if not setB:
            setB = onesB or zerosB
    x, y = data[min(setA)], data[min(setB)]
    product = int(x, 2) * int(y, 2)
    print(product)
    return product
# Solve part 2 on the puzzle input ("filter" is the local function above).
filter(processInput())
| 25.3
| 63
| 0.571805
| 113
| 759
| 3.840708
| 0.371681
| 0.092166
| 0.050691
| 0.069124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016275
| 0.27141
| 759
| 29
| 64
| 26.172414
| 0.768535
| 0
| 0
| 0
| 0
| 0
| 0.017128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0
| 0.105263
| 0.263158
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
47bbb88fe7ad9a14195f7bde44006fac967ad0e2
| 2,044
|
py
|
Python
|
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
python/datadb2/core/svc/build_db2_url.py
|
jiportilla/ontology
|
8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
from base import BaseObject
from base import CryptoBase
from base import FileIO
from datadb2.core.dmo import BaseDB2Client
class BuildDb2Url(BaseObject):
    """Create a DB2 connection from the schemas.yml configuration."""

    # Relative path of the YAML file that holds per-target connection configs.
    __config_path = 'resources/config/db2/schemas.yml'

    def __init__(self,
                 is_debug: bool = False):
        """
        Created:
            9-Oct-2019
            [email protected]
            *   https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1080
        :param is_debug: enable debug behaviour in the base class
        """
        BaseObject.__init__(self, __name__)
        self._is_debug = is_debug
        self._config = FileIO.file_to_yaml_by_relative_path(self.__config_path)

    @staticmethod
    def _values(d_config: dict):
        """Decrypt credentials from the environment and normalize config values.

        NOTE(review): config stores env-var names with a one-character prefix
        that is skipped via ``[1:]`` — confirm against schemas.yml.
        """
        def _decrypt(key):
            return CryptoBase.decrypt_str(os.environ[d_config[key][1:]])

        return {
            'host': d_config['host'].strip(),
            'database': d_config['database'].strip(),
            'port': d_config['port'],
            'username': _decrypt('username').strip(),
            'password': _decrypt('password').strip()}

    @staticmethod
    def _connect(d_config: dict) -> BaseDB2Client:
        """Open a BaseDB2Client from a normalized config dict."""
        return BaseDB2Client(some_database_name=d_config['database'],
                             some_hostname=d_config['host'],
                             some_port=d_config['port'],
                             some_username=d_config['username'],
                             some_password=d_config['password'])

    def wft_dev(self) -> BaseDB2Client:
        """Connect to DB2 WFT DEV."""
        return self._connect(self._values(self._config['wft_dev']))

    def cendant(self) -> BaseDB2Client:
        """Connect to the Cendant DB2 target."""
        return self._connect(self._values(self._config['cendant']))
if __name__ == "__main__":
    # Manual smoke test: open the Cendant connection
    # (other targets left commented out by the original author).
    # BuildDb2Url().wft_dev()
    # BuildDb2Url().wft_prod()
    BuildDb2Url().cendant()
| 28.788732
| 81
| 0.581703
| 217
| 2,044
| 5.16129
| 0.387097
| 0.075
| 0.0375
| 0.039286
| 0.141071
| 0.141071
| 0.141071
| 0.076786
| 0
| 0
| 0
| 0.017218
| 0.289628
| 2,044
| 70
| 82
| 29.2
| 0.754132
| 0.150685
| 0
| 0.057143
| 0
| 0
| 0.093284
| 0.019901
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.085714
| 0.142857
| 0.028571
| 0.457143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
47bcdf89bfb403747fce6b37d8765b1f6f980172
| 431
|
py
|
Python
|
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
ex067 - Tabuada v3.0.py
|
marvincosmo/Python-Curso-em-Video
|
47ee3dd6423835e7bca159ffd7ee796423569176
|
[
"MIT"
] | null | null | null |
""" 67 - Faça um programa que mostre a tabuada de vários números, um de cada vez, para cada valor digitado pelo
usuário. O programa será interrompido quando o número solicitado for negativo. """
while True:
n = int(input('Informe um número para ver sua tabuada: '))
if n < 0:
break
print('-' * 13)
for m in range(1, 11):
print(f'{n} x {m} = {n*m}')
print('-' * 13)
print('Programa encerrado.')
| 33.153846
| 111
| 0.62645
| 67
| 431
| 4.029851
| 0.701493
| 0.051852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030769
| 0.24594
| 431
| 12
| 112
| 35.916667
| 0.8
| 0.431555
| 0
| 0.222222
| 0
| 0
| 0.329114
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
47cd00f1c6e6fe88e15b29bda7971944f1ec4024
| 2,127
|
py
|
Python
|
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
mylast.py
|
JohnTocher/descrobbler
|
0bca4d05e0029b63d11fe615e933362cadb30c11
|
[
"Apache-2.0"
] | null | null | null |
''' this file creates the objects used to access the scrobbling service api
No actual creds should be stored here!
This module will be imported and used by the main code
'''
import os
import sys
import pylast
# Prefer real API credentials from the environment; fall back to placeholders
# so the module still imports during development.
try:
    API_KEY = os.environ["LASTFM_API_KEY"]
    API_SECRET = os.environ["LASTFM_API_SECRET"]
except KeyError:
    API_KEY = "my_api_key"
    API_SECRET = "my_apy_secret"
# Same pattern for the user credentials; the fallback branch also demonstrates
# how to derive the password hash once so the plain password need not be kept.
try:
    lastfm_username = os.environ["LASTFM_USERNAME"]
    lastfm_password_hash = os.environ["LASTFM_PASSWORD_HASH"]
    print("Environment variables for user OK")
except KeyError:
    # In order to perform a write operation you need to authenticate yourself
    lastfm_username = "my_username"
    # You can use either use the password, or find the hash once and use that
    lastfm_password_hash = pylast.md5("my_password")
    print(lastfm_password_hash)
    # lastfm_password_hash = "my_password_hash"
    print("Environment variables for user missing! So far:")
    print(f"API_KEY: {API_KEY}")
    print(f"API_SECRET: {API_SECRET}")
    print(f"LFM USER: {lastfm_username}")
    print(f"LPW HASH: {lastfm_password_hash}")
# Module-level network handle used by the rest of the script.
lastfm_network = pylast.LastFMNetwork(
    api_key=API_KEY,
    api_secret=API_SECRET,
    username=lastfm_username,
    password_hash=lastfm_password_hash,
)
def track_and_timestamp(track):
    """Format a played track as 'playback date<TAB>track name'."""
    played_at = track.playback_date
    title = track.track
    return f"{played_at}\t{title}"


def print_track(track):
    """Print one played track in the tabular format above."""
    line = track_and_timestamp(track)
    print(line)
TRACK_SEPARATOR = " - "


def split_artist_track(artist_track):
    """Split an "Artist - Track" string into an (artist, track) tuple.

    Normalizes en dashes and curly quotes first, then splits on the *first*
    separator only — the original unpacked an unbounded split(), which raised
    ValueError whenever the track title itself contained " - ".  Exits the
    program (sys.exit) when the separator is missing or either part is blank.
    """
    artist_track = artist_track.replace(" – ", " - ")
    artist_track = artist_track.replace("“", '"')
    artist_track = artist_track.replace("”", '"')
    parts = artist_track.split(TRACK_SEPARATOR, 1)
    if len(parts) != 2:
        sys.exit("Error: Artist and track separator not found")
    (artist, track) = parts
    artist = artist.strip()
    track = track.strip()
    print("Artist:\t\t'" + artist + "'")
    print("Track:\t\t'" + track + "'")

    # Validate
    if len(artist) == 0 and len(track) == 0:
        sys.exit("Error: Artist and track are blank")
    if len(artist) == 0:
        sys.exit("Error: Artist is blank")
    if len(track) == 0:
        sys.exit("Error: Track is blank")
    return (artist, track)
| 29.136986
| 77
| 0.686883
| 296
| 2,127
| 4.726351
| 0.320946
| 0.08649
| 0.090064
| 0.094353
| 0.24732
| 0.181558
| 0.120086
| 0.057184
| 0.057184
| 0
| 0
| 0.002922
| 0.195581
| 2,127
| 72
| 78
| 29.541667
| 0.814144
| 0.170193
| 0
| 0.081633
| 0
| 0
| 0.255441
| 0.033219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0.102041
| 0.061224
| 0.020408
| 0.163265
| 0.22449
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
47eb8cdb6e6b5599e5209b828d0aacfe3eb4df25
| 555
|
py
|
Python
|
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
Utilities/fe8_exp_test.py
|
Shahrose/lex-talionis
|
ef7e48124b36269f4212eb0e3a7747caf53bfadd
|
[
"MIT"
] | null | null | null |
# Tunable unit/class parameters for the FE8 experience formula
# (m* = my unit, e* = enemy unit).
mlevel = 1
elevel = 1
mclass_bonus_a = 20
eclass_bonus_a = 0
mclass_bonus_b = 60
eclass_bonus_b = 0
mclass_power = 3
eclass_power = 2


def damage_exp():
    """EXP awarded for damaging (but not defeating) the enemy."""
    level_gap = 31 + elevel + eclass_bonus_a - mlevel - mclass_bonus_a
    return level_gap / mclass_power


def defeat_exp(mode=1):
    """Defeat-bonus term; *mode* divides the attacker's side of the formula."""
    enemy_side = elevel * eclass_power + eclass_bonus_b
    my_side = (mlevel * mclass_power + mclass_bonus_b) / mode
    return enemy_side - my_side


def kill_exp():
    """Total EXP for a kill: damage EXP plus a floored defeat bonus."""
    bonus = defeat_exp() if defeat_exp() > 0 else defeat_exp(2)
    return damage_exp() + max(0, 20 + bonus)


print(damage_exp())
print(defeat_exp())
print(defeat_exp(2))
print(kill_exp())
| 24.130435
| 103
| 0.715315
| 92
| 555
| 3.967391
| 0.282609
| 0.147945
| 0.065753
| 0.082192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040773
| 0.16036
| 555
| 22
| 104
| 25.227273
| 0.742489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.166667
| 0.333333
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
47fd66e778b3e447ec3e01b548b142f968b5fb7f
| 599
|
py
|
Python
|
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | null | null | null |
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | null | null | null |
setup.py
|
xebialabs-community/xld-install-helper
|
a61baa9fabc6484afa5fd287a25fc6fb88d84670
|
[
"MIT"
] | 2
|
2016-12-27T12:12:09.000Z
|
2020-09-24T18:06:58.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages

# Packaging metadata for the xl-helper CLI (XL Deploy install/upgrade helper).
setup(
    name='xl-helper',
    version='1.0.5',
    description='XL Deploy helper',
    long_description='This tool helps with installation and upgrade of XL Deploy and plugins',
    author='Mike Kotsur',
    author_email='[email protected]',
    url='http://xebialabs.com/',
    # Ship every package found from the project root except the test suite.
    packages=find_packages(where=".", exclude=["tests*"]),
    # Bundle the default config files next to the code.
    package_data={'xl_helper': ['deployit.conf', '.xl-helper.defaults']},
    include_package_data=True,
    install_requires=['jenkinsapi', 'argparse', 'pytz'],
    scripts=['xl-helper']
)
| 31.526316
| 94
| 0.684474
| 75
| 599
| 5.346667
| 0.72
| 0.079801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005894
| 0.15025
| 599
| 18
| 95
| 33.277778
| 0.781925
| 0.033389
| 0
| 0
| 0
| 0
| 0.401384
| 0.036332
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9a003487767445f7e574b64c73392ed111a08837
| 493
|
py
|
Python
|
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | 8
|
2016-06-21T21:56:17.000Z
|
2021-10-06T17:28:00.000Z
|
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | null | null | null |
setup.py
|
hugorodgerbrown/django-netpromoterscore
|
f0a7ddc32fe942069abacfaa5a3220eaabe9e1db
|
[
"MIT"
] | 1
|
2018-10-19T21:57:54.000Z
|
2018-10-19T21:57:54.000Z
|
from setuptools import setup, find_packages

# Packaging metadata for django-netpromoterscore.
setup(
    name = "django-netpromoterscore",
    version = '0.0.2',
    description = "Model, Tests, and API for collecting promoter score from users.",
    author = "Austin Brennan",
    author_email = "[email protected]",
    url = "https://github.com/epantry/django-netpromoterscore",
    keywords = ["promoter score", "net promoter score", "django"],
    # No runtime dependencies beyond Django itself (provided by the host project).
    install_requires = [],
    packages = find_packages(),
    include_package_data=True,
)
| 32.866667
| 84
| 0.677485
| 56
| 493
| 5.857143
| 0.714286
| 0.118902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007538
| 0.192698
| 493
| 15
| 85
| 32.866667
| 0.816583
| 0
| 0
| 0
| 0
| 0
| 0.419028
| 0.046559
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9a11a05b881b07a6c93ac169600004f78ada2754
| 434
|
py
|
Python
|
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
exercicios/Lista5/Q9.py
|
AlexandrePeBrito/CursoUdemyPython
|
3de58cb30c9f333b32078309847179ff3f9d7e22
|
[
"MIT"
] | null | null | null |
# 9. Function that receives the height and radius of a circular cylinder and
# returns the cylinder's volume: V = pi * radius^2 * height, pi ~ 3.141592.
# (Exercise statement translated from Portuguese.)
def volCilindro(raio, altura):
    """Return the volume of a circular cylinder with radius *raio* and height *altura*."""
    return 3.1415926535 * raio ** 2 * altura
r = float(input("Informe o raio do cilindro: "))
alt = float(input("Informe a altura do cilindro: "))
# Bug fix: the original called volCilindro(3, 2), ignoring the values
# just read from the user.
volume = volCilindro(r, alt)
print(volume)
| 43.4
| 91
| 0.751152
| 76
| 434
| 4.289474
| 0.552632
| 0.092025
| 0.07362
| 0.122699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061995
| 0.145161
| 434
| 10
| 92
| 43.4
| 0.816712
| 0.504608
| 0
| 0
| 0
| 0
| 0.2723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.166667
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
9a122e1fac70741e43cd706c1bfea367874d0fa7
| 1,714
|
py
|
Python
|
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
sachima/publish.py
|
gitter-badger/Sachima
|
76547fb6a21f1fea597994e6ee02c5db080d1e7a
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from sachima import conf
class Publisher(object):
    """Publishes a Sachima result slice to a Superset instance via its HTTP API."""

    @classmethod
    def get_csrf_token(cls, html):  # fixed: first classmethod arg renamed self -> cls
        """Extract the csrf_token hidden-input value from a Superset login page.

        :param html: raw HTML of Superset's /login/ page
        :return: the CSRF token string
        """
        soup = BeautifulSoup(html, "html.parser")
        csrf = soup.find(id="csrf_token").attrs["value"]
        return csrf

    @classmethod
    def to_superset(cls, name, type_, param):  # fixed: self -> cls
        """Log in to Superset and register a slice named *name*.

        :param name: slice name shown in Superset
        :param type_: API type forwarded in the payload
        :param param: slice parameters forwarded in the payload
        """
        addr = conf.get("SUPERSET_WEBSERVER_ADDRESS")
        port = conf.get("SUPERSET_WEBSERVER_PORT")
        user = conf.get("SUPERSET_USERNAME")
        pwd = conf.get("SUPERSET_PASSWORD")
        bp_post = conf.get("SUPERSET_API_TABLE_BP")
        if addr and port:
            url = ":".join([addr.rstrip("/"), str(port)])
            with requests.session() as s:
                # log in (comment translated from Chinese)
                r = s.get(url + "/login/")
                login_data = dict(
                    username=user,
                    password=pwd,
                    csrf_token=cls.get_csrf_token(r.text),
                )
                r = s.post(url + "/login/", data=login_data)
                # call the API endpoint (comment translated from Chinese);
                # a successful login redirects to the /welcome page
                if r.url.endswith("welcome"):
                    r = s.post(
                        url + bp_post,
                        headers={
                            "Content-Type": "application/json; charset=utf-8",
                            "X-CSRFToken": cls.get_csrf_token(r.text),
                        },
                        json={
                            "slice_name": name,
                            "api": type_,
                            "params": param,
                        },
                    )
                    print(r.text)
                    print("publish service to superset")
                else:
                    # login failed: deliberately a no-op (kept from the original)
                    pass
| 32.961538
| 78
| 0.449242
| 163
| 1,714
| 4.570552
| 0.466258
| 0.060403
| 0.100671
| 0.06443
| 0.056376
| 0.056376
| 0
| 0
| 0
| 0
| 0
| 0.002099
| 0.443991
| 1,714
| 51
| 79
| 33.607843
| 0.779643
| 0.004084
| 0
| 0.046512
| 0
| 0
| 0.148474
| 0.04108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.069767
| 0.069767
| 0
| 0.162791
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
9a13bf9dda86cde96d1e704297f9ca1d15b1b6aa
| 3,254
|
pyw
|
Python
|
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 31
|
2018-10-19T15:28:36.000Z
|
2022-02-14T03:01:25.000Z
|
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | null | null | null |
src/mediator/Main.pyw
|
fuqinshen/Python--
|
aaa5230354258e1bba761e483c8b9fb6be00402a
|
[
"MIT"
] | 10
|
2019-01-10T04:02:12.000Z
|
2021-11-17T01:52:15.000Z
|
import tkinter
class Main(tkinter.Frame):
    """Login dialog demonstrating the Mediator pattern: this frame mediates
    between the radio buttons, the text entries, and the OK/Cancel buttons."""

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.pack()
        # Holds "Guest" or "Login" depending on the selected radio button.
        self.checkValue = tkinter.StringVar()
        self.checkGuest = tkinter.Radiobutton(self, text="Guest", variable=self.checkValue, value="Guest",
                                              anchor=tkinter.W)
        self.checkLogin = tkinter.Radiobutton(self, text="Login", variable=self.checkValue, value="Login",
                                              anchor=tkinter.W)
        usernameLabel = tkinter.Label(self, text="Username:", anchor=tkinter.W, underline=0)
        self.textUser = tkinter.Entry(self, width=10, state="disable")
        passwordLabel = tkinter.Label(self, text="Password:", anchor=tkinter.W, underline=0)
        self.textPassword = tkinter.Entry(self, width=10, show='*', state="disable")
        self.buttonOk = tkinter.Button(self, text="OK", state="normal")
        self.buttonCancel = tkinter.Button(self, text="Cancel", command=self.quit)
        # Default to guest mode.
        self.checkGuest.select()
        self.checkLogin.deselect()
        # 2x4 grid layout.
        self.checkGuest.grid(row=0, column=0, padx=2, pady=2,
                             sticky=tkinter.W)
        self.checkLogin.grid(row=0, column=1, padx=2, pady=2,
                             sticky=tkinter.EW)
        usernameLabel.grid(row=1, column=0, padx=2, pady=2,
                           sticky=tkinter.W)
        self.textUser.grid(row=1, column=1, padx=2, pady=2,
                           sticky=tkinter.EW)
        passwordLabel.grid(row=2, column=0, padx=2, pady=2,
                           sticky=tkinter.W)
        self.textPassword.grid(row=2, column=1, padx=2, pady=2,
                               sticky=tkinter.EW)
        self.buttonOk.grid(row=3, column=0, padx=2, pady=2,
                           sticky=tkinter.EW)
        self.buttonCancel.grid(row=3, column=1, padx=2, pady=2,
                               sticky=tkinter.EW)
        self.checkGuest.focus_set()
        # Re-evaluate widget states on every relevant interaction.
        # NOTE(review): <Button-1>/<Key> fire before the widget's value
        # actually changes, so the reaction lags one event behind — confirm
        # whether a trace on checkValue would be more appropriate.
        self.checkGuest.bind("<Button-1>", self.checkChange)
        self.checkLogin.bind("<Button-1>", self.checkChange)
        self.textUser.bind("<Key>", self.checkChange)
        self.textPassword.bind("<Key>", self.checkChange)

    def checkChange(self, event):
        """Mediator callback: enable/disable widgets to match the mode."""
        # Bug fix: compare the StringVar's *value*, not the StringVar object
        # itself — the original `self.checkValue == "Guest"` was always False.
        if self.checkValue.get() == "Guest":
            self.textUser["state"] = "disable"
            self.textPassword["state"] = "disable"
            self.buttonOk["state"] = "normal"
        else:
            self.textUser["state"] = "normal"
            self.userpassChanged()

    def userpassChanged(self):
        """OK is enabled only when both username and password are non-empty."""
        if len(self.textUser.get()) > 0:
            self.textPassword["state"] = "normal"
            if len(self.textPassword.get()) > 0:
                self.buttonOk["state"] = "normal"
            else:
                self.buttonOk["state"] = "disable"
        else:
            self.textPassword["state"] = "disable"
            self.buttonOk["state"] = "disable"

    def quit(self, event=None):
        """Close the whole application window."""
        self.parent.destroy()
if __name__ == '__main__':
    # Build the Tk root, attach the mediator frame, and route the window-close
    # protocol through Main.quit so cleanup stays in one place.
    application = tkinter.Tk()
    application.title("Mediator Sample")
    window = Main(application)
    application.protocol("WM_DELETE_WINDOW", window.quit)
    application.mainloop()
| 39.682927
| 106
| 0.567609
| 352
| 3,254
| 5.193182
| 0.232955
| 0.030635
| 0.039387
| 0.043764
| 0.314004
| 0.28884
| 0.200766
| 0.151532
| 0.131838
| 0.096827
| 0
| 0.018301
| 0.294714
| 3,254
| 81
| 107
| 40.17284
| 0.778214
| 0
| 0
| 0.287879
| 0
| 0
| 0.075292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0.166667
| 0.015152
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
9a1676b9866c375100521ac48277fdcc219264ce
| 1,592
|
py
|
Python
|
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | 1
|
2015-11-02T09:11:12.000Z
|
2015-11-02T09:11:12.000Z
|
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
datawinners/blue/urls.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls.defaults import patterns, url
from datawinners.blue import view
from datawinners.blue.view import new_xform_submission_post, edit_xform_submission_post, get_attachment, attachment_download, guest_survey, public_survey
from datawinners.blue.view import ProjectUpload, ProjectUpdate
from datawinners.blue.view import new_xform_submission_get
from datawinners.project.views.submission_views import edit_xform_submission_get
urlpatterns = patterns('',
    # Public/anonymous survey entry points.
    url(r'^guest_survey/(?P<link_uid>.+?)/$', guest_survey, name='guest_survey'),
    url(r'^survey/(?P<org_id>.+?)/(?P<anonymous_link_id>.+?)/*$', public_survey, name='public_survey'),
    # XLSForm project import/export and update.
    url(r'^xlsform/upload/$', ProjectUpload.as_view(), name="import_project"),
    url(r'^xlsform/download/$', view.project_download),
    url(r'^xlsform/upload/update/(?P<project_id>\w+?)/$', ProjectUpdate.as_view(), name="update_project"),
    # Web submissions: edit (GET/POST) and create (GET/POST).
    url(r'^xlsform/(?P<project_id>.+?)/web_submission/(?P<survey_response_id>[^\\/]+?)/$', edit_xform_submission_get, name="edit_xform_submission"),
    url(r'^xlsform/(?P<project_id>\w+?)/web_submission/$', new_xform_submission_get, name="xform_web_questionnaire"),
    url(r'^xlsform/web_submission/(?P<survey_response_id>.+?)/$', edit_xform_submission_post, name="update_web_submission"),
    url(r'^xlsform/web_submission/$', new_xform_submission_post, name="new_web_submission"),
    # Media attachments: inline fetch and forced download.
    url(r'^attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', get_attachment),
    url(r'^download/attachment/(?P<document_id>.+?)/(?P<attachment_name>[^\\/]+?)/$', attachment_download)
)
| 61.230769
| 153
| 0.741834
| 212
| 1,592
| 5.25
| 0.207547
| 0.039533
| 0.069182
| 0.061995
| 0.37646
| 0.274933
| 0.237197
| 0.172507
| 0.08805
| 0
| 0
| 0
| 0.072236
| 1,592
| 25
| 154
| 63.68
| 0.753555
| 0
| 0
| 0
| 0
| 0
| 0.403266
| 0.336055
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.368421
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
9a25e9fa72dd391d4676f6e0a6bb06f9710db5d6
| 1,837
|
py
|
Python
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 45
|
2021-01-28T04:12:21.000Z
|
2022-02-24T13:15:50.000Z
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 32
|
2021-03-02T18:45:16.000Z
|
2022-03-12T00:53:10.000Z
|
matilda/data_pipeline/data_streaming/consumer.py
|
AlainDaccache/Quantropy
|
6cfa06ed2b764471382ebf94d40af867f10433bb
|
[
"MIT"
] | 10
|
2020-12-25T15:02:40.000Z
|
2021-12-30T11:40:15.000Z
|
from kafka.consumer import KafkaConsumer
from json import loads
from mongoengine import *
from matilda.data_pipeline import object_model
# Kafka consumer for the 'numtest' topic; values are JSON-decoded on receipt.
consumer = KafkaConsumer(
    'numtest',  # kafka topic
    bootstrap_servers=['localhost:9092'],  # same as our producer
    # It handles where the consumer restarts reading after breaking down or being turned off and can be set either
    # to earliest or latest. When set to latest, the consumer starts reading at the end of the log.
    # When set to earliest, the consumer starts reading at the latest committed offset.
    auto_offset_reset='earliest',
    enable_auto_commit=True,  # makes sure the consumer commits its read offset every interval.
    # join a consumer group for dynamic partition assignment and offset commits
    # a consumer needs to be part of a consumer group to make the auto commit work.
    # otherwise, need to do it manually i.e. consumer.assign([TopicPartition('foobar', 2)]); msg = next(consumer)
    group_id='my-group',
    # deserialize encoded values
    value_deserializer=lambda x: loads(x.decode('utf-8')))
def get_atlas_db_url(username, password, dbname):
return f"mongodb+srv://{username}:{password}@cluster0.ptrie.mongodb.net/{dbname}?retryWrites=true&w=majority&" \
f"ssl=true"
# SECURITY NOTE(review): credentials are hard-coded here; move them to
# environment variables or a secrets store before shipping.
atlas_url = get_atlas_db_url(username='AlainDaccache', password='qwerty98', dbname='matilda-db')
db = connect(host=atlas_url)
# The consumer iterator returns ConsumerRecords, which are simple namedtuples
# that expose basic message attributes: topic, partition, offset, key, and value:
for message in consumer:
    message = message.value
    print(message)
    # Persist each received number into MongoDB via the object model.
    object_model.Test(number=message['number']).save()
    print('{} added to db'.format(message))
# # Then to check whats in it:
# for doc in object_model.Test.objects:
#     print(doc._data)
| 42.72093
| 116
| 0.740338
| 261
| 1,837
| 5.1341
| 0.563218
| 0.041045
| 0.013433
| 0.035821
| 0.074627
| 0.043284
| 0
| 0
| 0
| 0
| 0
| 0.005894
| 0.168753
| 1,837
| 42
| 117
| 43.738095
| 0.871644
| 0.495917
| 0
| 0
| 0
| 0.047619
| 0.221122
| 0.110011
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.142857
| 0.190476
| 0.047619
| 0.285714
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
9a2921aafee477055d03e47abb30d023e2f9b7df
| 2,645
|
py
|
Python
|
2017/day06/redistribution.py
|
kmcginn/advent-of-code
|
96a8d7d723f6f222d431fd9ede88d0a303d86761
|
[
"MIT"
] | null | null | null |
2017/day06/redistribution.py
|
kmcginn/advent-of-code
|
96a8d7d723f6f222d431fd9ede88d0a303d86761
|
[
"MIT"
] | null | null | null |
2017/day06/redistribution.py
|
kmcginn/advent-of-code
|
96a8d7d723f6f222d431fd9ede88d0a303d86761
|
[
"MIT"
] | null | null | null |
"""
from: http://adventofcode.com/2017/day/6
--- Day 6: Memory Reallocation ---
A debugger program here is having an issue: it is trying to repair a memory reallocation routine,
but it keeps getting stuck in an infinite loop.
In this area, there are sixteen memory banks; each memory bank can hold any number of blocks. The
goal of the reallocation routine is to balance the blocks between the memory banks.
The reallocation routine operates in cycles. In each cycle, it finds the memory bank with the most
blocks (ties won by the lowest-numbered memory bank) and redistributes those blocks among the banks.
To do this, it removes all of the blocks from the selected bank, then moves to the next (by index)
memory bank and inserts one of the blocks. It continues doing this until it runs out of blocks; if
it reaches the last memory bank, it wraps around to the first one.
The debugger would like to know how many redistributions can be done before a blocks-in-banks
configuration is produced that has been seen before.
For example, imagine a scenario with only four memory banks:
The banks start with 0, 2, 7, and 0 blocks. The third bank has the most blocks, so it is chosen for
redistribution.
Starting with the next bank (the fourth bank) and then continuing to the first bank, the second
bank, and so on, the 7 blocks are spread out over the memory banks. The fourth, first, and second
banks get two blocks each, and the third bank gets one back. The final result looks like this:
2 4 1 2.
Next, the second bank is chosen because it contains the most blocks (four). Because there are four
memory banks, each gets one block. The result is: 3 1 2 3.
Now, there is a tie between the first and fourth memory banks, both of which have three blocks. The
first bank wins the tie, and its three blocks are distributed evenly over the other three banks,
leaving it with none: 0 2 3 4.
The fourth bank is chosen, and its four blocks are distributed such that each of the four banks
receives one: 1 3 4 1.
The third bank is chosen, and the same thing happens: 2 4 1 2.
At this point, we've reached a state we've seen before: 2 4 1 2 was already seen. The infinite loop
is detected after the fifth block redistribution cycle, and so the answer in this example is 5.
Given the initial block counts in your puzzle input, how many redistribution cycles must be
completed before a configuration is produced that has been seen before?
"""
def main():
"""Solve the problem!"""
with open('input.txt') as input_file:
data = input_file.read()
banks = [int(x) for x in data.split()]
print(banks)
if __name__ == "__main__":
main()
| 56.276596
| 100
| 0.761815
| 477
| 2,645
| 4.203354
| 0.389937
| 0.032918
| 0.020948
| 0.005985
| 0.04389
| 0.04389
| 0.04389
| 0.04389
| 0
| 0
| 0
| 0.016768
| 0.18828
| 2,645
| 46
| 101
| 57.5
| 0.917094
| 0.921739
| 0
| 0
| 0
| 0
| 0.088083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9a2d7ee04fd9497228365f3b015187758913933a
| 965
|
py
|
Python
|
models.py
|
curieos/Django-Blog-TDD
|
ba40b285d87c88aa33b1e2eb3d4bda014a88a319
|
[
"MIT"
] | null | null | null |
models.py
|
curieos/Django-Blog-TDD
|
ba40b285d87c88aa33b1e2eb3d4bda014a88a319
|
[
"MIT"
] | 8
|
2019-04-14T13:53:55.000Z
|
2019-07-11T18:06:57.000Z
|
models.py
|
curieos/Django-Blog-TDD
|
ba40b285d87c88aa33b1e2eb3d4bda014a88a319
|
[
"MIT"
] | null | null | null |
from django.utils.text import slugify
from django_extensions.db.fields import AutoSlugField
from django.db import models
from datetime import datetime
def get_current_date_time():
    """Callable default for DateTimeField: the moment of record creation.

    NOTE(review): returns a naive local datetime — confirm USE_TZ settings.
    """
    now = datetime.now()
    return now
# Create your models here.
class Post(models.Model):
    """A blog post, optionally linked to projects and tags."""
    title = models.CharField(max_length=50)
    # URL slug generated from the title.
    slug = AutoSlugField(max_length=50, populate_from=('title'))
    projects = models.ManyToManyField("projects.Project", verbose_name="Related Projects", blank=True)
    tags = models.ManyToManyField("Tag")
    # NOTE(review): creation timestamp doubles as the primary key — two posts
    # created in the same instant would collide; confirm this is intended.
    date_created = models.DateTimeField(primary_key=True, default=get_current_date_time, editable=False)
    last_modified = models.DateTimeField(auto_now=True)
    content = models.TextField()
    def __str__(self):
        return self.title
class Tag(models.Model):
    """A tag applied to posts; the tag name itself is the primary key."""
    name = models.CharField(max_length=50, primary_key=True)
    # URL slug generated from the name.
    slug = AutoSlugField(max_length=50, populate_from=('name'))
    def __str__(self):
        return self.name
    def popularity(self):
        # NOTE(review): no field named 'related' is visible in this file —
        # presumably a reverse relation (related_name) defined elsewhere; confirm.
        return self.related.all()
| 29.242424
| 101
| 0.78342
| 131
| 965
| 5.564886
| 0.442748
| 0.049383
| 0.060357
| 0.049383
| 0.23594
| 0.109739
| 0.109739
| 0
| 0
| 0
| 0
| 0.009292
| 0.107772
| 965
| 32
| 102
| 30.15625
| 0.837398
| 0.02487
| 0
| 0.086957
| 0
| 0
| 0.046858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.173913
| 0.173913
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.