This article collects typical usage examples of the Pool class from Python's multiprocessing.pool module. If you have been wondering what pool.Pool does, how to call it, and what it looks like in real code, the curated examples below should help. You can also explore the other members of the multiprocessing.pool module.
The following shows 27 code examples of pool.Pool, sorted by popularity by default. You can vote for the examples you like or find useful; your votes help our system recommend better Python code examples.
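Before the examples, here is a minimal, self-contained sketch of the canonical pattern most of them follow: define a top-level worker function, create the pool as a context manager, and map work across processes. The square function and the pool size are illustrative assumptions, not taken from any example below.

from multiprocessing.pool import Pool

def square(x):
    # Must be defined at module top level so it can be pickled for the workers.
    return x * x

if __name__ == '__main__':
    with Pool(processes=4) as pool:  # four worker processes
        results = pool.map(square, range(10))  # blocks until all tasks finish
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]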
Example 1: save_tfrecord
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def save_tfrecord(filename, dataset, verbose=False):
    observations = len(dataset['length'])

    serialized = []
    with Pool(processes=4) as pool:
        for serialized_string in tqdm(pool.imap(
            tfrecord_serializer,
            zip(dataset['length'], dataset['source'], dataset['target']),
            chunksize=10
        ), total=observations, disable=not verbose):
            serialized.append(serialized_string)

    # Save serialized dataset
    writer = tf.python_io.TFRecordWriter(
        filename,
        options=tf.python_io.TFRecordOptions(
            tf.python_io.TFRecordCompressionType.ZLIB
        )
    )
    for serialized_string in tqdm(serialized, disable=not verbose):
        writer.write(serialized_string)
    writer.close()
Author: distillpub | Project: post--memorization-in-rnns | Lines: 26

Example 2: shuffled_analysis
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def shuffled_analysis(iterations: int, meta: pd.DataFrame, counts: pd.DataFrame, interactions: pd.DataFrame,
                      cluster_interactions: list, base_result: pd.DataFrame, threads: int, separator: str,
                      suffixes: tuple = ('_1', '_2'), counts_data: str = 'ensembl') -> list:
    """
    Shuffles meta and calculates the means for each iteration, saving them in a list.
    Runs in multiple worker processes to make it faster.
    """
    core_logger.info('Running Statistical Analysis')
    with Pool(processes=threads) as pool:
        statistical_analysis_thread = partial(_statistical_analysis,
                                              base_result,
                                              cluster_interactions,
                                              counts,
                                              interactions,
                                              meta,
                                              separator,
                                              suffixes,
                                              counts_data=counts_data)
        results = pool.map(statistical_analysis_thread, range(iterations))

    return results
Author: Teichlab | Project: cellphonedb | Lines: 25

Example 3: fill_queue
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def fill_queue(self):
    if self.results is None:
        self.results = queue.deque(maxlen=self.max_queue)
    if self.num_workers > 0:
        if self.pool is None:
            self.pool = Pool(processes=self.num_workers)
    while len(self.results) < self.max_queue:
        if self.distinct_levels is not None and self.idx >= self.distinct_levels:
            break
        elif not self.repeat_levels and self.idx >= len(self.file_data):
            break
        else:
            data = self.get_next_parameters()
            if data is None:
                break
            self.idx += 1
            kwargs = {'seed': self._seed.spawn(1)[0]}
            if self.num_workers > 0:
                result = self.pool.apply_async(_game_from_data, data, kwargs)
            else:
                result = _game_from_data(*data, **kwargs)
            self.results.append((data, result))
Author: PartnershipOnAI | Project: safelife | Lines: 25

Example 4: __init__
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def __init__(self, configer=None, num_classes=None, boundary_threshold=0.00088, num_proc=15):
    assert configer is not None or num_classes is not None

    self.configer = configer
    if configer is not None:
        self.n_classes = self.configer.get('data', 'num_classes')
    else:
        self.n_classes = num_classes
    self.ignore_index = -1
    self.boundary_threshold = boundary_threshold
    self.pool = Pool(processes=num_proc)
    self.num_proc = num_proc
    self._Fpc = 0
    self._Fc = 0
    self.seg_map_cache = []
    self.gt_map_cache = []
Author: openseg-group | Project: openseg.pytorch | Lines: 21

Example 5: create_features_from_path
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def create_features_from_path(self, train_path: str, test_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
    column_pairs = self.get_column_pairs()
    col1s = []
    col2s = []
    latent_vectors = []
    gc.collect()
    with Pool(4) as p:
        for col1, col2, latent_vector in p.map(
                partial(self.compute_latent_vectors, train_path=train_path, test_path=test_path), column_pairs):
            col1s.append(col1)
            col2s.append(col2)
            latent_vectors.append(latent_vector.astype(np.float32))
    gc.collect()
    return self.get_feature(train_path, col1s, col2s, latent_vectors), \
           self.get_feature(test_path, col1s, col2s, latent_vectors)
Author: flowlight0 | Project: talkingdata-adtracking-fraud-detection | Lines: 18

Example 6: run
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def run(self):
    import gevent
    from gevent import monkey
    monkey.patch_all()
    from gevent import pool
    # default 200
    # g_pool = pool.Pool(200)
    g_pool = pool.Pool(self.coroutine)
    tasks = [g_pool.spawn(self.gen_traffic, url) for url in self.url_list]
    gevent.joinall(tasks)
    traffic_list = []
    for i in tasks:
        if i.value is not None:
            traffic_list.append(i.value)
    # save traffic for rescan
    Engine.save_traffic(traffic_list, self.id)
Author: lwzSoviet | Project: NoXss | Lines: 18

Example 7: verify_async
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def verify_async(case_list, coroutine):
    """
    Verify cases using the gevent library.
    :param case_list:
    :param coroutine:
    :return:
    """
    from gevent import monkey
    monkey.patch_all()
    result = []
    geventPool = pool.Pool(coroutine)
    tasks = [geventPool.spawn(Verify.request_and_verify, case) for case in case_list]
    gevent.joinall(tasks)
    for i in tasks:
        if i.value is not None:
            result.append(i.value)
    print_info('Total Verify-Case is: %s, %s error happened.' % (len(case_list), Verify.ERROR_COUNT))
    return result
Author: lwzSoviet | Project: NoXss | Lines: 20

Example 8: deduplicate
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def deduplicate(self, url_list):
    print 'Start to deduplicate for all urls.'
    filtered_path = self.file + '.filtered'
    if os.path.exists(filtered_path):
        print '%s has been filtered as %s.' % (self.file, filtered_path)
        with open(filtered_path) as f:
            filtered = f.read().split('\n')
        return filtered
    filtered = []
    # result = map(filter, url_list)
    from multiprocessing import cpu_count
    from multiprocessing.pool import Pool
    p = Pool(cpu_count())
    result = p.map(url_filter, url_list)
    for i in result:
        if isinstance(i, str):
            filtered.append(i)
    with open(filtered_path, 'w') as f:
        f.write('\n'.join(filtered))
    print 'Saved filtered urls to %s.' % filtered_path
    return filtered
Author: lwzSoviet | Project: NoXss | Lines: 23

Example 9: _schedule_runs_lk
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def _schedule_runs_lk(self, et_pool, job):
    """Schedule runs to execute up to the maximum possible parallelism.

    The suffix '_lk' means the caller must already hold the lock.

    :param et_pool: a multiprocessing pool handle
    :type: Pool
    :param job: current job
    :type: WorkerJob
    """
    while (self._has_more_runs_to_schedule(job) and
           job.runs_in_flight < job.max_runs_in_flight):
        run = job.schedule_next_run()
        if run.id is None:
            raise ValueError("Unexpected end of runs")
        self.etl_helper.etl_step_started(job.msg_dict, run.id, run.step)
        log('scheduled: {0}'.format(run.id))
        et_pool.apply_async(
            run.func,
            args=run.args,
            callback=self._create_run_complete_callback(job, run.id, run.step),
        )
        job.runs_in_flight += 1
Author: Yelp | Project: mycroft | Lines: 26

Example 10: GenerateMode
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def GenerateMode(corpus, context_token_limit):
    for dataset in datasets:
        print 'Generating questions for the %s set:' % dataset

        urls_filename = '%s/wayback_%s_urls.txt' % (corpus, dataset)
        urls = ReadUrls(urls_filename)

        p = Pool()
        question_context_lists = p.imap_unordered(
            GenerateMapper, izip(urls, repeat(corpus), repeat(context_token_limit)))

        progress_bar = ProgressBar(len(urls))
        for question_context_list in question_context_lists:
            if question_context_list:
                for question_context in question_context_list:
                    WriteQuestionContext(question_context, corpus, dataset)
            progress_bar.Increment()
Author: deepmind | Project: rc-data | Lines: 20

Example 11: fit
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def fit(self, X, y=None):
    """Fit all transformers using X.

    Parameters
    ----------
    X : iterable or array-like, depending on transformers
        Input data, used to fit transformers.
    y : array-like, shape (n_samples, ...), optional
        Targets for supervised learning.

    Returns
    -------
    self : FeatureUnion
        This estimator
    """
    self.transformer_list = list(self.transformer_list)
    self._validate_transformers()
    with Pool(self.n_jobs) as pool:
        transformers = pool.starmap(_fit_one_transformer,
                                    ((trans, X[trans.steps[0][1].columns], y) for _, trans, _ in self._iter()))
    self._update_transformer_list(transformers)
    return self
Author: pjankiewicz | Project: mercari-solution | Lines: 25

Example 12: transform
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def transform(self, X):
    """Transform X separately by each transformer, concatenate results.

    Parameters
    ----------
    X : iterable or array-like, depending on transformers
        Input data to be transformed.

    Returns
    -------
    X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
        hstack of results of transformers. sum_n_components is the
        sum of n_components (output dimension) over transformers.
    """
    with Pool(self.n_jobs) as pool:
        Xs = pool.starmap(_transform_one, ((trans, weight, X[trans.steps[0][1].columns])
                                           for name, trans, weight in self._iter()))
    if not Xs:
        # All transformers are None
        return np.zeros((X.shape[0], 0))
    if any(sparse.issparse(f) for f in Xs):
        Xs = sparse.hstack(Xs).tocsr()
    else:
        Xs = np.hstack(Xs)
    return Xs
Author: pjankiewicz | Project: mercari-solution | Lines: 27

Example 13: calculate_cell_score_selim
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def calculate_cell_score_selim(y_true, y_pred, num_threads=32, ids=None):
    yps = []
    for m in range(len(y_true)):
        yps.append((y_true[m].copy(), y_pred[m].copy()))
    pool = Pool(num_threads)
    results = pool.map(calculate_jaccard, yps)
    if ids:
        import pandas as pd
        s_iou = np.argsort(results)
        d = []
        for i in range(len(s_iou)):
            id = ids[s_iou[i]]
            res = results[s_iou[i]]
            d.append([id, res])
        pd.DataFrame(d, columns=["ID", "METRIC_SCORE"]).to_csv("gt_vs_oof.csv", index=False)
    return np.array(results).mean()
Author: selimsef | Project: dsb2018_topcoders | Lines: 19

Example 14: process_specification_directory
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def process_specification_directory(glob_pattern, outfile_name, namespace, write_baseclass=True):
    with open(os.path.join(options.out_path, outfile_name), 'w+') as out_file:
        paths = [p for p in glob.glob(os.path.join(options.spec_path, glob_pattern))]
        classes = list()
        func = functools.partial(process_file, namespace)
        with Pool() as pool:
            classes.extend(pool.map(func, paths))
        print("Formatting...")
        formatted_code = FormatCode("\n".join(sorted(classes)))[0]
        if write_baseclass:
            header = BASE_CLASS
        else:
            header = "from zenpy.lib.api_objects import BaseObject\nimport dateutil.parser"
        out_file.write("\n\n\n".join((header, formatted_code)))
Author: facetoe | Project: zenpy | Lines: 18

Example 15: main
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def main():
    # Parameters
    process_num = 24
    image_size = (512, 512)
    url = 'http://v18.proteinatlas.org/images/'
    csv_path = "data/HPAv18RBGY_wodpl.csv"
    save_dir = "data/raw/external"

    os.makedirs(save_dir, exist_ok=True)
    print('Parent process %s.' % os.getpid())
    img_list = list(pd.read_csv(csv_path)['Id'])
    img_splits = np.array_split(img_list, process_num)
    assert sum([len(v) for v in img_splits]) == len(img_list)
    p = Pool(process_num)
    for i, split in enumerate(img_splits):
        p.apply_async(
            download, args=(str(i), list(split), url, save_dir, image_size)
        )
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')
Author: pudae | Project: kaggle-hpa | Lines: 25

Example 16: main
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def main():
    print('\nWelcome here to get pictures from www.toutiao.com!')
    keyword = input('Please input your search keywords > ')
    count = None
    while count is None:
        number_str = input(
            'Please input count of picture collection that you want (divisible by 20) > ')
        try:
            count = int(number_str)
        except ValueError:
            print('Please input a valid number!')
    if count > 0:
        print('Getting %s pictures...' % keyword)
        page_num = count // 20 + (0 if count % 20 == 0 else 1)
        offset_list = [x * 20 for x in range(0, page_num)]
        pool = Pool()
        partial_getter = partial(get_images_of, keyword=keyword)
        pool.map(partial_getter, offset_list)
        pool.close()
        pool.join()
    else:
        print('Get Cancel!')
Author: Litreily | Project: capturer | Lines: 25

Example 17: main
Votes: 6
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def main():
    parser = argparse.ArgumentParser(
        description="Downloads wheels from AppVeyor")
    parser.add_argument("--buildid", "-id", type=int,
                        default=None, help='AppVeyor build id')
    parser.add_argument("--pool", "-p", type=int, default=6,
                        help="Multiprocess pool size")
    parser.add_argument("--dir", "-d", type=check_dir, default=getcwd(),
                        help='Directory to download the files into.')
    options = parser.parse_args()

    download_urls = []
    for job in get_jobs(options.buildid):
        download_urls += get_artifacts(job)

    # Download them in parallel
    pool = Pool(options.pool)
    for download_url in download_urls:
        pool.apply_async(download_file, args=(download_url, options.dir))
    pool.close()
    pool.join()
    print("Done")
Author: piqueserver | Project: piqueserver | Lines: 23

Example 18: init_pool
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def init_pool(self, worker_count):
    return Pool(worker_count)
Author: JohnStarich | Project: python-pool-performance | Lines: 4

Example 19: cloud_tpu
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def cloud_tpu(vm_name, tpu_name, delete_on_done=False, skip_confirmation=False):
    """Gets or creates a VM and TPU instance, and forwards ports.

    Args:
      vm_name: str, name of VM.
      tpu_name: str, name of TPU instance.
      delete_on_done: bool, whether to delete the instances when done.
      skip_confirmation: bool, whether to skip launch confirmations.

    Yields:
      master: str, grpc master pointing to the TPU instance.
    """
    state = CloudState()
    # Read state from previous processes and possibly clean up
    state.cleanup(current_vm_name=vm_name, current_tpu_name=tpu_name,
                  skip_confirmation=skip_confirmation)

    done_str = "" if delete_on_done else "NOT "
    print("Will %sdelete VM and TPU instance on done." % done_str)
    if not skip_confirmation:
        assert confirm()

    _, tpu_ip = create_vm_tpu_pair(vm_name, tpu_name,
                                   skip_confirmation=skip_confirmation)
    with tpu_tunnel(vm_name, tpu_ip) as (local_ports, tunnel_pid):
        master = "grpc://localhost:%d" % local_ports["tpu"]
        state.add_current(tunnel_pid, vm_name, tpu_name, delete_on_done)
        yield master

    if delete_on_done:
        pool = mp.Pool(2)
        vm_res = pool.apply_async(delete_vm, (vm_name,))
        tpu_res = pool.apply_async(delete_tpu, (tpu_name,))
        vm_res.get()
        tpu_res.get()

    # Clean up state from this process
    state.delete_current()
Author: akzaidi | Project: fine-lm | Lines: 41

Example 20: Pool
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)
Author: war-and-code | Project: jawfish | Lines: 8
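The wrapper above simply forwards its arguments to the multiprocessing.pool.Pool constructor. A hypothetical call could look like the following sketch; add_one is an illustrative worker, not part of the original project.

def add_one(n):
    return n + 1

if __name__ == '__main__':
    # maxtasksperchild=10 recycles each worker process after ten tasks.
    p = Pool(processes=2, initializer=None, initargs=(), maxtasksperchild=10)
    try:
        print(p.map(add_one, [1, 2, 3]))  # [2, 3, 4]
    finally:
        p.close()
        p.join()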
Example 21: parallel_variability_analysis
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def parallel_variability_analysis(tmodel, kind='reactions', proc_num=BEST_THREAD_RATIO):
    """
    WIP.

    :param tmodel:
    :param kind:
    :param proc_num:
    :return:
    """
    raise NotImplementedError

    objective = tmodel.objective

    if kind == Reaction or kind.lower() in ['reaction', 'reactions']:
        these_vars = tmodel.reactions
    else:
        these_vars = tmodel.get_variables_of_type(kind)

    func = partial(_variability_analysis_element, tmodel)
    pool = Pool(processes=proc_num)

    async_result = pool.map_async(func, these_vars)
    pool.close()
    pool.join()

    # aggregated_result = pd.DataFrame(async_result.get(),
    #                                  columns=['minimize', 'maximize'])

    tmodel.objective = objective
    return async_result
Author: EPFL-LCSB | Project: pytfa | Lines: 33

Example 22: __init__
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def __init__(self, processes: int = None):
    super().__init__()
    self.pool = Pool(processes)
Author: jMetal | Project: jMetalPy | Lines: 5

Example 23: __init__
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def __init__(self, args):
    super().__init__()
    self.logger = set_logger(colored('VENTILATOR', 'magenta'), args.verbose)

    self.model_dir = args.model_dir
    self.max_seq_len = args.max_seq_len
    self.num_worker = args.num_worker
    self.max_batch_size = args.max_batch_size
    self.num_concurrent_socket = max(8, args.num_worker * 2)  # optimize concurrency for multi-clients
    self.port = args.port
    self.args = args
    self.status_args = {k: (v if k != 'pooling_strategy' else v.value) for k, v in sorted(vars(args).items())}
    self.status_static = {
        'tensorflow_version': _tf_ver_,
        'python_version': sys.version,
        'server_version': __version__,
        'pyzmq_version': zmq.pyzmq_version(),
        'zmq_version': zmq.zmq_version(),
        'server_start_time': str(datetime.now()),
    }
    self.processes = []
    self.logger.info('freeze, optimize and export graph, could take a while...')
    with Pool(processes=1) as pool:
        # optimize the graph, must be done in another process
        from .graph import optimize_graph
        self.graph_path, self.bert_config = pool.apply(optimize_graph, (self.args,))
    # from .graph import optimize_graph
    # self.graph_path = optimize_graph(self.args, self.logger)
    if self.graph_path:
        self.logger.info('optimized graph is stored at: %s' % self.graph_path)
    else:
        raise FileNotFoundError('graph optimization fails and returns empty result')
    self.is_ready = threading.Event()
Author: hanxiao | Project: bert-as-service | Lines: 35

Example 24: match_plans
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def match_plans(self):
    pool = Pool(multiprocessing.cpu_count() - 1)
    plans = list(reversed(list(tqdm(pool.imap(match_plan, list(reversed(self.data))), total=len(self.data)))))
    self.data = [d.set_plan(p) for d, plans in zip(self.data, plans) for p in plans]
    return self
Author: AmitMY | Project: chimera | Lines: 8

Example 25: exhaustive_plan
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def exhaustive_plan(self, planner):
    unique = {d.graph.unique_key(): d.graph for d in self.data}
    unique_graphs = list(reversed(list(unique.values())))
    plan_iter = ((g, planner) for g in unique_graphs)
    # pool = Pool(multiprocessing.cpu_count() - 1)
    # plans = list(tqdm(pool.imap(exhaustive_plan_compress, plan_iter),
    #                   total=len(unique_graphs)))
    plans = [exhaustive_plan_compress(g_p) for g_p in tqdm(list(plan_iter))]
    graph_plans = {g.unique_key(): p for g, p in zip(unique_graphs, plans)}
    self.data = [d.set_plans(graph_plans[d.graph.unique_key()]) for d in self.data]
    return self
Author: AmitMY | Project: chimera | Lines: 16

Example 26: create_plans
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def create_plans(self, planner):
    assert planner is not None
    unique = {d.graph.unique_key(): d.graph for d in self.data}
    unique_graphs = list(reversed(list(unique.values())))
    # if planner.is_parallel:
    #     pool = Pool(multiprocessing.cpu_count() - 1)
    #     plans = list(tqdm(pool.imap(planner.plan_best, unique_graphs), total=len(unique_graphs)))
    # else:
    plans = []
    for g in tqdm(unique_graphs):
        start = time.time()
        plans.append(planner.plan_best(g))
        g_size = len(g.edges)
        if g_size not in self.timing:
            self.timing[g_size] = []
        self.timing[g_size].append(time.time() - start)
    graph_plan = {g.unique_key(): p for g, p in zip(unique_graphs, plans)}
    for d in self.data:
        plans = graph_plan[d.graph.unique_key()]
        if isinstance(plans, list):
            d.set_plan(plans[0])
            d.set_plans(plans[1:])
        else:
            d.set_plan(plans)
    return self
Author: AmitMY | Project: chimera | Lines: 30

Example 27: process_all
Votes: 5
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import Pool [as alias]
def process_all(self, pool: Pool):
    """Process all masks."""
    images = [
        filename
        for filename in self.in_dir.iterdir()
        if has_image_extension(str(filename))
    ]
    tqdm_parallel_imap(self.preprocess, images, pool)
Author: catalyst-team | Project: segmentation | Lines: 10
Note: The multiprocessing.pool.Pool examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult each project's license before distributing or reusing the code, and do not republish without permission.