bootstrap_linear_models.py
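'''Bootstraps linear models of several orders over resampled blocks of
parsed timepoint data, scoring fits in parallel worker processes (SMAPE,
judging by the worker function's name) and saving the pooled results to
Parquet.'''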
import logging
import shelve

import numpy as np
import pandas as pd

import config as conf
import functions.initialization_functions as init_funcs
import functions.parallelization_functions as parallel_funcs
if __name__ == '__main__':

    paths = conf.DataFilePaths()
    params = conf.LinearModelsBootstrappingParameters()

    # Get column index for parsed data
    index = shelve.open(paths.PARSED_DATA_COLUMN_INDEX)
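    # Note: shelve.open returns a dict-like handle backed by a file on
    # disk. It is handed to the worker processes below, which assumes the
    # parallelization helpers can serialize it; if that ever fails,
    # converting it to a plain dict first (dict(index)) is a safe fallback.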
    logger = init_funcs.start_logger(
        logfile = f'{paths.LOG_DIR}/{params.log_file_name}',
        logstart_msg = 'Starting bootstrapping run'
    )

    # Block size used for parsed data loading needs to be
    # the largest model order plus five
    block_size = max(params.model_orders) + 5
    # Load parsed data
    input_file = f'{paths.PARSED_DATA_PATH}/{params.input_file_root_name}{block_size}.npy'
    timepoints = np.load(input_file)
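    # The array is indexed below as timepoints[block, sample, row, column],
    # so a 4-D layout is assumed here; adjust this check if the parser
    # emits a different shape.
    assert timepoints.ndim == 4, f'Expected 4-D parsed data, got {timepoints.ndim}-D'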
    # Log some details about the input data & run parameters
    logging.info('')
    logging.info(f'CPUs: {params.n_cpus}')
    logging.info(f'Samples: {params.num_samples} ({params.samples_per_cpu} per CPU)')
    logging.info(f'Sample size: {params.sample_size}')
    logging.info(f'Model orders: {params.model_orders}')
    logging.info(f'Model types: control + {params.model_types}')
    logging.info('')
    logging.info(f'Input timepoints shape: {timepoints.shape}')
    logging.info('')

    logging.info('Input column types:')

    for column in timepoints[0,0,0,0:]:
        logging.info(f'{type(column)}')

    logging.info('')
    logging.info('Example input block:')

    for row in timepoints[0,0,0:,]:
        row = [f'{x:.2e}' for x in row]
        logging.info(f'{row}')

    logging.info('')
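    # What follows assumes start_multiprocessing_pool returns a
    # multiprocessing.Pool plus an empty list for collecting AsyncResult
    # handles; each apply_async call is non-blocking, so the sample loop
    # below only queues work and returns immediately.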
    # Fire up the pool
    pool, result_objects = parallel_funcs.start_multiprocessing_pool()
    # Loop on samples, assigning each to a different worker
    for sample_num in range(params.num_samples):

        result = pool.apply_async(parallel_funcs.parallel_bootstrapped_linear_smape,
            args = (
                index,
                timepoints,
                sample_num,
                params.sample_size,
                params.model_orders,
                params.model_types,
                params.time_fits
            )
        )

        # Add result to collection
        result_objects.append(result)
    # Get and parse result objects, clean up pool
    data = parallel_funcs.cleanup_bootstrapping_multiprocessing_pool(pool, result_objects)

    # Convert result to Pandas DataFrame
    data_df = pd.DataFrame(data)
    # Persist to disk as Parquet
    output_file = f'{paths.BOOTSTRAPPING_RESULTS_PATH}/{params.output_file_root_name}.parquet'
    data_df.to_parquet(output_file)
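    # Close the parsed-data column index now that the run is finished
    index.close()

    # The results can be inspected later with pandas, e.g.:
    #   pd.read_parquet(output_file).head()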