# endpoint_utils.py
import pandas as pd


def preFillDataParser(inputData):
    """
    Parses the pre-fill data (decoded JSON) given to the template generator.

    Validates the structure of the input, then returns a dictionary of
    pandas DataFrames keyed by sheet name. The expected input structure:
    {
        sheetName1 : [
            {columnName : value1},
            {columnName : value2},
            {columnName : value3},
            {columnName : value4},
        ]
    }
    :param inputData: pre-fill data, a dictionary of lists of dictionaries
    :type dict
    :return: dictionary of pandas DataFrames, or an error string on failure
    """
    # Test if the input is a dictionary:
    if not isinstance(inputData, dict):
        return "[Error] Pre-fill data should be a dictionary of lists."
    # Output data is a dictionary of pandas DataFrames:
    outputDataDict = {}
    # If it is a dictionary, test if the values are lists of dictionaries:
    for sheetName, values in inputData.items():
        if not isinstance(values, list):
            return "[Error] Pre-fill data should be an array of dictionaries."
        for value in values:
            if not isinstance(value, dict):
                return "[Error] Pre-fill data should be an array of dictionaries."
        # If its values are dictionaries, read them as a pandas DataFrame:
        try:
            outputDataDict[sheetName] = pd.DataFrame(values)
        except TypeError:
            return "[Error] Study data could not be imported as a dataframe."
    # Return data:
    return outputDataDict
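
# Illustrative usage sketch for preFillDataParser (the sheet and column names
# below are made up for the example, not taken from the real schema):
#
#   prefill = {
#       'study': [
#           {'studyTag': 'study_1'},
#           {'studyTag': 'study_2'},
#       ]
#   }
#   parsed = preFillDataParser(prefill)
#   # On success, parsed['study'] is a two-row pandas DataFrame; on malformed
#   # input the function returns an "[Error] ..." string instead.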


def schema_parser(filterParameters, filters, tabname, schemaDf):
    """
    Filters the schema definition based on the provided parameters.

    Only the default columns remain visible; the set of default columns can be
    changed by the provided parameters. Parameter definitions are in the
    properties file.
    :param filterParameters: the payload of the POST request
    :type dict
    :param filters: filter definitions from the properties file
    :type dict
    :param tabname: name of the schema, e.g. study or association
    :type str
    :param schemaDf: the schema definition of the entity
    :type pandas DataFrame
    :return: the filtered schema as a pandas DataFrame
    """
    # TODO: test data types... sometime in the future
    # Generate compound parameters:
    if filterParameters.get('curator') and filterParameters.get('backgroundTrait'):
        filterParameters['curator_backgroundTrait'] = True
    # Filtering based on the provided parameters:
    for criteria, value in filterParameters.items():
        if not value: continue  # We only care about true parameters.
        # The effect criterion carries the filter name as its value:
        if criteria == 'effect':
            criteria = value
        if criteria not in filters: continue  # We won't let undocumented filters break the code.
        # Adding columns as defined by the properties file:
        if 'addColumn' in filters[criteria] and tabname in filters[criteria]['addColumn']:
            for field in filters[criteria]['addColumn'][tabname]:
                schemaDf.loc[schemaDf.NAME == field, 'DEFAULT'] = True
        # Removing columns as defined by the properties file:
        if 'removeColumn' in filters[criteria] and tabname in filters[criteria]['removeColumn']:
            for field in filters[criteria]['removeColumn'][tabname]:
                schemaDf.loc[schemaDf.NAME == field, 'DEFAULT'] = False
    # Keep only the default columns:
    schemaDf = schemaDf.loc[schemaDf.DEFAULT]
    # Resetting index:
    schemaDf = schemaDf.reset_index(drop=True)
    return schemaDf
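
# Illustrative usage sketch for schema_parser (the filter definition and
# column names below are invented; real ones come from the properties file):
#
#   filters = {'curator': {'addColumn': {'study': ['CURATOR_NOTES']}}}
#   schemaDf = pd.DataFrame({
#       'NAME': ['STUDY_TAG', 'CURATOR_NOTES'],
#       'DEFAULT': [True, False],
#   })
#   filtered = schema_parser({'curator': True}, filters, 'study', schemaDf)
#   # 'CURATOR_NOTES' is flagged as DEFAULT and kept in the returned frame.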


def set_log_path(conf):
    # Point the configured log handler at the log file under the logging path:
    conf.LOG_CONF['handlers'][conf.LOGGER_HANDLER]['filename'] = conf.logging_path + "/" + conf.LOGGER_LOG
    return conf.LOG_CONF


def _set_log_level(LOG_CONF, LOG_LEVEL):
    # Apply the requested log level to every configured handler and logger:
    for handler in LOG_CONF['handlers']:
        LOG_CONF['handlers'][handler]['level'] = LOG_LEVEL
    for logger in LOG_CONF['loggers']:
        LOG_CONF['loggers'][logger]['level'] = LOG_LEVEL
    return LOG_CONF
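
# Illustrative usage sketch for the two logging helpers. The Config object and
# its attribute values below are assumptions inferred from how set_log_path
# reads them, not the application's real configuration:
#
#   import logging.config
#
#   class Config:
#       LOGGER_HANDLER = 'file'
#       LOGGER_LOG = 'endpoint.log'
#       logging_path = '/tmp'
#       LOG_CONF = {
#           'version': 1,
#           'handlers': {'file': {'class': 'logging.FileHandler',
#                                 'filename': '', 'level': 'INFO'}},
#           'loggers': {'app': {'handlers': ['file'], 'level': 'INFO'}},
#       }
#
#   log_conf = set_log_path(Config)
#   log_conf = _set_log_level(log_conf, 'DEBUG')
#   logging.config.dictConfig(log_conf)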