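'''
dataIO.py

Data-access helpers: thin pandas.read_sql wrappers for the metadata, linkage,
age and participant-count tables (described in the notes at the bottom of this
file), plus JSON/GeoJSON asset readers and a basket export helper.
'''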
import pandas as pd
import json
import os

def load_datasets(cnxn):
    df = pd.read_sql("SELECT * from dataset", cnxn)
    return df

def load_geojson():
    with open(os.path.join("assets", "map overlays", "regions.geojson"), 'r') as f:
        gj = json.load(f)
    return gj

def load_dataset_linkage_groups(cnxn, source = "none", table_name = "none"):
    '''Dataset linkage by group, optionally filtered by source and/or table_name.'''
    rtn = pd.read_sql("SELECT * from dataset_linkage_by_group", cnxn)
    if source == "none" and table_name == "none":
        return rtn
    elif source == "none":
        # only table_name supplied
        return rtn.loc[rtn["table_name"].str.contains(table_name)]
    elif table_name == "none":
        # only source supplied
        return rtn.loc[rtn["source"].str.lower() == source.lower()]
    else:
        return rtn.loc[(rtn["source"].str.lower() == source.lower()) & (rtn["table_name"].str.contains(table_name))]

def load_dataset_linkage(cnxn, source = "none", table_name = "none"):
    '''Dataset linkage, optionally filtered by source and/or table_name.'''
    rtn = pd.read_sql("SELECT * from dataset_linkage", cnxn)
    if source == "none" and table_name == "none":
        return rtn
    elif source == "none":
        return rtn.loc[rtn["table_name"].str.contains(table_name)]
    elif table_name == "none":
        return rtn.loc[rtn["source"].str.lower() == source.lower()]
    else:
        return rtn.loc[(rtn["source"].str.lower() == source.lower()) & (rtn["table_name"].str.contains(table_name))]

def load_cohort_linkage_groups(cnxn, source = "none"):
    rtn = pd.read_sql("SELECT * from cohort_linkage_by_group", cnxn)
    if source == "none":
        return rtn
    else:
        return rtn.loc[(rtn["cohort"].str.lower() == source.lower())]

def load_cohort_age(cnxn, source = "none"):
    rtn = pd.read_sql("SELECT * from cohort_ages", cnxn)
    if source == "none":
        return rtn
    else:
        return rtn.loc[(rtn["source"].str.lower() == source.lower())]

def load_dataset_age(cnxn, source = "none", table_name = "none"):
    rtn = pd.read_sql("SELECT * from dataset_ages", cnxn)
    if source == "none" and table_name == "none":
        return rtn
    elif source == "none":
        return rtn.loc[rtn["table_name"] == table_name]
    elif table_name == "none":
        return rtn.loc[rtn["source"].str.lower() == source.lower()]
    else:
        return rtn.loc[(rtn["source"].str.lower() == source.lower()) & (rtn["table_name"] == table_name)]

def load_source_info(cnxn, source = "none"):
    rtn = pd.read_sql("SELECT * from source_info", cnxn)
    if source == "none":
        return rtn
    else:
        return rtn.loc[(rtn["cohort"].str.lower() == source.lower())]

def load_dataset_count(cnxn, source = "none", table_name = "none"):
    rtn = pd.read_sql("SELECT * from dataset_participants", cnxn)
    if source == "none" and table_name == "none":
        return rtn
    elif source == "none":
        return rtn.loc[rtn["table_name"].str.contains(table_name)]
    elif table_name == "none":
        return rtn.loc[rtn["source"].str.lower() == source.lower()]
    else:
        return rtn.loc[(rtn["source"].str.lower() == source.lower()) & (rtn["table_name"].str.contains(table_name))]

def load_search(cnxn, source = "none", table_name = "none"):
    '''Search table, optionally filtered by source and/or table.'''
    if source == "none" and table_name == "none":
        return pd.read_sql("SELECT * from search", cnxn)
    elif source == "none":
        return pd.read_sql("SELECT * from search where [table] = '{}'".format(table_name), cnxn)
    elif table_name == "none":
        return pd.read_sql("SELECT * from search where [source] = '{}'".format(source), cnxn)
    else:
        return pd.read_sql("SELECT * from search where [source] = '{}' and [table] = '{}'".format(source, table_name), cnxn)
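
# Sketch (not used elsewhere in this module): a parameterized variant of load_search
# that avoids interpolating values into the SQL string. Assumes the driver behind
# `cnxn` accepts qmark ("?") placeholders, as pyodbc and sqlite3 do; pandas.read_sql
# passes the `params` sequence through to the driver.
def load_search_parameterized(cnxn, source = "none", table_name = "none"):
    query = "SELECT * from search"
    clauses, params = [], []
    if source != "none":
        clauses.append("[source] = ?")
        params.append(source)
    if table_name != "none":
        clauses.append("[table] = ?")
        params.append(table_name)
    if clauses:
        query += " where " + " and ".join(clauses)
    return pd.read_sql(query, cnxn, params=params or None)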

def load_study_request(cnxn):
    '''
    Data request form info.
    @deprecated: should now use
    '''
    sheet_df = pd.read_sql("SELECT * from drf_lps", cnxn)
    #sheet_df = pd.read_excel(os.path.join("assets", "Data Request Form.xlsx"), sheet_name="Study data requested",skiprows=5, usecols = "D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R")
    return sheet_df

def load_linked_request(cnxn):
    '''
    Linked (NHS) data request form info.
    '''
    # Columns needed:
    #   Data Block Name; Data Block Description; Coverage; Time Period†;
    #   Number of Participants Included (n=) (i.e. number of participants with non-null data, and with UK LLC, and linkage permission);
    #   Documentation; Codelist Required;
    #   Health Domain Groupings (i.e. covid infection, asthma, smoking, etc.); Justification of dataset request
    #sheet_df = pd.read_excel(os.path.join("assets", "Data Request Form.xlsx"), sheet_name="Linked data requested",skiprows=5, usecols = "A,B,C,D,E,F,G,H")
    #sheet_df = sheet_df.rename(columns = {"Data Block Name":"Block Name", "Data Block Description":"Block Description", "Time Period†":"Timepoint: Data Collected", "'Health Domain Groupings (i.e. covid infection, asthma, smoking, etc.)":"Keywords"})
    #sheet_df["Source"] = "NHSD"
    sheet_df = pd.read_sql("SELECT * from drf_nhs", cnxn)
    return sheet_df

def load_study_info_and_links(cnxn):
    '''
    Study info & links (study_info table).
    '''
    #TODO Convert to database
    #sheet_df = pd.read_excel(os.path.join("assets", "Data Request Form.xlsx"), sheet_name="Study info & links", skiprows=1, usecols = "B,C,D,E,F,G,H,I,J" )
    sheet_df = pd.read_sql("SELECT * from study_info", cnxn)
    return sheet_df

def load_study_metadata(cnxn, table_id):
    '''
    Load variable-level metadata for a table_id of the form "<study>-<table>".
    '''
    print("DEBUG: Load request for", table_id)
    study = table_id.split("-")[0]
    table = table_id.split("-")[1]
    # TODO change to joined metadata file (requires preprep, splitting all into proper folders)
    try:
        q = '''
        SELECT * FROM metadata_{}
        ORDER BY "Block Name", "Variable Name"
        '''.format(study.lower() + "_" + table.lower())
        values_df = pd.read_sql(q, cnxn)
    except Exception:
        # pd.read_sql raises a database error (not FileNotFoundError) if the
        # metadata_<study>_<table> table is missing; skip and return None.
        print("Couldn't load metadata for {}. Skipping.".format(table_id))
        return None
    return values_df

def load_always_provisioned(cnxn):
    df = pd.read_sql("SELECT * from always_provisioned", cnxn)
    return df

def basket_out(basket, datasets_df):
    basket_pd = pd.DataFrame({
        "TABLE_SCHEMA": [item.split("-")[0] for item in basket],
        "TABLE_NAME": [item.split("-")[1] for item in basket]
        },
        columns = ["TABLE_SCHEMA", "TABLE_NAME"]
    )
    # SN: added full table name and table type 160925
    basket_pd = basket_pd.merge(
        datasets_df,
        left_on=["TABLE_SCHEMA", "TABLE_NAME"],
        right_on=["source", "table"],
        how="left")[
            ["TABLE_SCHEMA",
             "TABLE_NAME",
             "table_name",
             "Type"]].rename(
                columns={"table_name": "FULL_TABLE_NAME",
                         "Type": "TABLE_TYPE"})
    basket_pd.to_csv("server_save_basket_[datetime].csv", index=False)
    return basket_pd

def write_json(name, content):
    with open(os.path.join("assets", name), "w") as f:
        json.dump(content, f, ensure_ascii=False)

def read_json(name):
    print("loading ", name)
    with open(os.path.join("assets", name), "r") as f:
        return json.load(f)

def load_map_data(cnxn):
    return pd.read_sql("SELECT * from geo_locations", cnxn)

def get_map_overlays(study):
    with open(os.path.join("assets", "map overlays", study + ".geojson"), 'r') as f:
        returned_data = json.load(f)
    return returned_data

'''
Database table notes:

spine:
    Minimum info required for searching. Source & dataset.
dataset_counts:
    Source name + number of datasets within
    (made redundant by...)
study_participants:
    source + participant count
dataset_participants:
    source + dataset + participant count
dataset_ages:
    source + dataset + age stats
cohort_linkage:
    cohort + col per linked source
cohort_linkage_by_groups:
    cohort + col per possible linked source combo
dataset_linkage:
    source + dataset + col per linked source
dataset_linkage_by_groups:
    source + dataset + col per possible linked source combo
cohort_ages:
    source + age stats
nhs_dataset_cohort_linkage:
    dataset + cohort + count
nhs_dataset_extracts:
    dataset + date + count
source_info:
    full info for sources
'''
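
# Minimal usage sketch (assumptions: a pyodbc connection; "LLC_Dashboard" is a
# hypothetical DSN and "alspac" a hypothetical source name — substitute real values).
if __name__ == "__main__":
    import pyodbc
    cnxn = pyodbc.connect("DSN=LLC_Dashboard")
    print(load_datasets(cnxn).head())
    print(load_dataset_count(cnxn, source="alspac").head())
    print(load_search(cnxn, source="alspac").head())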