#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""A script to read in and store documents in a sqlite database."""

import argparse
import csv
import sqlite3
import json
import os
import logging
import importlib.util

from multiprocessing import Pool as ProcessPool
from tqdm import tqdm

from drqa_retriever import utils

logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)


# ------------------------------------------------------------------------------
# Import helper
# ------------------------------------------------------------------------------


PREPROCESS_FN = None


def init(filename):
    global PREPROCESS_FN
    if filename:
        PREPROCESS_FN = import_module(filename).preprocess


def import_module(filename):
    """Import a module given a full path to the file."""
    spec = importlib.util.spec_from_file_location('doc_filter', filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
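
# A minimal sketch of a custom module that can be passed via --preprocess.
# The only contract this script relies on is a `preprocess` function that
# takes a structured doc (a dict with `id` and `text` fields) and returns
# it, possibly modified, or None/empty to skip it. The length filter below
# is purely illustrative.
#
#   def preprocess(doc):
#       """Drop very short documents; pass the rest through unchanged."""
#       if len(doc['text']) < 10:
#           return None
#       return doc
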
# ------------------------------------------------------------------------------
# Store corpus.
# ------------------------------------------------------------------------------


def iter_files(path):
    """Walk through all files located under a root path."""
    if os.path.isfile(path):
        yield path
    elif os.path.isdir(path):
        for dirpath, _, filenames in os.walk(path):
            for f in filenames:
                yield os.path.join(dirpath, f)
    else:
        raise RuntimeError('Path %s is invalid' % path)


def get_contents(filename):
    """Parse the contents of a file: TSV rows or one JSON encoded document per line."""
    global PREPROCESS_FN
    documents = []
    if filename.endswith(".tsv.gz"):
        raise NotImplementedError("TODO")
    elif filename.endswith(".tsv"):
        with open(filename) as tsvfile:
            reader = csv.reader(tsvfile, delimiter='\t')
            # File format: doc_id, doc_text, title
            for (doc_id, doc_text, title) in reader:
                # Skip the header row, if any.
                if doc_id == "id":
                    continue
                documents.append((doc_id, utils.normalize(title) + " " + doc_text))
    else:
        with open(filename) as f:
            for line in f:
                # Parse document
                doc = json.loads(line)
                # Maybe preprocess the document with custom function
                if PREPROCESS_FN:
                    doc = PREPROCESS_FN(doc)
                # Skip if it is empty or None
                if not doc:
                    continue
                # Add the document
                documents.append((utils.normalize(doc['id']), doc['text']))
    return documents
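
# For reference, the two input formats handled above (sketches; only the
# fields this script actually reads are shown):
#
#   JSON lines: one document per line, with `id` and `text` fields:
#     {"id": "doc1", "text": "Document contents ..."}
#
#   TSV: tab-separated doc_id, doc_text, title columns, with an optional
#   header row whose first column is the literal string "id":
#     doc1<TAB>Document contents ...<TAB>Document title
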


def store_contents(data_path, save_path, preprocess, num_workers=None):
    """Preprocess and store a corpus of documents in sqlite.

    Args:
        data_path: Root path to directory (or directory of directories) of files
            containing json encoded documents (must have `id` and `text` fields).
        save_path: Path to output sqlite db.
        preprocess: Path to file defining a custom `preprocess` function. Takes
            in and outputs a structured doc.
        num_workers: Number of parallel processes to use when reading docs.
    """
    if os.path.isfile(save_path):
        raise RuntimeError('%s already exists! Not overwriting.' % save_path)

    logger.info('Reading into database...')
    conn = sqlite3.connect(save_path)
    c = conn.cursor()
    c.execute("CREATE TABLE documents (id PRIMARY KEY, text);")

    if num_workers is None or num_workers == 1:
        # No worker pool on the serial path, so load the optional
        # preprocess function in this process directly.
        init(preprocess)
        files = [f for f in iter_files(data_path)]
        count = 0
        with tqdm(total=len(files)) as pbar:
            for f in files:
                pairs = get_contents(f)
                count += len(pairs)
                c.executemany("INSERT INTO documents VALUES (?,?)", pairs)
                pbar.update()
    else:
        workers = ProcessPool(num_workers, initializer=init, initargs=(preprocess,))
        files = [f for f in iter_files(data_path)]
        count = 0
        with tqdm(total=len(files)) as pbar:
            for pairs in workers.imap_unordered(get_contents, files):
                count += len(pairs)
                c.executemany("INSERT INTO documents VALUES (?,?)", pairs)
                pbar.update()
        workers.close()
        workers.join()

    logger.info('Read %d docs.' % count)
    logger.info('Committing...')
    conn.commit()
    conn.close()
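
# A minimal sketch of reading a document back out of the resulting database
# (the schema matches the CREATE TABLE statement above; the path and doc id
# are illustrative):
#
#   conn = sqlite3.connect('/path/to/saved/db.db')
#   c = conn.cursor()
#   c.execute("SELECT text FROM documents WHERE id = ?", ('doc1',))
#   print(c.fetchone())
#   conn.close()
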
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('data_path', type=str, help='/path/to/data')
    parser.add_argument('save_path', type=str, help='/path/to/saved/db.db')
    parser.add_argument('--preprocess', type=str, default=None,
                        help=('File path to a python module that defines '
                              'a `preprocess` function'))
    parser.add_argument('--num-workers', type=int, default=None,
                        help='Number of CPU processes (for tokenizing, etc)')
    args = parser.parse_args()

    store_contents(
        args.data_path, args.save_path, args.preprocess, args.num_workers
    )