"""Normalise footnotes in .docx files.
Pass 1 – Remove punctuation-only footnotes (containing only Tibetan
punctuation marks and edition labels, no real content).
Pass 2 – Fix incomplete-reference footnotes by extracting the missing
reference syllable from the main text and prepending it.
Pass 3 – Normalise archaic-word footnotes by swapping the archaic
reference spelling with the modern variant and updating
the edition label to its complement.
Walks all pandita folders under data/, processes each .docx, and logs
every change to dedicated log files at the project root.
"""
import re
import zipfile
from io import BytesIO
from pathlib import Path
from lxml import etree
from botok_rs import SimpleTokenizer
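# botok_rs supplies the Tibetan tokenizer used for syllable segmentation
# (see get_syllables below).
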
W_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
FOOTNOTES_PART = "word/footnotes.xml"
DOCUMENT_PART = "word/document.xml"
PUNCT_LOG = "deleted_punctuation_footnotes.log"
FIX_LOG = "fixed_incomplete_footnotes.log"
ARCHAIC_LOG = "normalised_archaic_footnotes.log"
ARCHAIC_WORDS_FILE = "archaic_words.yml"
# Standard edition label order: Derge, Chone, Narthang, Peking
EDITION_ORDER: list[str] = ["སྡེ།", "ཅོ།", "སྣར།", "པེ།"]
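# Shape of the three passes at a glance (the Tibetan text is illustrative,
# adapted from the docstrings below):
#   Pass 1: "༼སྣར། པེ།༽ །"            → footnote and its marker deleted
#   Pass 2: "། ༼སྣར། པེ།༽ ཞིག །"      → "ཡོང་ ། ༼སྣར། པེ།༽ ཞིག །"
#   Pass 3: "ref། ༼labels༽ variant །" → "variant། ༼complement༽ ref །"
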
# ---------------------------------------------------------------------------
# Text helpers
# ---------------------------------------------------------------------------
def is_punctuation_only(text: str) -> bool:
    """Return True if *text* contains only Tibetan punctuation and edition labels.

    Strips edition label blocks (``༼…༽``), Tibetan punctuation marks
    (``། ༄ ༅ ་ ༌``), Tibetan digits (``༠``–``༩``), and whitespace.
    If nothing remains the text is punctuation-only.
    """
    # Remove edition labels e.g. ༼སྣར། པེ།༽
    cleaned = re.sub(r"༼[^༽]*༽", "", text)
    # Remove Tibetan punctuation, digits, and whitespace
    cleaned = re.sub(r"[།༄༅་༌༠-༩\s]+", "", cleaned)
    return len(cleaned) == 0

def has_missing_reference(text: str) -> bool:
    """Return True if *text* starts with a bare ``།`` before the edition label.

    Matches footnotes like ``། ༼སྣར། པེ།༽ ཞིག །`` where the reference
    spelling from the main text is missing.
    """
    if not text:
        return False
    return bool(re.match(r"^\s*།\s*༼", text))

def get_syllables(text: str) -> list[str]:
    """Return a list of Tibetan syllables from *text*.

    The text is expected to come from the main document immediately before
    a footnote marker. It typically ends with a shad (``།``).
    """
    tokens = SimpleTokenizer.tokenize(text)
    return [token.text for token in tokens if token.text.strip()]

def extract_last_syllable(text: str) -> str | None:
    """Extract the last Tibetan syllable from *text*.

    The text is expected to come from the main document immediately before
    a footnote marker. It typically ends with a shad (``།``).

    Tibetan orthographic rules applied:

    - **ང special case**: when the syllable ends with ང, the tsheg (་) is
      kept before the shad, so the extracted syllable includes the tsheg.
      e.g. ``བོད་ལ་ཡོང་།`` → ``ཡོང་``
    - **Normal case**: the tsheg is omitted before the shad for non-ང
      finals, so the extracted syllable has no trailing tsheg.
      e.g. ``ཡོད་པར།`` → ``པར``
    """
    if not text:
        return None
    syllables = get_syllables(text)
    last_syllable = syllables[-1] if syllables else None
    # A bare shad token is punctuation, not a syllable – join it onto the
    # syllable before it (guard against a text that is nothing but shads).
    if last_syllable in ("།", "། །") and len(syllables) >= 2:
        last_syllable = f"{syllables[-2]}{last_syllable}"
    return last_syllable

# Matches: reference། ༼editions༽ variant [།]
FOOTNOTE_PARTS_RE = re.compile(
    r"^(.+?)།\s*(༼[^༽]+༽)\s*(.+?)\s*།?\s*$"
)

def load_archaic_words(yaml_path: Path) -> set[str]:
    """Load archaic words from a YAML list and return a normalised set.

    Each entry in the YAML file is a ``- word`` line. Trailing tshegs (``་``)
    are stripped so that comparison with footnote text is consistent.

    Args:
        yaml_path: Path to the archaic words YAML file.

    Returns:
        Set of normalised archaic word strings.
    """
    words: set[str] = set()
    with yaml_path.open("r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line.startswith("- "):
                word = line[2:].strip().rstrip("་")
                if word:
                    words.add(word)
    return words

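# NOTE: the hand-rolled parser above only understands a flat YAML list of
# ``- word`` lines (the entries below are hypothetical):
#
#   - ཡོང་
#   - ཞིག
#
# Keeping it this simple avoids a PyYAML dependency.
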
def parse_footnote_parts(text: str) -> tuple[str, str, str] | None:
    """Parse a footnote into *(reference, edition_label, variant)*.

    Expected format::

        reference། ༼edition_label༽ variant །

    Returns:
        A 3-tuple of stripped strings, or ``None`` if the text does not match.
    """
    match = FOOTNOTE_PARTS_RE.match(text.strip())
    if match is None:
        return None
    return match.group(1).strip(), match.group(2).strip(), match.group(3).strip()

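# Example with hypothetical footnote text:
#   parse_footnote_parts("ཡོང་། ༼སྣར། པེ།༽ ཞིག །")
#   → ("ཡོང་", "༼སྣར། པེ།༽", "ཞིག")
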
def complement_edition_label(label: str) -> str:
    """Return the complement edition label.

    Given a label like ``༼སྣར། པེ།༽``, returns the label containing all
    editions **not** present, in standard order (Derge, Chone, Narthang,
    Peking).

    Args:
        label: Original edition label enclosed in ``༼…༽``.

    Returns:
        New edition label with the complement set of editions.
    """
    inner = label.strip("༼༽ ")
    present = {ed for ed in EDITION_ORDER if ed in inner}
    complement = [ed for ed in EDITION_ORDER if ed not in present]
    return "༼" + " ".join(complement) + "༽"

# ---------------------------------------------------------------------------
# XML context helpers
# ---------------------------------------------------------------------------
def _find_ref_context(
    doc_root: etree._Element,
    fn_id: str,
) -> tuple[etree._Element, list[etree._Element], int] | None:
    """Locate the paragraph, child list, and index for a footnote reference.

    Args:
        doc_root: Parsed document XML root.
        fn_id: Footnote reference ID to find.

    Returns:
        ``(paragraph, children, run_index)`` or ``None`` if not found.
    """
    for ref in doc_root.iter(f"{{{W_NS}}}footnoteReference"):
        if ref.get(f"{{{W_NS}}}id") != fn_id:
            continue
        run = ref.getparent()  # <w:r>
        if run is None:
            return None
        para = run.getparent()  # <w:p>
        if para is None:
            return None
        children = list(para)
        try:
            idx = children.index(run)
        except ValueError:
            return None
        return para, children, idx
    return None

def _collect_post_ref_t_elements(
    footnote: etree._Element,
) -> list[etree._Element]:
    """Collect ``<w:t>`` elements after ``<w:footnoteRef/>`` in a footnote."""
    t_elements: list[etree._Element] = []
    past_ref = False
    for elem in footnote.iter():
        if elem.tag == f"{{{W_NS}}}footnoteRef":
            past_ref = True
            continue
        if past_ref and elem.tag == f"{{{W_NS}}}t":
            t_elements.append(elem)
    return t_elements

def _collect_runs_text(
    children: list[etree._Element],
    end_idx: int,
) -> str | None:
    """Concatenate ``<w:t>`` text from ``<w:r>`` children before *end_idx*.

    Args:
        children: Direct children of a ``<w:p>`` element.
        end_idx: Index of the footnote reference run (exclusive upper bound).

    Returns:
        Concatenated text, or ``None`` if no text was collected.
    """
    parts: list[str] = []
    for j in range(end_idx):
        child = children[j]
        if child.tag != f"{{{W_NS}}}r":
            continue
        for t_elem in child.findall(f"{{{W_NS}}}t"):
            if t_elem.text:
                parts.append(t_elem.text)
    return "".join(parts) if parts else None

def _find_substantive_t_element(
    t_elements: list[etree._Element],
) -> tuple[etree._Element, int]:
    """Find the first ``<w:t>`` element with non-whitespace text.

    Returns:
        Tuple of (element, index) within the list.
    """
    for i, t_elem in enumerate(t_elements):
        if t_elem.text and t_elem.text.strip():
            return t_elem, i
    # Fallback: no substantive text found – callers guarantee the list is
    # non-empty, so return the first element.
    return t_elements[0], 0

def _extract_leading_whitespace(elem: etree._Element) -> str:
    """Return leading whitespace from the element's text, if any."""
    if not elem.text:
        return ""
    stripped = elem.text.lstrip()
    if len(stripped) < len(elem.text):
        return elem.text[: len(elem.text) - len(stripped)]
    return ""

def _replace_in_preceding_runs(
    children: list[etree._Element],
    ref_idx: int,
    old_text: str,
    new_text: str,
) -> bool:
    """Walk backwards through runs and replace the last occurrence of *old_text*.

    Args:
        children: Direct children of the paragraph.
        ref_idx: Index of the footnote reference run.
        old_text: Text to find.
        new_text: Replacement text.

    Returns:
        True if replacement was applied.
    """
    for j in range(ref_idx - 1, -1, -1):
        child = children[j]
        if child.tag != f"{{{W_NS}}}r":
            continue
        for t_elem in reversed(child.findall(f"{{{W_NS}}}t")):
            if t_elem.text and old_text in t_elem.text:
                pos = t_elem.text.rfind(old_text)
                t_elem.text = (
                    t_elem.text[:pos]
                    + new_text
                    + t_elem.text[pos + len(old_text) :]
                )
                return True
    return False

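# Searching backwards and taking the last occurrence keeps the replacement
# closest to the footnote marker, which matters when the same spelling
# occurs more than once in the paragraph.
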
# ---------------------------------------------------------------------------
# XML readers (operate on already-parsed lxml trees)
# ---------------------------------------------------------------------------
def build_page_footnote_map(
    doc_root: etree._Element,
) -> dict[int, list[str]]:
    """Map page numbers (1-based) to footnote reference IDs.

    Page boundaries are detected via ``<w:lastRenderedPageBreak/>`` elements
    that Word embeds during rendering.
    """
    body = doc_root.find(f"{{{W_NS}}}body")
    if body is None:
        return {}
    page = 1
    page_map: dict[int, list[str]] = {}
    for elem in body.iter():
        if elem.tag == f"{{{W_NS}}}lastRenderedPageBreak":
            page += 1
        elif elem.tag == f"{{{W_NS}}}footnoteReference":
            fn_id = elem.get(f"{{{W_NS}}}id")
            if fn_id is not None:
                page_map.setdefault(page, []).append(fn_id)
    return page_map

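# NOTE: <w:lastRenderedPageBreak/> records where Word paginated the document
# the last time it was rendered and saved, so the page numbers derived here
# are best-effort rather than guaranteed.
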
def extract_footnote_texts(
    fn_root: etree._Element,
) -> dict[str, str]:
    """Extract normalised text content for each footnote by ID.

    Separator and continuation-separator footnotes are skipped.
    """
    texts_by_id: dict[str, str] = {}
    for footnote in fn_root.findall(f"{{{W_NS}}}footnote"):
        fn_id = footnote.get(f"{{{W_NS}}}id")
        fn_type = footnote.get(f"{{{W_NS}}}type")
        if fn_type in ("separator", "continuationSeparator"):
            continue
        parts: list[str] = []
        for t_elem in footnote.iter(f"{{{W_NS}}}t"):
            if t_elem.text:
                parts.append(t_elem.text)
        full_text = "".join(parts).strip()
        full_text = " ".join(full_text.split())  # normalise whitespace
        if fn_id is not None:
            texts_by_id[fn_id] = full_text
    return texts_by_id

# ---------------------------------------------------------------------------
# Pass 1 – punctuation-only detection
# ---------------------------------------------------------------------------
def find_punctuation_only_ids(texts_by_id: dict[str, str]) -> set[str]:
    """Return IDs of footnotes that are punctuation-only."""
    return {
        fn_id
        for fn_id, text in texts_by_id.items()
        if text and is_punctuation_only(text)
    }

# ---------------------------------------------------------------------------
# Pass 2 – incomplete-reference detection & extraction
# ---------------------------------------------------------------------------
def find_incomplete_ref_ids(
    texts_by_id: dict[str, str],
    punct_ids: set[str],
) -> set[str]:
    """Return IDs of footnotes whose reference text is just ``།``.

    Footnotes already flagged as punctuation-only are excluded.
    """
    return {
        fn_id
        for fn_id, text in texts_by_id.items()
        if fn_id not in punct_ids and has_missing_reference(text)
    }

# ---------------------------------------------------------------------------
# Pass 3 – archaic-word detection
# ---------------------------------------------------------------------------
def find_archaic_note_ids(
    texts_by_id: dict[str, str],
    archaic_words: set[str],
    exclude_ids: set[str],
) -> dict[str, tuple[str, str, str]]:
    """Return footnotes whose reference spelling is an archaic word.

    Parses each footnote into *(reference, edition_label, variant)* and
    checks whether the reference (with trailing tsheg stripped) appears in
    the *archaic_words* set.

    Args:
        texts_by_id: Mapping of footnote ID → normalised text.
        archaic_words: Set of normalised archaic word strings.
        exclude_ids: Footnote IDs to skip (e.g. already handled by other passes).

    Returns:
        Mapping of ``{fn_id: (reference, edition_label, variant)}``.
    """
    result: dict[str, tuple[str, str, str]] = {}
    for fn_id, text in texts_by_id.items():
        if fn_id in exclude_ids:
            continue
        parts = parse_footnote_parts(text)
        if parts is None:
            continue
        reference, editions, variant = parts
        if reference.rstrip("་") in archaic_words:
            result[fn_id] = (reference, editions, variant)
    return result

def get_text_before_marker(
    doc_root: etree._Element,
    fn_id: str,
) -> str | None:
    """Collect all paragraph text preceding the footnote marker for *fn_id*.

    Walks the runs in the same ``<w:p>`` that contains the
    ``<w:footnoteReference>`` and concatenates their ``<w:t>`` contents
    up to (but not including) the reference run.
    """
    ctx = _find_ref_context(doc_root, fn_id)
    if ctx is None:
        return None
    _para, children, idx = ctx
    return _collect_runs_text(children, idx)

# ---------------------------------------------------------------------------
# XML mutators
# ---------------------------------------------------------------------------
def remove_footnotes_from_tree(
    fn_root: etree._Element,
    ids_to_remove: set[str],
) -> None:
    """Remove ``<w:footnote>`` elements with matching IDs from the tree."""
    for footnote in fn_root.findall(f"{{{W_NS}}}footnote"):
        fn_id = footnote.get(f"{{{W_NS}}}id")
        if fn_id in ids_to_remove:
            fn_root.remove(footnote)

def remove_footnote_refs_from_tree(
    doc_root: etree._Element,
    ids_to_remove: set[str],
) -> None:
    """Remove ``<w:r>`` runs containing matching footnoteReference elements."""
    # list() materialises the iterator so we can safely mutate the tree
    refs = list(doc_root.iter(f"{{{W_NS}}}footnoteReference"))
    for ref in refs:
        ref_id = ref.get(f"{{{W_NS}}}id")
        if ref_id in ids_to_remove:
            run = ref.getparent()
            if run is not None:
                run_parent = run.getparent()
                if run_parent is not None:
                    run_parent.remove(run)

def fix_footnote_text(
    fn_root: etree._Element,
    fn_id: str,
    syllable: str,
) -> bool:
    """Prepend *syllable* before the first ``།`` in footnote *fn_id*.

    Scans ``<w:t>`` elements after ``<w:footnoteRef/>``, finds the first
    one whose (stripped) text starts with ``།``, and inserts the syllable
    immediately before that shad.

    Returns True if the modification was applied.
    """
    for footnote in fn_root.findall(f"{{{W_NS}}}footnote"):
        if footnote.get(f"{{{W_NS}}}id") != fn_id:
            continue
        for elem in _collect_post_ref_t_elements(footnote):
            if elem.text and elem.text.lstrip().startswith("།"):
                pos = elem.text.index("།")
                elem.text = elem.text[:pos] + syllable + " " + elem.text[pos:]
                return True
        return False
    return False

def rewrite_footnote_text(
    fn_root: etree._Element,
    fn_id: str,
    new_text: str,
) -> bool:
    """Replace the text content after ``<w:footnoteRef/>`` with *new_text*.

    Preserves leading whitespace in the first substantive ``<w:t>`` element
    and clears any subsequent text elements.

    Args:
        fn_root: Parsed footnotes XML root.
        fn_id: Target footnote ID.
        new_text: Full replacement text (e.g. ``variant། ༼editions༽ ref །``).

    Returns:
        True if the modification was applied.
    """
    for footnote in fn_root.findall(f"{{{W_NS}}}footnote"):
        if footnote.get(f"{{{W_NS}}}id") != fn_id:
            continue
        t_elements = _collect_post_ref_t_elements(footnote)
        if not t_elements:
            return False
        target, target_idx = _find_substantive_t_element(t_elements)
        leading = _extract_leading_whitespace(target)
        target.text = leading + new_text
        for t_elem in t_elements[target_idx + 1 :]:
            t_elem.text = ""
        return True
    return False

def replace_reference_in_main_text(
    doc_root: etree._Element,
    fn_id: str,
    old_reference: str,
    new_reference: str,
) -> bool:
    """Replace the reference spelling in the main text before footnote *fn_id*.

    Searches backwards through the ``<w:r>`` runs preceding the footnote
    marker in the same paragraph. The **last** occurrence of *old_reference*
    in the matching ``<w:t>`` element is replaced (closest to the marker).

    Args:
        doc_root: Parsed document XML root.
        fn_id: Target footnote reference ID.
        old_reference: Text to find (archaic spelling).
        new_reference: Replacement text (modern variant).

    Returns:
        True if the replacement was applied.
    """
    ctx = _find_ref_context(doc_root, fn_id)
    if ctx is None:
        return False
    _para, children, idx = ctx
    return _replace_in_preceding_runs(children, idx, old_reference, new_reference)

# ---------------------------------------------------------------------------
# Orchestrator helpers
# ---------------------------------------------------------------------------
def _build_id_to_page(page_map: dict[int, list[str]]) -> dict[str, int]:
    """Invert *page_map* to a mapping of footnote ID → page number."""
    result: dict[str, int] = {}
    for page_num, fn_ids in page_map.items():
        for fn_id in fn_ids:
            result[fn_id] = page_num
    return result

def _extract_syllables(
    doc_root: etree._Element,
    incomplete_ids: set[str],
) -> tuple[dict[str, str], set[str]]:
    """Extract the missing syllable for each incomplete-reference footnote.

    Returns:
        ``(syllable_map, unfixable_ids)``
    """
    syllable_map: dict[str, str] = {}
    unfixable_ids: set[str] = set()
    for fn_id in incomplete_ids:
        text_before = get_text_before_marker(doc_root, fn_id)
        syllable = extract_last_syllable(text_before) if text_before else None
        if syllable:
            syllable_map[fn_id] = syllable
        else:
            unfixable_ids.add(fn_id)
    return syllable_map, unfixable_ids

def _report_findings(
    punct_ids: set[str],
    syllable_map: dict[str, str],
    unfixable_ids: set[str],
    archaic_map: dict[str, tuple[str, str, str]],
    *,
    dry_run: bool,
) -> None:
    """Print a summary of findings to stdout."""
    if punct_ids:
        action = "Would remove" if dry_run else "Removing"
        print(f" {action} {len(punct_ids)} punctuation-only footnote(s)")
    if syllable_map:
        action = "Would fix" if dry_run else "Fixing"
        print(f" {action} {len(syllable_map)} incomplete-reference footnote(s)")
    if unfixable_ids:
        print(
            f" Skipping {len(unfixable_ids)} unfixable incomplete-reference(s)"
        )
    if archaic_map:
        action = "Would normalise" if dry_run else "Normalising"
        print(f" {action} {len(archaic_map)} archaic-word footnote(s)")

def _write_punct_log(
    log_path: Path,
    book_name: str,
    id_to_page: dict[str, int],
    texts_by_id: dict[str, str],
    punct_ids: set[str],
) -> None:
    """Write punctuation-only deletion entries to the log."""
    with log_path.open("a", encoding="utf-8") as fh:
        for fn_id in sorted(punct_ids, key=int):
            page = id_to_page.get(fn_id, -1)
            text = texts_by_id.get(fn_id, "")
            fh.write(f"{book_name} | page {page} | {text}\n")

def _write_fix_log(
    log_path: Path,
    book_name: str,
    id_to_page: dict[str, int],
    texts_by_id: dict[str, str],
    syllable_map: dict[str, str],
    unfixable_ids: set[str],
) -> None:
    """Write incomplete-reference fix entries to the log."""
    with log_path.open("a", encoding="utf-8") as fh:
        for fn_id in sorted(syllable_map, key=int):
            page = id_to_page.get(fn_id, -1)
            syl = syllable_map[fn_id]
            old = texts_by_id.get(fn_id, "")
            new = syl + " " + old.lstrip()
            fh.write(
                f"{book_name} | page {page} | {syl} | {old} → {new}\n"
            )
        for fn_id in sorted(unfixable_ids, key=int):
            page = id_to_page.get(fn_id, -1)
            old = texts_by_id.get(fn_id, "")
            fh.write(f"{book_name} | page {page} | UNFIXABLE | {old}\n")

def _write_archaic_log(
    log_path: Path,
    book_name: str,
    id_to_page: dict[str, int],
    texts_by_id: dict[str, str],
    archaic_map: dict[str, tuple[str, str, str]],
) -> None:
    """Write archaic-word normalisation entries to the log."""
    with log_path.open("a", encoding="utf-8") as fh:
        for fn_id in sorted(archaic_map, key=int):
            page = id_to_page.get(fn_id, -1)
            ref, editions, variant = archaic_map[fn_id]
            new_editions = complement_edition_label(editions)
            old_text = texts_by_id.get(fn_id, "")
            new_text = f"{variant}། {new_editions} {ref} །"
            fh.write(
                f"{book_name} | page {page} | {old_text} → {new_text}\n"
            )

def _write_change_logs(
    book_name: str,
    id_to_page: dict[str, int],
    texts_by_id: dict[str, str],
    punct_ids: set[str],
    syllable_map: dict[str, str],
    unfixable_ids: set[str],
    archaic_map: dict[str, tuple[str, str, str]],
    punct_log_path: Path,
    fix_log_path: Path,
    archaic_log_path: Path,
) -> None:
    """Append change details to the relevant log files."""
    if punct_ids:
        _write_punct_log(
            punct_log_path, book_name, id_to_page, texts_by_id, punct_ids,
        )
    if syllable_map or unfixable_ids:
        _write_fix_log(
            fix_log_path, book_name, id_to_page, texts_by_id,
            syllable_map, unfixable_ids,
        )
    if archaic_map:
        _write_archaic_log(
            archaic_log_path, book_name, id_to_page, texts_by_id, archaic_map,
        )

def _apply_mutations(
    fn_root: etree._Element,
    doc_root: etree._Element,
    punct_ids: set[str],
    syllable_map: dict[str, str],
    archaic_map: dict[str, tuple[str, str, str]],
) -> None:
    """Apply all three passes' mutations to the XML trees."""
    # Pass 1: remove punctuation-only footnotes + their markers
    if punct_ids:
        remove_footnotes_from_tree(fn_root, punct_ids)
        remove_footnote_refs_from_tree(doc_root, punct_ids)
    # Pass 2: prepend extracted syllable to incomplete-reference notes
    for fn_id, syllable in syllable_map.items():
        fix_footnote_text(fn_root, fn_id, syllable)
    # Pass 3: swap archaic reference/variant and update edition labels
    for fn_id, (reference, editions, variant) in archaic_map.items():
        new_editions = complement_edition_label(editions)
        new_fn_text = f"{variant}། {new_editions} {reference} །"
        rewrite_footnote_text(fn_root, fn_id, new_fn_text)
        replace_reference_in_main_text(doc_root, fn_id, reference, variant)

def _serialize_docx(
    zin: zipfile.ZipFile,
    fn_root: etree._Element,
    doc_root: etree._Element,
) -> bytes:
    """Serialise modified XML trees back into a .docx byte buffer."""
    new_fn = etree.tostring(
        fn_root, xml_declaration=True, encoding="UTF-8", standalone=True
    )
    new_doc = etree.tostring(
        doc_root, xml_declaration=True, encoding="UTF-8", standalone=True
    )
    buf = BytesIO()
    with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zout:
        for item in zin.infolist():
            if item.filename == FOOTNOTES_PART:
                zout.writestr(item, new_fn)
            elif item.filename == DOCUMENT_PART:
                zout.writestr(item, new_doc)
            else:
                zout.writestr(item, zin.read(item.filename))
    return buf.getvalue()

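# The whole archive is rewritten entry by entry because the zip format has
# no in-place member replacement; unmodified parts are copied through as-is.
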
# ---------------------------------------------------------------------------
# Per-file orchestrator
# ---------------------------------------------------------------------------
def process_docx(
    docx_path: Path,
    punct_log_path: Path,
    fix_log_path: Path,
    archaic_log_path: Path,
    archaic_words: set[str],
    *,
    dry_run: bool = False,
) -> tuple[int, int, int, int]:
    """Run all three normalisation passes on a single .docx file.

    Args:
        docx_path: Path to the .docx file.
        punct_log_path: Path for the punctuation-only deletion log.
        fix_log_path: Path for the incomplete-reference fix log.
        archaic_log_path: Path for the archaic-word normalisation log.
        archaic_words: Set of normalised archaic word strings.
        dry_run: If True, report findings without modifying the file.

    Returns:
        Tuple of (punct_removed, refs_fixed, unfixable_count, archaic_normalised).
    """
    with zipfile.ZipFile(docx_path, "r") as zin:
        if FOOTNOTES_PART not in zin.namelist():
            return 0, 0, 0, 0
        fn_root = etree.fromstring(zin.read(FOOTNOTES_PART))
        doc_root = etree.fromstring(zin.read(DOCUMENT_PART))
        book_name = docx_path.stem

        # ── analysis ─────────────────────────────────────────────────
        id_to_page = _build_id_to_page(build_page_footnote_map(doc_root))
        texts_by_id = extract_footnote_texts(fn_root)
        punct_ids = find_punctuation_only_ids(texts_by_id)
        incomplete_ids = find_incomplete_ref_ids(texts_by_id, punct_ids)
        syllable_map, unfixable_ids = _extract_syllables(doc_root, incomplete_ids)
        archaic_exclude = punct_ids | incomplete_ids
        archaic_map = find_archaic_note_ids(
            texts_by_id, archaic_words, archaic_exclude,
        )

        # ── reporting & logging ──────────────────────────────────────
        # Logs are written even on a dry run so the planned changes can be
        # reviewed before applying them.
        _report_findings(
            punct_ids, syllable_map, unfixable_ids, archaic_map,
            dry_run=dry_run,
        )
        _write_change_logs(
            book_name, id_to_page, texts_by_id,
            punct_ids, syllable_map, unfixable_ids, archaic_map,
            punct_log_path, fix_log_path, archaic_log_path,
        )
        counts = (
            len(punct_ids), len(syllable_map),
            len(unfixable_ids), len(archaic_map),
        )
        has_changes = any((punct_ids, syllable_map, archaic_map))
        if not has_changes or dry_run:
            return counts

        # ── apply & write ────────────────────────────────────────────
        _apply_mutations(
            fn_root, doc_root, punct_ids, syllable_map, archaic_map,
        )
        output_bytes = _serialize_docx(zin, fn_root, doc_root)

    # Write only after the source zip has been closed, so the original file
    # is never truncated while still open for reading.
    docx_path.write_bytes(output_bytes)
    return counts

# ---------------------------------------------------------------------------
# Main helpers
# ---------------------------------------------------------------------------
def _load_archaic_words_safe(path: Path) -> set[str]:
    """Load archaic words, returning an empty set if the file is missing."""
    if path.is_file():
        words = load_archaic_words(path)
        print(f"Loaded {len(words)} archaic words from {path.name}")
        return words
    print(f"Warning: {path.name} not found – skipping Pass 3")
    return set()

def _process_all_files(
    data_dir: Path,
    punct_log: Path,
    fix_log: Path,
    archaic_log: Path,
    archaic_words: set[str],
    *,
    dry_run: bool,
) -> tuple[int, int, int, int, int, int]:
    """Process all .docx files under *data_dir* and return aggregate totals.

    Returns:
        ``(total_files, total_touched, total_punct, total_fixed,
        total_unfixable, total_archaic)``
    """
    total_files = 0
    total_touched = 0
    total_punct = 0
    total_fixed = 0
    total_unfixable = 0
    total_archaic = 0
    for pandita_dir in sorted(data_dir.iterdir()):
        if not pandita_dir.is_dir():
            continue
        for docx_path in sorted(pandita_dir.glob("*.docx")):
            total_files += 1
            print(f"Scanning: {pandita_dir.name}/{docx_path.name}")
            punct, fixed, unfixable, archaic = process_docx(
                docx_path, punct_log, fix_log, archaic_log,
                archaic_words, dry_run=dry_run,
            )
            if any((punct, fixed, unfixable, archaic)):
                total_touched += 1
            total_punct += punct
            total_fixed += fixed
            total_unfixable += unfixable
            total_archaic += archaic
    return (
        total_files, total_touched, total_punct,
        total_fixed, total_unfixable, total_archaic,
    )

def _print_summary(
    total_files: int,
    total_touched: int,
    total_punct: int,
    total_fixed: int,
    total_unfixable: int,
    total_archaic: int,
    punct_log: Path,
    fix_log: Path,
    archaic_log: Path,
    *,
    dry_run: bool,
) -> None:
    """Print final summary to stdout."""
    mode = "DRY RUN" if dry_run else "DONE"
    print(f"\n[{mode}] Scanned {total_files} file(s), touched {total_touched}.")
    print(f" Punctuation-only removed: {total_punct}")
    print(f" Incomplete refs fixed: {total_fixed}")
    print(f" Unfixable (skipped): {total_unfixable}")
    print(f" Archaic words normalised: {total_archaic}")
    if punct_log.exists():
        print(f" Punctuation log: {punct_log}")
    if fix_log.exists():
        print(f" Fix log: {fix_log}")
    if archaic_log.exists():
        print(f" Archaic log: {archaic_log}")

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main(*, dry_run: bool = False) -> None:
    """Walk data/ and normalise footnotes in every .docx file.

    Args:
        dry_run: If True, report findings without modifying any files.
    """
    base_dir = Path(__file__).parent
    data_dir = base_dir / "data"
    punct_log = base_dir / PUNCT_LOG
    fix_log = base_dir / FIX_LOG
    archaic_log = base_dir / ARCHAIC_LOG
    if not data_dir.is_dir():
        raise FileNotFoundError(f"Data directory not found: {data_dir}")
    archaic_words = _load_archaic_words_safe(base_dir / ARCHAIC_WORDS_FILE)
    totals = _process_all_files(
        data_dir, punct_log, fix_log, archaic_log, archaic_words,
        dry_run=dry_run,
    )
    _print_summary(
        *totals, punct_log, fix_log, archaic_log, dry_run=dry_run,
    )

if __name__ == "__main__":
    # Dry run by default: report and log what would change without touching
    # any .docx file. Switch to dry_run=False to apply the edits.
    main(dry_run=True)