-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathresource_detector.py
More file actions
2327 lines (2162 loc) · 125 KB
/
resource_detector.py
File metadata and controls
2327 lines (2162 loc) · 125 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import os
import codecs
import logging
import csv
import importlib
import re
import string
import locale
import json
# xml.etree.cElementTree was deprecated in Python 3.3 and REMOVED in 3.9;
# plain ElementTree exposes the same API with the C accelerator used
# automatically, so this is a drop-in replacement.
import xml.etree.ElementTree as ET
import pyparsing as PY
from enum import Enum, IntEnum
from datetime import datetime, timezone

__author__ = "Zero<zero-cubed@outlook.com>"
__version__ = "1.0.0"
__application__ = "resource detector"

# The pivot language: every localized resource file in a group is compared
# against the file detected as this language.
BASE_LANGUAGE = "en-us"
# Maps every accepted spelling of a language identifier (ISO codes, LCIDs,
# English names, underscore variants, script tags) to one canonical code.
# NOTE: a few keys are literally duplicated below ("pt", "portuguese", "ru",
# "ru-ru", "ca", "ca-es", "cy", "cy-gb"); each duplicate maps to the same
# value, so the later entry harmlessly overwrites the earlier one.
STANDARDIZED_LANGUAGES = {
    #Tier0-------------------------------------------------------------------------------------------
    "en" : "en-us", "en-us" : "en-us", "1033" : "en-us", "english" : "en-us", "en_us" : "en-us",
    #Tier1-------------------------------------------------------------------------------------------
    "de" : "de-de", "de-de" : "de-de", "1031" : "de-de", "german" : "de-de", "de_de" : "de-de",
    "es" : "es-es", "es-es" : "es-es", "3082" : "es-es", "spanish" : "es-es", "es_es" : "es-es", "es-mx" : "es-es",
    "fr" : "fr-fr", "fr-fr" : "fr-fr", "1036" : "fr-fr", "french" : "fr-fr", "fr_fr" : "fr-fr",
    "ja" : "ja-jp", "ja-jp" : "ja-jp", "1041" : "ja-jp", "japanese" : "ja-jp", "ja_jp" : "ja-jp",
    "zh" : "zh-cn", "zh-cn" : "zh-cn", "2052" : "zh-cn", "chinese" : "zh-cn", "zh_cn" : "zh-cn", "zh-rcn" : "zh-cn", "zh-hans" : "zh-cn", "zh-chs" : "zh-cn", "zh_hans" : "zh-cn", "sc" : "zh-cn", "cn" : "zh-cn",
    #Tier2-------------------------------------------------------------------------------------------
    "ko" : "ko-kr", "ko-kr" : "ko-kr", "1042" : "ko-kr", "korean" : "ko-kr", "ko_kr" : "ko-kr",
    "ru" : "ru-ru", "ru-ru" : "ru-ru", "1049" : "ru-ru", "russian" : "ru-ru", "ru_ru" : "ru-ru",
    "tc" : "zh-tw", "zh-tw" : "zh-tw", "1028" : "zh-tw", "tw" : "zh-tw", "zh_tw" : "zh-tw", "zh-rtw" : "zh-tw", "zh-hant" : "zh-tw", "zh-cht" : "zh-tw", "zh_hant" : "zh-tw",
    #Tier3-------------------------------------------------------------------------------------------
    "ar" : "ar-sa", "ar-sa" : "ar-sa", "1025" : "ar-sa", "arabic" : "ar-sa", "ar_sa" : "ar-sa",
    "da" : "da-dk", "da-dk" : "da-dk", "1030" : "da-dk", "danish" : "da-dk", "da_dk" : "da-dk",
    "he" : "he-il", "he-il" : "he-il", "1037" : "he-il", "hebrew" : "he-il", "he_il" : "he-il",
    "it" : "it-it", "it-it" : "it-it", "1040" : "it-it", "italian" : "it-it", "it_it" : "it-it",
    "nl" : "nl-nl", "nl-nl" : "nl-nl", "1043" : "nl-nl", "dutch" : "nl-nl", "nl_nl" : "nl-nl",
    "no" : "no-no", "no-no" : "no-no", "1044" : "no-no", "norwegian" : "no-no", "no_no" : "no-no", "nb-no" : "no-no", "nb" : "no-no", "nn-no" : "no-no", "nn" : "no-no",#TBD
    # "pt"/"portuguese" duplicate the Tier3 entries above with identical values.
    "pt" : "pt-br", "pt-br" : "pt-br", "1046" : "pt-br", "portuguese" : "pt-br", "pt_br" : "pt-br",
    "pt" : "pt-br", "pt-pt" : "pt-pt", "2070" : "pt-pt", "portuguese" : "pt-br", "pt_pt" : "pt-pt", #Add some duplicate items to keep coding format
    "pl" : "pl-pl", "pl-pl" : "pl-pl", "1045" : "pl-pl", "polish" : "pl-pl", "pl_pl" : "pl-pl",
    "sv" : "sv-se", "sv-se" : "sv-se", "1053" : "sv-se", "swedish" : "sv-se", "sv_se" : "sv-se",
    #Others-----------------------------------------------------------------------------------------------
    "bg" : "bg-bg", "bg-bg" : "bg-bg",
    "lt" : "lt-lt", "lt-lt" : "lt-lt",
    "ca" : "ca-es", "ca-es" : "ca-es",
    "cs" : "cs-cz", "cs-cz" : "cs-cz",
    "cy" : "cy-gb", "cy-gb" : "cy-gb",
    "el" : "el-gr", "el-gr" : "el-gr",
    "fi" : "fi-fi", "fi-fi" : "fi-fi",
    "et" : "et-ee", "et-ee" : "et-ee",
    "hi" : "hi-in", "hi-in" : "hi-in",
    "hu" : "hu-hu", "hu-hu" : "hu-hu",
    "id" : "id-id", "id-id" : "id-id",
    "lv" : "lv-lv", "lv-lv" : "lv-lv",
    "ro" : "ro-ro", "ro-ro" : "ro-ro",
    # Duplicate of the Tier2 Russian entries (same values).
    "ru" : "ru-ru", "ru-ru" : "ru-ru",
    "sk" : "sk-sk", "sk-sk" : "sk-sk",
    "sl" : "sl-si", "sl-si" : "sl-si",
    "th" : "th-th", "th-th" : "th-th",
    "tr" : "tr-tr", "tr-tr" : "tr-tr",
    "uk" : "uk-ua", "uk-ua" : "uk-ua",
    "af" : "af-za", "af-za" : "af-za",
    "sq" : "sq-al", "sq-al" : "sq-al",
    "am" : "am-et", "am-et" : "am-et",
    "hy" : "hy-am", "hy-am" : "hy-am",
    "as" : "as-in", "as-in" : "as-in",
    "eu" : "eu-es", "eu-es" : "eu-es",
    "be" : "be-by", "be-by" : "be-by",
    "bn" : "bn-bd", "bn-bd" : "bn-bd", #TBD
    "ca" : "ca-es", "ca-es" : "ca-es", #TBD
    "gl" : "gl-es", "gl-es" : "gl-es",
    "ka" : "ka-ge", "ka-ge" : "ka-ge",
    "gu" : "gu-in", "gu-in" : "gu-in",
    "is" : "is-is", "is-is" : "is-is",
    "ga" : "ga-ie", "ga-ie" : "ga-ie",
    "xh" : "xh-za", "xh-za" : "xh-za",
    "zu" : "zu-za", "zu-za" : "zu-za",
    "kn" : "kn-in", "kn-in" : "kn-in",
    "kk" : "kk-kz", "kk-kz" : "kk-kz",
    "km" : "km-kh", "km-kh" : "km-kh",
    "rw" : "rw-rw", "rw-rw" : "rw-rw",
    "sw" : "sw-ke", "sw-ke" : "sw-ke",
    "lb" : "lb-lu", "lb-lu" : "lb-lu",
    "mk" : "mk-mk", "mk-mk" : "mk-mk",
    "ms" : "ms-bn", "ms-bn" : "ms-bn", #TBD
    "ml" : "ml-in", "ml-in" : "ml-in",
    "mt" : "mt-mt", "mt-mt" : "mt-mt",
    "mr" : "mr-in", "mr-in" : "mr-in",
    "ne" : "ne-np", "ne-np" : "ne-np",
    "or" : "or-in", "or-in" : "or-in",
    "fa" : "fa-ir", "fa-ir" : "fa-ir",
    "tn" : "tn-bw", "tn-bw" : "tn-bw", #TBD
    "si" : "si-lk", "si-lk" : "si-lk",
    "ta" : "ta-in", "ta-in" : "ta-in",
    "te" : "te-in", "te-in" : "te-in",
    "ti" : "ti-et", "ti-et" : "ti-et",
    "ur" : "ur-pk", "ur-pk" : "ur-pk",
    "vi" : "vi-vn", "vi-vn" : "vi-vn",
    # Duplicate of the Welsh entries above (same values).
    "cy" : "cy-gb", "cy-gb" : "cy-gb",
    "wo" : "wo-sn", "wo-sn" : "wo-sn",
    "hr" : "hr-hr", "hr-hr" : "hr-hr", "hr-ba" : "hr-hr", #TBD
    "sr" : "sr-Latn", "sr-Latn" : "sr-Latn", #TBD
    "bs" : "bs-cyrl", "bs-cyrl" : "bs-cyrl", #TBD
    "pa" : "pa-arab", "pa-arab" : "pa-arab", #TBD
    "mi" : "mi-latn", "mi-latn" : "mi-latn", #TBD
    "nso" : "nso-za", "nso-za" : "nso-za",
    "quz" : "quz-bo", "quz-bo" : "quz-bo", #TBD
    "prs" : "prs-af", "prs-af" : "prs-af", #TBD
    "kok" : "kok-in", "kok-in" : "kok-in",
    "fil" : "fil-latn", "fil-latn" : "fil-latn", #TBD
    "gb-latn" : "gb-gb", "gb-gb" : "gb-gb",
    "ig-latn" : "ig-ng", "ig-ng" : "ig-ng",
    "yo-latn" : "yo-ng", "yo-ng" : "yo-ng",
    "ky-cyrl" : "ky-kg", "ky-kg" : "ky-kg",
    "tk-cyrl" : "tk-latn", "tk-latn" : "tk-latn", #TBD
    "tt-arab" : "tt-cyrl", "tt-cyrl" : "tt-cyrl", #TBD
    "tg-arab" : "tg-cyrl", "tg-cyrl" : "tg-cyrl", #TBD
    "iu-cans" : "iu-latn", "iu-latn" : "iu-latn", #TBD
    "mn-cyrl" : "mn-mong", "mn-mong" : "mn-mong", #TBD
    "az-arab" : "az-arab-az", "az-arab-az" : "az-arab-az", #TBD
    "sr-cyrl" : "sr-cyrl-cs", "sr-cyrl-cs" : "sr-cyrl-cs", #TBD
    "quc-latn" : "qut-gt", "qut-gt" : "qut-gt", #TBD
    "chr-cher" : "chr-cher-us", "chr-cher-us" : "chr-cher-us", #TBD
    "uz-latn-uz" : "uz-latn", "uz-latn" : "uz-latn",
    "sd-arab-pk" : "sd-arab", "sd-arab" : "sd-arab", #TBD
    "ha-latn-ng" : "ha-latn", "ha-latn" : "ha-latn",
    "ku-arab-iq" : "ku-arab", "ku-arab" : "ku-arab",
}
# Fallback codec tried per language when a resource file has no BOM and is
# not valid UTF-8 (see BaseResFile.read).
LANGUAGE_ENCODINGS = {
    #Tier0------------------------------------------------------
    "en-us" : "cp1252", #Use "cp1252" instead of "ascii" here because sometimes English resource file can be successfully opened with the former but not the later
    #Tier1------------------------------------------------------
    "de-de" : "cp1252",
    "es-es" : "cp1252",
    "fr-fr" : "cp1252",
    "ja-jp" : "shift_jis", #"cp932"
    "zh-cn" : "cp936",
    #Tier2------------------------------------------------------
    "ko-kr" : "cp949",
    "ru-ru" : "cp1251",
    "zh-tw" : "big5", #"cp950"
    #Tier3------------------------------------------------------
    "ar-sa" : "cp1256",
    "da-dk" : "cp865",
    "he-il" : "cp1255",
    "it-it" : "ascii", #TBD
    "nl-nl" : "ascii", #TBD
    "no-no" : "cp865",
    "pt-br" : "cp860",
    "pl-pl" : "ascii", #TBD
    "sv-se" : "ascii", #TBD
}
# Width used when expanding tabs (presumably for column reporting — the
# consuming code is outside this chunk; TODO confirm).
TAB_WIDTH = 4
# Module-level logger; None here, presumably assigned during application
# start-up outside this chunk — TODO confirm before relying on it at import time.
LOG = None
class Severity(Enum):
    """Severity level attached to each reported Issue."""
    warning = "warning"
    error = "error"
class IssueCode(IntEnum):
    """Numeric identifier for each issue type (2000-range)."""
    duplicate_key = 2000
    missing_key = 2001
    redundant_key = 2002
    untranslated_value = 2003
    unused_key = 2004
    improperly_used_key = 2005
    missing_file = 2006
    redundant_file = 2007
    unmatched_placeholder = 2008
    format_error = 2009
    encoding_error = 2010
class IssueName(Enum):
    """Human-readable issue names; these values are what appears in the
    detect-issues lists read from/written to the configuration file."""
    duplicate_key = "duplicate key"
    missing_key = "missing key"
    redundant_key = "redundant key"
    untranslated_value = "untranslated value"
    unused_key = "unused key"
    # NOTE(review): member name says "improperly used" but the value is
    # "undefined key" — mirrored in Description below, so it looks deliberate.
    improperly_used_key = "undefined key"
    missing_file = "missing file"
    redundant_file = "redundant file"
    unmatched_placeholder = "unmatched placeholder"
    format_error = "format error"
    encoding_error = "encoding error"
class Description(Enum):
    """Longer description text logged for each issue type."""
    duplicate_key = "duplicate key in resource file(s)"
    missing_key = "missing key in localized resource file(s)"
    redundant_key = "redundant key in localized resource file(s)"
    untranslated_value = "untranslated string value in localized resource file"
    unused_key = "unused key in resource file"
    improperly_used_key = "undefined resource key used in source code"
    missing_file = "missing resource file(s)"
    redundant_file = "redundant resource file(s)"
    unmatched_placeholder = "unmatched placeholder(s) in localized resource file"
    format_error = "string value with format error in resource file"
    encoding_error = "unknown or incorrect encoding of resource file"
class Context(Enum):
    """Per-issue context format strings. Values are delimited with the corner
    brackets U+2308/U+2309 so boundaries of user data stay visible; Issue.write
    strips those markers before logging."""
    duplicate_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
    missing_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
    redundant_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
    untranslated_value = "key=\u2308{0}\u2309, value=\u2308{1}\u2309"
    unused_key = "key=\u2308{0}\u2309"
    improperly_used_key = "{0}"
    missing_file = "language(s)=\u2308{0}\u2309"
    redundant_file = "language(s)=\u2308{0}\u2309"
    unmatched_placeholder = "key=\u2308{0}\u2309, base value=\u2308{1}\u2309, localized value=\u2308{2}\u2309"
    format_error = "key=\u2308{0}\u2309, value=\u2308{1}\u2309"
    encoding_error = "{0}"
class Issue:
    """One detected localization issue: location, classification, and context."""

    def __init__(self, file, line, column_begin, column_begin_offset, column_end, severity, code, description, context, information = None):
        self.file = file
        self.line = line
        self.column_begin = column_begin
        # Added to column_begin whenever the position is rendered for output.
        self.column_begin_offset = column_begin_offset
        self.column_end = column_end
        self.code = code
        self.description = description
        self.severity = severity
        self.context = context
        # Optional free-form extra detail; omitted from output when falsy.
        self.information = information

    def _clean_context(self):
        # Strip the U+2308/U+2309 delimiters used in Context format strings.
        return self.context.replace("\u2308", "").replace("\u2309", "")

    def write(self):
        """Log the issue, including position fields only when any is non-zero."""
        pieces = [f"file: {self.file}, "]
        if self.line or self.column_begin or self.column_end:
            pieces.append(f"line: {self.line}, column begin: {self.column_begin + self.column_begin_offset}, column end: {self.column_end}, ")
        pieces.append(f"issue: {self.description.value}, severity: {self.severity.value}, context: {self._clean_context()}")
        if self.information:
            pieces.append(f", information: {self.information}")
        LOG.info("".join(pieces))

    def write_with_position(self):
        """Log the issue with position fields unconditionally."""
        LOG.info(f"file: {self.file}, line: {self.line}, column begin: {self.column_begin + self.column_begin_offset}, column end: {self.column_end}, issue: {self.description.value}, severity: {self.severity.value}, context: {self._clean_context()}")

    def write_without_position(self):
        """Log the issue without any position fields."""
        LOG.info(f"file: {self.file}, issue: {self.description.value}, severity: {self.severity.value}, context: {self._clean_context()}")
class Issues:
    """Ordered collection of Issue objects with per-severity buckets and counters."""

    def __init__(self):
        self.issues = []
        self.warnings = []
        self.errors = []
        self.issue_count = 0
        self.warning_count = 0
        self.error_count = 0

    def add(self, issue):
        """Record one issue and file it into the bucket matching its severity."""
        self.issues.append(issue)
        self.issue_count += 1
        severity = issue.severity
        if severity == Severity.warning:
            self.warnings.append(issue)
            self.warning_count += 1
        elif severity == Severity.error:
            self.errors.append(issue)
            self.error_count += 1
        # Any other severity is counted in issue_count only.

    def extend(self, issues_add):
        """Merge another Issues collection into this one; falsy input is a no-op."""
        if not issues_add:
            return
        self.issues.extend(issues_add.issues)
        self.warnings.extend(issues_add.warnings)
        self.errors.extend(issues_add.errors)
        self.issue_count += issues_add.issue_count
        self.warning_count += issues_add.warning_count
        self.error_count += issues_add.error_count

    def get_issues(self):
        """Yield all recorded issues in insertion order."""
        yield from self.issues

    def get_warnings(self):
        """Yield only warning-severity issues."""
        yield from self.warnings

    def get_errors(self):
        """Yield only error-severity issues."""
        yield from self.errors
class BaseResFile:
    """A single resource file on disk: path, detected language, and the
    key/value containers filled in by subclasses' parse() implementations."""

    def __init__(self, directory, file, extension, language = None):
        """
        directory -- directory containing the file
        file      -- file name (with extension)
        extension -- resource file extension without the dot
        language  -- canonical language code; derived from the file/dir name
                     via STANDARDIZED_LANGUAGES when not supplied
        """
        self.directory = directory
        self.file = file
        self.extension = extension
        self.path = os.path.join(self.directory, self.file)
        self.language = language if language else self.get_language()
        # Parsing results; (re)initialized by reset_value_containers().
        self.keys = set()
        self.values = []
        self.key_value_pairs = {}
        #self.key_line_pairs = {}
        self.duplicate_keys = []
        self.escape_error_keys = []
        self.item_count = 0
        # Human-readable reason when read() failed to decode the file.
        self.encoding_error = ""

    def reset_value_containers(self):
        """Clear all parsing results so the file can be re-parsed."""
        self.keys = set()
        self.values = []
        self.key_value_pairs = {}
        #self.key_line_pairs = {}
        self.duplicate_keys = []
        self.escape_error_keys = []
        self.item_count = 0

    def get_language(self):
        """Infer the canonical language from the file name parts (preferring
        the part just before the extension), then from the directory parts
        (preferring the last one); default to BASE_LANGUAGE."""
        sub_names = self.file.lower().split(".")
        try:
            sub_name = sub_names[-2]
            if sub_name in STANDARDIZED_LANGUAGES.keys():
                return STANDARDIZED_LANGUAGES[sub_name]
        except IndexError:
            pass
        for sub_name in sub_names:
            if sub_name in STANDARDIZED_LANGUAGES.keys():
                return STANDARDIZED_LANGUAGES[sub_name]
        sub_dirs = self.directory.lower().split(os.sep)
        try:
            sub_dir = sub_dirs[-1]
            if sub_dir in STANDARDIZED_LANGUAGES.keys():
                return STANDARDIZED_LANGUAGES[sub_dir]
        except IndexError:
            pass
        #Is the following necessary? Do we need to decide whether the other sub directory is language id besides the last sub directory?
        for sub_dir in sub_dirs:
            if sub_dir in STANDARDIZED_LANGUAGES.keys():
                return STANDARDIZED_LANGUAGES[sub_dir]
        return BASE_LANGUAGE

    def is_file(self):
        """Return True when self.path exists and is a regular file."""
        return os.path.isfile(self.path)

    def read(self):
        """Read and decode the file, trying BOM detection, then UTF-8, then the
        per-language fallback codec, then cp1252. Returns the decoded text or
        None (with self.encoding_error set when decoding failed)."""
        try:
            with open(self.path, "rb") as f:
                bin_data = f.read()
        except Exception as e:
            LOG.error("Cannot open file '{path}' to read: {exception}".format(path = self.path, exception = e))
            return None
        # 4-byte BOMs MUST be tested before the 2-byte ones: BOM_UTF32_LE
        # (FF FE 00 00) starts with BOM_UTF16_LE (FF FE), so the previous
        # dict-ordered check silently mis-decoded UTF-32-LE files as UTF-16-LE.
        bom_encodings = (
            (codecs.BOM_UTF32_BE, "utf_32_be"),
            (codecs.BOM_UTF32_LE, "utf_32_le"),
            (codecs.BOM_UTF16_BE, "utf_16_be"),
            (codecs.BOM_UTF16_LE, "utf_16_le"),
            (codecs.BOM_UTF8, "utf_8"),
        )
        for bom, encoding in bom_encodings:
            if bin_data.startswith(bom):
                try:
                    return bin_data[len(bom):].decode(encoding)
                except UnicodeDecodeError:
                    self.encoding_error = "the real encoding is not the same as '{encoding}' encoding detected by BOM".format(encoding = encoding)
                    return None
        try:
            return bin_data.decode("utf_8")
        except UnicodeDecodeError:
            pass
        if self.language in LANGUAGE_ENCODINGS.keys():
            try:
                return bin_data.decode(LANGUAGE_ENCODINGS[self.language])
            except UnicodeDecodeError:
                pass
            try:
                return bin_data.decode("cp1252") #some localized resource files are not translated
            except UnicodeDecodeError:
                self.encoding_error = "unknown encoding"
                return None
        else:
            self.encoding_error = "unknown encoding"
            return None

    def get_group_id(self):
        """Return (file name stripped of language parts, directory stripped of
        its first language component, index of that component) — files sharing
        this triple are treated as translations of each other."""
        sub_names = self.file.split(".")
        file_adjusted = ""
        for sub_name in sub_names:
            if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
                file_adjusted += sub_name
        #remove language in whatever position instead of the last position: add language position as the third id(the position set to 1 if there is no language)
        sub_dirs = self.directory.split(os.sep)
        dir_adjusted = sub_dirs
        index = 0
        for sub_dir in sub_dirs:
            if sub_dir.lower() in STANDARDIZED_LANGUAGES.keys():
                dir_adjusted.remove(sub_dir)
                break
            index += 1
        # NOTE: when no language component is found, index equals len(sub_dirs).
        return file_adjusted, os.sep.join(dir_adjusted), index

    def parse(self, parsing_patterns = None):
        """Populate the key/value containers; implemented by format-specific subclasses."""
        pass
class ResFileGroup:
    """The set of translations of one resource file, keyed by language, with
    the base (English) file tracked separately."""

    def __init__(self, base_res_file = None):
        self.res_files = {}
        self.localized_res_files = {}
        self.base_res_file = base_res_file
        #TODO: check whether the language of base_res_file is BASE_LANGUAGE
        if base_res_file:
            self.res_files[base_res_file.language] = base_res_file

    def add_resource_file(self, res_file):
        """Register a file under its language; a BASE_LANGUAGE file becomes the
        group's base file (warning if one already exists)."""
        #TODO: check the language of current file exists in group
        language = res_file.language
        self.res_files[language] = res_file
        if language == BASE_LANGUAGE:
            if self.base_res_file:
                LOG.warning("Two English resource files found in a group. If the languages of them are wrongly-determined, contact the tool author, otherwise remove unused resource file in source code or check the configuration file to make sure correct resource file is used. Two suspect resource files are:\n '{base_file}'\n '{current_file}'".format(base_file = self.base_res_file.path, current_file = res_file.path))
            self.base_res_file = res_file
        else:
            self.localized_res_files[language] = res_file
class BaseResDetector:
def __init__(self, dir_input, res_files_input, config_input, type_input):
    """
    dir_input       -- root source directory being scanned
    res_files_input -- non-empty list of resource-file objects of one type
    config_input    -- configuration object (attribute names per file type)
    type_input      -- resource file type key used to index config attributes
    """
    self.src_dir = dir_input
    self.config = config_input
    # Filled in by get_detect_languages()/get_detect_issues().
    self.detect_languages = set()
    self.detect_issues = set()
    self.res_files = res_files_input
    self.res_file_type = type_input
    # NOTE: raises IndexError if res_files_input is empty — callers must pass
    # at least one file.
    self.res_file_ext = self.res_files[0].extension
    self.res_file_groups = []
    self.issues = Issues()
    self.res_file_count = 0
    self.item_count = 0
def detect(self):
self.group_resource_files()
self.parse_resource_files()
self.filter_resource_file_groups()
self.get_detect_languages()
self.get_detect_issues()
self.detect_duplicate_keys()
self.detect_missing_keys()
self.detect_redundant_keys()
self.detect_untranslated_values()
self.detect_unused_and_undefined_keys()
self.detect_missing_resource_files()
self.detect_redundant_resource_files()
self.detect_unmatched_placeholders()
self.detect_values_with_format_error()
self.detect_encoding_errors()
def print_group(self):
    """Log every group's files (path + language, base file marked), with a
    separator line after each group. Debug/inspection helper."""
    for res_file_group in self.res_file_groups:
        for language, res_file in sorted(res_file_group.res_files.items()):
            res_file_info = res_file.path + " " + language
            if res_file == res_file_group.base_res_file:
                res_file_info += "------base------"
            LOG.info(res_file_info)
        LOG.info("************************************************************************************************************************")
def write_configuration(self):
    """Append this detector's inferred settings (switch, issue list, languages,
    fixed flag, resource groups) to the configuration file.

    Runs the grouping/parsing pipeline first so the groups and language set
    reflect the current sources.
    """
    self.group_resource_files()
    self.parse_resource_files()
    self.filter_resource_file_groups()
    self.get_detect_languages()
    self.get_detect_issues()
    # Context manager guarantees the handle is closed even if a write raises;
    # the original open()/close() pair leaked the handle on any exception.
    with open(file = self.config.config_file_path, mode = "a", encoding = "utf_8_sig") as config_file:
        LOG.info("Writing configuration...")
        config_file.write(self.config.detector_switch_attrs[self.res_file_type] + " = True\n")
        config_file.write("{attr_name} = [{detect_issues}]\n".format(attr_name = self.config.detect_issues_attrs[self.res_file_type], detect_issues = ", ".join(['"{item}"'.format(item = item.value) for item in IssueName if item.value in self.detect_issues])))
        config_file.write(self.config.detect_languages_attrs[self.res_file_type] + " = [")
        for language in sorted(self.detect_languages):
            config_file.write("\"" + language + "\", ")
        config_file.write("]\n")
        config_file.write(self.config.fixed_res_groups_attrs[self.res_file_type] + " = True\n")
        config_file.write(self.config.res_groups_attrs[self.res_file_type] + " =\\\n[\n")
        # Paths are written as raw strings (R"...") so Windows separators survive.
        for res_file_group in self.res_file_groups:
            config_file.write("{\n")
            for language, res_file in sorted(res_file_group.res_files.items()):
                config_file.write("\"" + language + "\" : R\"" + res_file.path + "\",\n")
            config_file.write("},\n")
        config_file.write("]\n\n\n")
def group_resource_files(self):
    """Populate self.res_file_groups.

    Two modes:
    * With a user config whose fixed-groups flag is true (the default when the
      attribute is absent), groups are read verbatim from the config module;
      every listed path must exist, match this detector's extension, and carry
      a valid language key — otherwise the application quits.
    * Otherwise files are grouped automatically by BaseResFile.get_group_id()
      (file name and directory with language components removed).
    """
    use_fixed_res_file_group = True
    if self.config.use_user_config:
        try:
            use_fixed_res_file_group = getattr(self.config.config_module, self.config.fixed_res_groups_attrs[self.res_file_type])
        except AttributeError:
            # Attribute missing from the user config: keep the default (True).
            pass
    if self.config.use_user_config and use_fixed_res_file_group:
        LOG.info("Reading resource file group information from configuration file...")
        try:
            res_file_groups_config = getattr(self.config.config_module, self.config.res_groups_attrs[self.res_file_type])
        except AttributeError:
            LOG.critical("'{group_name}' is not defined in configuration file".format(group_name = self.config.res_groups_attrs[self.res_file_type]))
            quit_application(-1)
        for res_file_group_config in res_file_groups_config:
            res_file_group = ResFileGroup()
            for language_key, path in res_file_group_config.items():
                # Config paths are relative to the source directory.
                absolute_path = os.path.join(self.src_dir, path)
                directory = os.path.dirname(absolute_path)
                file = os.path.basename(absolute_path)
                if not file.endswith("." + self.res_file_ext):
                    LOG.critical("'{file}' is not a '{type}' resource file".format(file = absolute_path, type = self.res_file_ext))
                    quit_application(-1)
                if not os.path.isfile(absolute_path):
                    LOG.critical("'{path}' does not exist".format(path = absolute_path))
                    quit_application(-1)
                language = None
                try:
                    language = STANDARDIZED_LANGUAGES[language_key]
                except KeyError:
                    LOG.critical("'{language_key}' is not a valid language, please refer to the following: {standardized_languages}".format(language_key = language_key, standardized_languages = "'" + "', '".join(STANDARDIZED_LANGUAGES.keys()) + "'."))
                    quit_application(-1)
                # Instantiate the concrete resource-file class for this type.
                res_file = self.config.res_file_classes[self.res_file_type](directory , file, self.res_file_ext, language)
                res_file_group.add_resource_file(res_file)
            self.res_file_groups.append(res_file_group)
    else:
        LOG.info("Grouping resource files...")
        # Map group id -> ResFileGroup; files with equal ids are translations
        # of the same logical resource.
        id_group_pairs = {}
        for res_file in self.res_files:
            group_id = res_file.get_group_id()
            res_file_group = id_group_pairs.get(group_id)
            if res_file_group:
                res_file_group.add_resource_file(res_file)
            else:
                res_file_group = ResFileGroup()
                res_file_group.add_resource_file(res_file)
                id_group_pairs[group_id] = res_file_group
                self.res_file_groups.append(res_file_group)
def get_detect_issues(self):
if self.config.use_user_config:
LOG.info("Reading issue types to be detected from configuration file...")
self.detect_issues = getattr(self.config.config_module, self.config.detect_issues_attrs[self.res_file_type], self.config.default_detect_issues)
else:
LOG.info("Getting default issue types to be detected...")
self.detect_issues = self.config.default_detect_issues
def get_detect_languages(self):
    """Determine which languages to check.

    With a user config, read the language list verbatim (quit on failure).
    Otherwise use a heuristic: among groups containing more than one language,
    find the most common group size (a mode over sizes), and take the language
    set of the group that made that size the mode; ties on count prefer the
    larger language set. If every group has exactly one language, fall back to
    the first group's language set.
    """
    if self.config.use_user_config:
        LOG.info("Reading languages to be detected from configuration file...")
        try:
            self.detect_languages = set(getattr(self.config.config_module, self.config.detect_languages_attrs[self.res_file_type]))
        except AttributeError:
            LOG.critical("Cannot read languages from configuration files")
            quit_application(-1)
    else:
        LOG.info("Determining languages to be detected...")
        # language_counts: group size -> number of groups with that size.
        language_counts = {}
        max_count = 0
        for res_file_group in self.res_file_groups:
            num = len(res_file_group.res_files.keys())
            # Single-language groups don't vote; they carry no localization info.
            if num != 1:
                if not num in language_counts.keys():
                    language_counts[num] = 0
                language_counts[num] += 1
                current_count = language_counts[num]
                if current_count > max_count:
                    max_count = current_count
                    self.detect_languages = set(res_file_group.res_files.keys())
                elif current_count == max_count:
                    # Tie on frequency: keep whichever set covers more languages.
                    current_languages = set(res_file_group.res_files.keys())
                    if len(current_languages) > len(self.detect_languages):
                        self.detect_languages = current_languages
                else:
                    pass
        if max_count == 0:
            try:
                self.detect_languages = set(self.res_file_groups[0].res_files.keys())
            except IndexError:
                # No groups at all: leave detect_languages empty.
                pass
    LOG.info("Detect language(s): {languages}".format(languages = " ".join(sorted(self.detect_languages))))
def get_parsing_patterns(self):
    """Hook for subclasses: return format-specific parsing patterns passed to
    each file's parse(); the base implementation has none."""
    return None
def parse_resource_files(self):
LOG.info("Parsing resource files, which may take some time...")
parsing_patterns = self.get_parsing_patterns()
for res_file_group in self.res_file_groups:
for language, res_file in res_file_group.res_files.items():
res_file.parse(parsing_patterns)
self.item_count += res_file.item_count
def filter_resource_file_groups(self):
LOG.info("Removing group where each file has no string...")
temp_groups = list(self.res_file_groups)
self.res_file_groups = []
for res_file_group in temp_groups:
qualified_flag = False
for language, res_file in res_file_group.res_files.items():
if (res_file.item_count != 0) or res_file.encoding_error:
qualified_flag = True
if qualified_flag:
self.res_file_groups.append(res_file_group)
self.res_file_count += len(res_file_group.res_files)
def detect_missing_resource_files(self):
if IssueName.missing_file.value not in self.detect_issues:
return
LOG.info("Detecting missing localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
missing_languages = self.detect_languages - set(res_file_group.res_files.keys())
formatted_languages = "/".join(sorted(missing_languages))
if formatted_languages:
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.missing_file, description = Description.missing_file, severity = Severity.warning, context = Context.missing_file.value.format(formatted_languages))
self.issues.add(issue)
def detect_redundant_resource_files(self):
if IssueName.redundant_file.value not in self.detect_issues:
return
LOG.info("Detecting redundant localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
redundant_languages = set(res_file_group.res_files.keys()) - self.detect_languages
formatted_languages = "/".join(sorted(redundant_languages))
if formatted_languages:
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.redundant_file, description = Description.redundant_file, severity = Severity.warning, context = Context.redundant_file.value.format(formatted_languages))
self.issues.add(issue)
def detect_duplicate_keys(self):
    """Report keys defined more than once within a file.

    Each file's parser records its own duplicates in res_file.duplicate_keys;
    here they are merged per key into a '/'-joined list of the languages
    affected and reported as one error per key against the base file's path.
    """
    if IssueName.duplicate_key.value not in self.detect_issues:
        return
    LOG.info("Detecting duplicate keys in resource files...")
    for res_file_group in self.res_file_groups:
        base_res_file = res_file_group.base_res_file
        # Groups without a base file are skipped entirely.
        if not base_res_file:
            continue
        key_languages = {}
        for language, res_file in sorted(res_file_group.res_files.items()):
            for duplicate_key in res_file.duplicate_keys:
                duplicate_languages = key_languages.get(duplicate_key, None)
                if duplicate_languages:
                    key_languages[duplicate_key] = duplicate_languages + "/" + language
                else:
                    key_languages[duplicate_key] = language
        for duplicate_key, duplicate_languages in sorted(key_languages.items()):
            issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.duplicate_key, description = Description.duplicate_key, severity = Severity.error, context = Context.duplicate_key.value.format(duplicate_key, duplicate_languages))
            self.issues.add(issue)
def detect_missing_keys(self):
    """Report keys present in the base file but absent from localized files,
    merged per key into a '/'-joined language list; one error per key."""
    if IssueName.missing_key.value not in self.detect_issues:
        return
    LOG.info("Detecting missing keys in localized resource files...")
    for res_file_group in self.res_file_groups:
        base_res_file = res_file_group.base_res_file
        # Groups without a base file have no reference key set.
        if not base_res_file:
            continue
        base_keys = base_res_file.keys
        key_languages = {}
        for language, res_file in sorted(res_file_group.localized_res_files.items()):
            # Set difference: keys the localized file failed to provide.
            missing_keys = base_keys - res_file.keys
            for missing_key in missing_keys:
                missing_languages = key_languages.get(missing_key, None)
                if missing_languages:
                    key_languages[missing_key] = missing_languages + "/" + language
                else:
                    key_languages[missing_key] = language
        for missing_key, missing_languages in sorted(key_languages.items()):
            issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.missing_key, description = Description.missing_key, severity = Severity.error, context = Context.missing_key.value.format(missing_key, missing_languages))
            self.issues.add(issue)
def detect_redundant_keys(self):
    """Report localized-file keys that do not exist in the base resource file.

    Mirror image of detect_missing_keys: for each group with a base file,
    keys present in a localized file but not in the base are collected and
    one error issue is emitted per redundant key, listing all affected
    languages joined with "/".
    """
    if IssueName.redundant_key.value not in self.detect_issues:
        return
    LOG.info("Detecting redundant keys in localized resource files...")
    for res_file_group in self.res_file_groups:
        base_res_file = res_file_group.base_res_file
        if not base_res_file:
            continue
        # key -> "/"-joined languages in which that extra key appears
        languages_by_key = {}
        for language, localized_file in sorted(res_file_group.localized_res_files.items()):
            for key in localized_file.keys - base_res_file.keys:
                if key in languages_by_key:
                    languages_by_key[key] = languages_by_key[key] + "/" + language
                else:
                    languages_by_key[key] = language
        for key, languages in sorted(languages_by_key.items()):
            self.issues.add(Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.redundant_key, description = Description.redundant_key, severity = Severity.error, context = Context.redundant_key.value.format(key, languages)))
def is_translation_necessary(self, value):
    """Return True when *value* looks like text that should be localized.

    Empty or None values and purely numeric strings need no translation.
    URLs are deliberately NOT excluded here: we cannot be sure a URL never
    needs to be translated.
    """
    return bool(value) and not value.isnumeric()
def detect_untranslated_values(self):
    """Warn when a localized value is identical to its base-language value.

    Identical values are only reported when the value actually needs
    translation at all (see is_translation_necessary), which filters out
    empty and purely numeric strings.
    """
    if IssueName.untranslated_value.value not in self.detect_issues:
        return
    LOG.info("Detecting untranslated values in resource files...")
    for res_file_group in self.res_file_groups:
        base_res_file = res_file_group.base_res_file
        if not base_res_file:
            continue
        ordered_keys = sorted(base_res_file.keys)
        base_values = base_res_file.key_value_pairs
        for language, localized_file in sorted(res_file_group.localized_res_files.items()):
            localized_keys = localized_file.keys
            localized_values = localized_file.key_value_pairs
            for key in ordered_keys:
                if key not in localized_keys:
                    continue
                value = localized_values[key]
                if value == base_values[key] and self.is_translation_necessary(value):
                    self.issues.add(Issue(file = localized_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.untranslated_value, description = Description.untranslated_value, severity = Severity.warning, context = Context.untranslated_value.value.format(key, value)))
def detect_values_with_format_error(self):
    """Report keys whose values were flagged with escape/format errors at parse time.

    The actual detection happens while the file is parsed; this method only
    turns the recorded escape_error_keys into error issues.
    """
    if IssueName.format_error.value not in self.detect_issues:
        return
    LOG.info("Detecting string value format issues in resource files...")
    for res_file_group in self.res_file_groups:
        for language, res_file in sorted(res_file_group.res_files.items()):
            values = res_file.key_value_pairs
            for bad_key in res_file.escape_error_keys:
                self.issues.add(Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.format_error, description = Description.format_error, severity = Severity.error, context = Context.format_error.value.format(bad_key, values[bad_key])))
def get_placeholder_pattern(self):
    """Return the pyparsing pattern used to find placeholders in values.

    Base implementation returns None, which makes
    detect_unmatched_placeholders() skip detection entirely; format-specific
    detector subclasses override this with a real pattern.
    """
    return None
def detect_unmatched_placeholders(self):
    """Report localized values whose placeholder multiset differs from the base value's.

    A value matches only when every placeholder occurs the same number of
    times in both base and localized strings; keys whose base value contains
    no placeholders are skipped.
    """
    if IssueName.unmatched_placeholder.value not in self.detect_issues:
        return
    LOG.info("Detecting unmatched placeholders in localized resource files...")
    placeholder_pattern = self.get_placeholder_pattern()
    if not placeholder_pattern:
        LOG.info("Placeholder pattern is not defined, skip detection")
        return

    def count_placeholders(text):
        # placeholder -> number of occurrences in text
        counts = {}
        for tokens, start, end in placeholder_pattern.scanString(text):
            counts[tokens[0]] = counts.get(tokens[0], 0) + 1
        return counts

    for res_file_group in self.res_file_groups:
        base_res_file = res_file_group.base_res_file
        if not base_res_file:
            continue
        localized_files = sorted(res_file_group.localized_res_files.items())
        # If this sorting consumes a lot of time, sort the detection result instead.
        for base_key, base_value in sorted(base_res_file.key_value_pairs.items()):
            base_placeholders = count_placeholders(base_value)
            if not base_placeholders:
                continue
            for language, res_file in localized_files:
                if base_key not in res_file.keys:
                    continue
                target_value = res_file.key_value_pairs[base_key]
                if count_placeholders(target_value) != base_placeholders:
                    self.issues.add(Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.unmatched_placeholder, description = Description.unmatched_placeholder, severity = Severity.error, context = Context.unmatched_placeholder.value.format(base_key, base_value, target_value)))
def detect_unused_and_undefined_keys(self):
    """No-op hook; intended to be overridden by subclasses that can
    cross-reference resource keys against code usage. None of the
    subclasses visible here override it — presumably others do elsewhere
    in the file (TODO confirm)."""
    pass
def detect_encoding_errors(self):
    """Report resource files whose contents failed to decode.

    The decode attempt happens during file reading; this method only turns
    the recorded per-file encoding_error value into an error issue.
    """
    if IssueName.encoding_error.value not in self.detect_issues:
        return
    LOG.info("Detecting resource file encoding errors...")
    for res_file_group in self.res_file_groups:
        for language, res_file in sorted(res_file_group.res_files.items()):
            error = res_file.encoding_error
            if not error:
                continue
            self.issues.add(Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.encoding_error, description = Description.encoding_error, severity = Severity.error, context = Context.encoding_error.value.format(error)))
class RcResFile(BaseResFile):
    """Resource file backed by a Windows .rc script's STRINGTABLE sections."""

    def parse(self, parsing_patterns):
        """Extract key/value pairs from every STRINGTABLE in the file.

        parsing_patterns is the (string_table, key_value_pair) pair produced
        by RcResDetector.get_parsing_patterns(). On any parsing failure the
        value containers are reset so the file contributes nothing.
        """
        data = self.read()
        if not data:
            LOG.warning("There is no data in file '{path}'".format(path = self.path))
            return
        try:
            string_table, key_value_pair = parsing_patterns
            for table_tokens, table_start, table_end in string_table.scanString(data):
                table_body = table_tokens[0]
                for pair_tokens, pair_start, pair_end in key_value_pair.scanString(table_body):
                    for pair in pair_tokens:
                        key = pair[0]
                        quoted_value = pair[1]
                        value = quoted_value[1:-1]  # drop the surrounding double quotes
                        # A key counts as duplicated only when its value repeats too;
                        # this tolerates #ifdef-guarded redefinitions (workaround for
                        # Receiver for Windows).
                        if key in self.keys and value == self.key_value_pairs[key]:
                            self.duplicate_keys.append(key)
                        self.keys.add(key)
                        self.values.append(value)
                        self.key_value_pairs[key] = value
                        self.item_count += 1
        except Exception as e:
            LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
            self.reset_value_containers()
            return
class RcResDetector(BaseResDetector):
    """Issue detector for Windows .rc resource scripts."""
    def get_parsing_patterns(self):
        """Build the pyparsing grammar consumed by RcResFile.parse.

        Returns a (string_table, key_value_pair) pair: string_table yields
        the raw text of each STRINGTABLE body, and key_value_pair extracts
        identifier/quoted-string entries from that body.
        """
        # A string id is either a C identifier or a bare numeric id.
        key = PY.Word(PY.alphas + "_", PY.alphanums + "_") | PY.Word(PY.nums)
        value = PY.dblQuotedString
        define_patterns = PY.Regex(R"#ifdef.*") | PY.Regex(R"#ifndef.*") | PY.Regex(R"#elif.*") | PY.Regex(R"#endif.*") # add for Receiver for Windows
        # Entries are (key value) groups; preprocessor lines are swallowed and
        # C/C++ style comments ignored.
        key_value_pair = (PY.Group(key + value) | define_patterns.suppress()).ignore(PY.cppStyleComment).parseWithTabs()
        white_char = PY.Word(string.whitespace, exact = 1)
        # Skip past "STRINGTABLE ... BEGIN" (or "{"), then capture the original
        # text up to the matching "END" (or "}").  Quoted strings, comments and
        # preprocessor lines must not terminate the skip early.
        string_table = (white_char + PY.Literal("STRINGTABLE") + white_char).suppress() + PY.SkipTo((white_char + PY.Literal("BEGIN") + white_char) | PY.Literal("{"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True).suppress() + PY.originalTextFor(PY.SkipTo((white_char + PY.Literal("END") + (white_char | PY.stringEnd)) | PY.Literal("}"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True))
        #string_table_sign = (white_char + PY.Literal("STRINGTABLE") + white_char).suppress() + PY.SkipTo(PY.Literal("{"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True).suppress() + PY.originalTextFor(PY.SkipTo(PY.Literal("}"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True))
        string_table = string_table.ignore(PY.cppStyleComment).parseWithTabs().leaveWhitespace()
        return string_table, key_value_pair
    def get_placeholder_pattern(self):
        """Pattern matching printf-style (%s, %02d, ...) and FormatMessage
        numbered (%1, %2!s!) placeholders, plus escape sequences (%%, %n, ...)."""
        #reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ms679351%28v=vs.85%29.aspx, http://msdn.microsoft.com/en-us/library/56e442dc.aspx
        positive_integer = PY.Word("123456789", PY.nums)
        integer = PY.Literal("0") | positive_integer
        flags = PY.Word("-+ #0")
        width = integer | PY.Literal("*")
        precision = PY.Literal(".") + width
        type_prefix = PY.Literal("ll") | PY.Literal("l") | PY.Literal("I32") | PY.Literal("I64") | PY.Literal("I") | PY.Literal("h") | PY.Literal("w")
        type_flag = PY.Word("cCdiouxXeEfgGaAnpsSZ", exact = 1)
        format_string_body = PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(type_prefix) + type_flag
        # Escape sequences also count as placeholders so both sides must match.
        special_characters = PY.Combine(PY.Literal("%") + PY.Word("0% .!nrt", exact = 1))
        format_string = PY.Combine(PY.Literal("%") + format_string_body)
        # FormatMessage-style numbered inserts: %1, %2!s!, ...
        numbered_format_string = PY.Combine(PY.Literal("%") + positive_integer + PY.Optional(PY.Literal("!") + format_string_body + PY.Literal("!")))
        placeholder_pattern = PY.originalTextFor(numbered_format_string | format_string | special_characters)
        return placeholder_pattern
class Rc2ResFile(RcResFile):
    """.rc2 files use the same STRINGTABLE syntax as .rc; parsing is inherited unchanged."""
    pass
class Rc2ResDetector(RcResDetector):
    """Issue detector for .rc2 files; behavior is identical to RcResDetector."""
    pass
class McResFile(BaseResFile):
    """Resource file backed by a Windows message text (.mc) file.

    Format reference: http://msdn.microsoft.com/en-us/library/windows/desktop/dd996906(v=vs.85).aspx
    """

    def parse(self, parsing_patterns):
        """Extract SymbolicName/message-body pairs using the detector's grammar.

        parsing_patterns is the key_value_pair expression built by
        McResDetector.get_parsing_patterns(); each match yields the symbolic
        name followed by the message text.
        """
        data = self.read()
        if not data:
            LOG.warning("There is no data in file '{path}'".format(path = self.path))
            return
        try:
            for tokens, start_location, end_location in parsing_patterns.scanString(data):
                key, value = tokens[0], tokens[1]
                if key in self.keys:
                    self.duplicate_keys.append(key)
                self.keys.add(key)
                self.values.append(value)
                self.key_value_pairs[key] = value
                self.item_count += 1
        except Exception as e:
            LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
            self.reset_value_containers()
            return
class McResDetector(BaseResDetector):
    """Issue detector for Windows message text (.mc) files."""
    def get_parsing_patterns(self):
        """Build the pyparsing expression that yields (SymbolicName, message) pairs.

        A message entry starts at "MessageId=" and ends at a line containing
        only "."; Severity/Facility/Language/OutputBase items are accepted in
        any order and discarded.
        """
        comment = PY.Regex(R";/(?:\*(?:[^*]*;\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))")
        #comment = PY.Regex(R";.*") #this kind of comments are used in some projects
        variable = PY.Word(PY.alphanums + "_", PY.alphanums + "_")
        message_key = PY.Literal("MessageId") + PY.Literal("=") + PY.Optional(PY.Optional(PY.Literal("+")) + variable)
        severity = PY.Literal("Severity") + PY.Literal("=") + variable
        facility = PY.Literal("Facility") + PY.Literal("=") + variable
        symbolic_name = (PY.Literal("SymbolicName") + PY.Literal("=")).suppress() + variable
        output_base = PY.Literal("OutputBase") + PY.Literal("=") + PY.Optional(PY.Literal("{")) + variable + PY.Optional(PY.Literal("}"))
        language = PY.Literal("Language") + PY.Literal("=") + variable
        # Message body runs until a line that starts with "."; trailing whitespace is stripped.
        message_value = PY.SkipTo(PY.lineStart + PY.Literal(".")).setParseAction(lambda s, l, t: t[0].strip())
        #comment out below pattern since severity/facility/symbolic items can be in any order in reality, not like MSDN says...
        #key_value_pair = message_key.suppress() + PY.Optional(severity).suppress() + PY.Optional(facility).suppress() + symbolic_name + PY.Optional(output_base).suppress() + PY.Optional(language).suppress() + message_value
        careless_item = language | severity | facility | output_base
        key_value_pair = message_key.suppress() + PY.ZeroOrMore(careless_item).suppress() + symbolic_name + PY.ZeroOrMore(careless_item).suppress() + message_value
        return key_value_pair.ignore(comment).parseWithTabs()
    def get_placeholder_pattern(self):
        """Pattern matching FormatMessage numbered inserts (%1, %2!s!) and escapes (%%, %n, ...)."""
        #reference : http://msdn.microsoft.com/en-us/library/windows/desktop/dd996906(v=vs.85).aspx and the links ont the page
        positive_integer = PY.Word("123456789", PY.nums)
        integer = PY.Literal("0") | positive_integer
        flags = PY.Word("-#0")
        width = integer
        precision = PY.Literal(".") + PY.Optional(integer)
        type_flag = PY.Word("h", "cCdsSu", exact = 2) | PY.Word("l", "cCdisSuxX", exact = 2) | PY.Word("cCdipsSu", exact = 1)
        format_string_body = PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + type_flag
        special_characters = PY.Combine(PY.Literal("%") + PY.Word("0.!%nbr", exact = 1))
        numbered_format_string = PY.Combine(PY.Literal("%") + positive_integer + PY.Optional(PY.Literal("!") + format_string_body + PY.Literal("!")))
        placeholder_pattern = PY.originalTextFor(numbered_format_string | special_characters)
        return placeholder_pattern
class ResxResFile(BaseResFile):
    """Resource file backed by a .NET .resx XML document."""

    def parse(self, parsing_patterns = None):
        """Extract string entries from the <data> elements of the document.

        parsing_patterns is unused (.resx is parsed with ElementTree rather
        than pyparsing) but kept for interface compatibility with the other
        BaseResFile subclasses. On any parsing failure the value containers
        are reset so the file contributes nothing.
        """
        data = self.read()
        if not data:
            LOG.warning("There is no data in file '{path}'".format(path = self.path))
            return
        try:
            root = ET.fromstring(data)
            #escape_pattern = None # need to add whether there is an escape error, no need for now since parseError will be thrown in current implementation
            for elem in root.findall("data"):
                key = elem.get("name")
                if key is None:
                    continue
                # Dotted names are designer/control properties; keep only plain
                # names and explicit ".Text" properties (the localizable strings).
                if ("." in key) and (not key.endswith(".Text")):
                    continue
                # If there is no <value> child the actual value in a C# project is
                # null, and an empty <value> means ""; both are normalized to ""
                # to save effort handling them downstream.
                value = ""
                sub_elem = elem.find("value")
                # Fixed: identity comparison with None (was 'sub_elem != None', PEP 8 E711).
                # Truthiness must NOT be used here: an Element with no children is falsy.
                if sub_elem is not None:
                    value = "".join(sub_elem.itertext())
                if key in self.keys:
                    self.duplicate_keys.append(key)
                #if escape_pattern.match(value):
                #    self.escape_error_keys.append(key)
                self.keys.add(key)
                self.values.append(value)
                self.key_value_pairs[key] = value
                self.item_count += 1
        except Exception as e:
            LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
            self.reset_value_containers()
            return
class ResxResDetector(BaseResDetector):
    """Issue detector for .NET .resx resource files."""

    def is_translation_necessary(self, value):
        """Like the base check, but assembly references containing
        'PublicKeyToken' never need translation."""
        if not BaseResDetector.is_translation_necessary(self, value):
            return False
        return "PublicKeyToken" not in value

    def get_placeholder_pattern(self):
        """Match .NET composite-format placeholders like {0}, yielding only the index."""
        opening = PY.Literal("{").suppress()
        closing = PY.Literal("}").suppress()
        return opening + PY.Word(PY.nums) + closing
class ReswResFile(ResxResFile):
    """.resw files share the .resx XML format; parsing is inherited unchanged."""
    pass
class ReswResDetector(ResxResDetector):
    """Issue detector for .resw files; behavior is identical to ResxResDetector."""
    pass
class WxlResFile(BaseResFile):
# Maybe the most effeicent way is to get the last five character of the pure file name when determining the language based on the file name
def get_language(self):
sub_names = self.file.lower().split(".")
try:
sub_name = sub_names[-2]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]