#!/usr/bin/env python3
#
# exportTranslations.py
#
# Export LCD language strings to CSV files for easier translation.
# Use importTranslations.py to import CSV into the language files.
#
import re
import sys
from pathlib import Path
| 11 | + |
# Write multiple sheets if true, otherwise write one giant sheet
MULTISHEET = True

# Where to look for the language files
LANGHOME = "Marlin/src/lcd/language"

# Bail out early unless run from the repo root, since every later step
# depends on finding the language files.
# Use sys.exit() rather than the site-module exit() builtin, which is
# intended for interactive use and may be absent (e.g. under python -S).
if not Path(LANGHOME).is_dir():
    print("Error: Couldn't find the '%s' directory." % LANGHOME)
    print("Edit LANGHOME or cd to the root of the repo before running.")
    sys.exit(1)
| 23 | + |
# Human-readable display names for each language code found in filenames
LANGNAME = {
    'an': "Aragonese",
    'bg': "Bulgarian",
    'ca': "Catalan",
    'cz': "Czech",
    'da': "Danish",
    'de': "German",
    'el': "Greek",
    'el_CY': "Greek (Cyprus)",
    'el_gr': "Greek (Greece)",
    'en': "English",
    'es': "Spanish",
    'eu': "Basque-Euskera",
    'fi': "Finnish",
    'fr': "French",
    'fr_na': "French (no accent)",
    'gl': "Galician",
    'hr': "Croatian (Hrvatski)",
    'hu': "Hungarian / Magyar",
    'it': "Italian",
    'jp_kana': "Japanese (Kana)",
    'ko_KR': "Korean",
    'nl': "Dutch",
    'pl': "Polish",
    'pt': "Portuguese",
    'pt_br': "Portuguese (Brazil)",
    'ro': "Romanian",
    'ru': "Russian",
    'sk': "Slovak",
    'sv': "Swedish",
    'tr': "Turkish",
    'uk': "Ukrainian",
    'vi': "Vietnamese",
    'zh_CN': "Simplified Chinese",
    'zh_TW': "Traditional Chinese",
}
| 56 | + |
# Optional cap on the number of strings parsed per file; 0 disables it.
# Handy for quick test runs.
LIMIT = 0

# Per-language string tables, keyed by language code.
# Seeded with 'en' so English always comes first.
language_strings = dict(en=0)

# All distinct LCD string names, stored as dictionary keys
names = dict()
| 66 | + |
# Get all "language_*.h" files (sorted() already returns a list)
langfiles = sorted(Path(LANGHOME).glob('language_*.h'))

# Read each language file
for langfile in langfiles:
    # Get the language code from the filename, e.g. "language_pt_br.h" -> "pt_br"
    langcode = langfile.name.replace('language_', '').replace('.h', '')

    # Skip 'test' and any others that we don't want
    if langcode in ['test']: continue

    # Flags indicating that parsing is inside a wide or tall #if section
    wideflag = False
    tallflag = False
    # Number of strings captured from this file (only used with LIMIT)
    stringcount = 0
    # Strings captured from this file, grouped by display-size section
    strings = { 'narrow': {}, 'wide': {}, 'tall': {} }

    # A context manager guarantees the file is closed even if parsing fails.
    # (The former `if not f: continue` check was dead code: open() raises
    # on failure instead of returning a falsy value.)
    with open(langfile, 'r', encoding='utf-8') as f:
        # Read each line in the file
        for line in f:
            # Strip comments, surrounding whitespace, and trailing semicolon
            line = line.split("//")[0].strip()
            if line.endswith(';'): line = line[:-1].strip()

            # Check for wide or tall sections, assume no complicated nesting
            if line.startswith("#endif") or line.startswith("#else"):
                wideflag = False
                tallflag = False
            elif re.match(r'#if.*WIDTH\s*>=?\s*2[01].*', line): wideflag = True
            elif re.match(r'#if.*LCD_HEIGHT\s*>=?\s*4.*', line): tallflag = True

            # Only string-defining lines are captured; skip everything else
            match = re.match(r'LSTR\s+([A-Z0-9_]+)\s*=\s*(.+)\s*', line)
            if not match: continue

            # The string name and its raw value expression
            name = match.group(1)
            value = match.group(2)
            # Replace escaped quotes temporarily
            value = value.replace('\\"', '__Q__')

            # Remove all _UxGT wrappers from the value in a non-greedy way
            value = re.sub(r'_UxGT\((".*?")\)', r'\1', value)

            # Multi-line strings get one or more bars | for identification.
            # (The old `multiline` line-count variable was assigned but
            # never read, so it has been dropped.)
            multimatch = re.match(r'.*MSG_(\d)_LINE\s*\(\s*(.+?)\s*\).*', value)
            if multimatch:
                value = '|' + re.sub(r'"\s*,\s*"', '|', multimatch.group(2))

            # Wrap inline defines in parentheses
            value = re.sub(r' *([A-Z0-9]+_[A-Z0-9_]+) *', r'(\1)', value)
            # Remove quotes around strings, then restore escaped quotes
            value = re.sub(r'"(.*?)"', r'\1', value).replace('__Q__', '"')
            # Store all unique names as dictionary keys
            names[name] = 1
            # Store the string under its display-size section
            strings['tall' if tallflag else 'wide' if wideflag else 'narrow'][name] = value

            # Increment the string counter, stopping early when testing
            stringcount += 1
            if LIMIT and stringcount >= LIMIT: break

    # Store this language's strings in the master dict
    language_strings[langcode] = strings
| 140 | + |
# All language codes in insertion order ('en' is always first)
langcodes = [*language_strings]

# Uncomment to dump the parsed data for debugging
#print(language_strings)
| 146 | + |
# Write a single language entry to the CSV file with narrow, wide, and tall strings
def write_csv_lang(f, strings, name):
    """Write three comma-prefixed CSV cells for `name` to open file `f`:
    the narrow, wide, and tall variants from the `strings` dict.

    Missing variants produce empty cells. Embedded double quotes are
    doubled per RFC 4180; previously a value containing a literal `"`
    (restored from an escaped quote) produced a malformed CSV row.
    """
    for size in ('narrow', 'wide', 'tall'):
        f.write(',')
        if name in strings[size]:
            f.write('"%s"' % strings[size][name].replace('"', '""'))
| 155 | + |
if MULTISHEET:
    #
    # Export a separate sheet for each language
    #
    OUTDIR = 'csv-out'
    Path(OUTDIR).mkdir(exist_ok=True)

    for lang in langcodes:
        # One CSV per language. The context manager guarantees the file
        # is flushed and closed; the former `if not f: continue` check
        # was dead code since open() raises rather than return falsy.
        with open("%s/language_%s.csv" % (OUTDIR, lang), 'w', encoding='utf-8') as f:
            # Header row: name column plus narrow/wide/tall columns
            lname = lang + ' ' + LANGNAME[lang]
            header = ['name', lname, lname + ' (wide)', lname + ' (tall)']
            f.write('"' + '","'.join(header) + '"\n')

            # One row per known string name (union across all languages)
            for name in names:
                f.write('"' + name + '"')
                write_csv_lang(f, language_strings[lang], name)
                f.write('\n')
| 176 | + |
| 177 | +else: |
| 178 | + # |
| 179 | + # Export one large sheet containing all languages |
| 180 | + # |
| 181 | + f = open("languages.csv", 'w', encoding='utf-8') |
| 182 | + if f: |
| 183 | + header = ['name'] |
| 184 | + for lang in langcodes: |
| 185 | + lname = lang + ' ' + LANGNAME[lang] |
| 186 | + header += [lname, lname + ' (wide)', lname + ' (tall)'] |
| 187 | + f.write('"' + '","'.join(header) + '"\n') |
| 188 | + |
| 189 | + for name in names.keys(): |
| 190 | + f.write('"' + name + '"') |
| 191 | + for lang in langcodes: write_csv_lang(f, language_strings[lang], name) |
| 192 | + f.write('\n') |
| 193 | + f.close() |
0 commit comments