Merge upstream changes from Marlin 2.1.2.2
@@ -11,11 +11,14 @@ import sys
 import datetime
 import random
 try:
-    import heatshrink
+    import heatshrink2 as heatshrink
     heatshrink_exists = True
 except ImportError:
-    heatshrink_exists = False
+    try:
+        import heatshrink
+        heatshrink_exists = True
+    except ImportError:
+        heatshrink_exists = False
 
 def millis():
     return time.perf_counter() * 1000
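Note: the nested fallback above first tries the Python 3 `heatshrink2` package and only then the legacy `heatshrink` module, recording the outcome in `heatshrink_exists`. A minimal sketch of how a caller can gate on that flag (helper name and parameter values are illustrative, not part of the protocol):

```python
# Hypothetical guard around the encoder; encode() is the call this module
# itself uses, with window/lookahead values taken from the connect() handshake.
def maybe_compress(payload, window=8, lookahead=4):
    if not heatshrink_exists:
        return payload, False   # fall back to a plain, uncompressed transfer
    packed = heatshrink.encode(payload, window_sz2=window, lookahead_sz2=lookahead)
    return packed, True
```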
@@ -72,7 +75,7 @@ class Protocol(object):
         self.device = device
         self.baud = baud
         self.block_size = int(bsize)
-        self.simulate_errors = max(min(simerr, 1.0), 0.0);
+        self.simulate_errors = max(min(simerr, 1.0), 0.0)
         self.connected = True
         self.response_timeout = timeout
 
@@ -234,8 +237,8 @@ class Protocol(object):
 
     # checksum 16 fletchers
     def checksum(self, cs, value):
-        cs_low = (((cs & 0xFF) + value) % 255);
-        return ((((cs >> 8) + cs_low) % 255) << 8) | cs_low;
+        cs_low = (((cs & 0xFF) + value) % 255)
+        return ((((cs >> 8) + cs_low) % 255) << 8) | cs_low
 
     def build_checksum(self, buffer):
         cs = 0
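For reference, `checksum()` is a 16-bit Fletcher sum: the low byte is a mod-255 running sum of the input, the high byte a mod-255 sum of sums. A standalone sketch with a standard test vector:

```python
def fletcher16(buffer: bytes) -> int:
    # The same fold Protocol.checksum() performs, applied over a whole buffer.
    cs = 0
    for b in buffer:
        cs_low = ((cs & 0xFF) + b) % 255
        cs = ((((cs >> 8) + cs_low) % 255) << 8) | cs_low
    return cs

assert fletcher16(b"abcde") == 0xC8F0   # well-known Fletcher-16 result
```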
@@ -267,7 +270,7 @@ class Protocol(object):
 
     def response_ok(self, data):
         try:
-            packet_id = int(data);
+            packet_id = int(data)
         except ValueError:
             return
         if packet_id != self.sync:
@@ -276,7 +279,7 @@ class Protocol(object):
             self.packet_status = 1
 
     def response_resend(self, data):
-        packet_id = int(data);
+        packet_id = int(data)
         self.errors += 1
         if not self.syncronised:
             print("Retrying syncronisation")
@@ -327,7 +330,7 @@ class FileTransferProtocol(object):
         return self.responses.popleft()
 
     def connect(self):
-        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.QUERY);
+        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.QUERY)
 
         token, data = self.await_response()
         if token != 'PFT:version:':
@@ -349,7 +352,7 @@ class FileTransferProtocol(object):
 
         timeout = TimeOut(5000)
         token = None
-        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.OPEN, payload);
+        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.OPEN, payload)
         while token != 'PFT:success' and not timeout.timedout():
             try:
                 token, data = self.await_response(1000)
@@ -360,7 +363,7 @@ class FileTransferProtocol(object):
                     print("Broken transfer detected, purging")
                     self.abort()
                     time.sleep(0.1)
-                    self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.OPEN, payload);
+                    self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.OPEN, payload)
                     timeout.reset()
                 elif token == 'PFT:fail':
                     raise Exception("Can not open file on client")
@@ -369,10 +372,10 @@ class FileTransferProtocol(object):
             raise ReadTimeout()
 
     def write(self, data):
-        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.WRITE, data);
+        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.WRITE, data)
 
     def close(self):
-        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.CLOSE);
+        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.CLOSE)
         token, data = self.await_response(1000)
         if token == 'PFT:success':
             print("File closed")
@@ -385,7 +388,7 @@ class FileTransferProtocol(object):
             return False
 
     def abort(self):
-        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.ABORT);
+        self.protocol.send(FileTransferProtocol.protocol_id, FileTransferProtocol.Packet.ABORT)
         token, data = self.await_response()
         if token == 'PFT:success':
             print("Transfer Aborted")
@@ -393,18 +396,19 @@ class FileTransferProtocol(object):
     def copy(self, filename, dest_filename, compression, dummy):
         self.connect()
 
-        compression_support = heatshrink_exists and self.compression['algorithm'] == 'heatshrink' and compression
-        if compression and (not heatshrink_exists or not self.compression['algorithm'] == 'heatshrink'):
-            print("Compression not supported by client")
-        #compression_support = False
+        has_heatshrink = heatshrink_exists and self.compression['algorithm'] == 'heatshrink'
+        if compression and not has_heatshrink:
+            hs = '2' if sys.version_info[0] > 2 else ''
+            print("Compression not supported by client. Use 'pip install heatshrink%s' to fix." % hs)
+            compression = False
 
         data = open(filename, "rb").read()
         filesize = len(data)
 
-        self.open(dest_filename, compression_support, dummy)
+        self.open(dest_filename, compression, dummy)
 
         block_size = self.protocol.block_size
-        if compression_support:
+        if compression:
             data = heatshrink.encode(data, window_sz2=self.compression['window'], lookahead_sz2=self.compression['lookahead'])
 
         cratio = filesize / len(data)
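With `compression_support` folded into a mutated `compression` flag, the rest of `copy()` consults a single variable. A hedged round-trip sketch of the encode step (filename and parameter values invented for the demo; the real values come from the client's `connect()` response):

```python
raw = open("firmware.bin", "rb").read()   # hypothetical input file
packed = heatshrink.encode(raw, window_sz2=8, lookahead_sz2=4)
# pyheatshrink/heatshrink2 expose a matching decode() for sanity checks
assert heatshrink.decode(packed, window_sz2=8, lookahead_sz2=4) == raw
print("ratio: %.2f" % (len(raw) / len(packed)))
```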
@@ -419,17 +423,17 @@ class FileTransferProtocol(object):
             self.write(data[start:end])
             kibs = (( (i+1) * block_size) / 1024) / (millis() + 1 - start_time) * 1000
             if (i / blocks) >= dump_pctg:
-                print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3}".format((i / blocks) * 100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression_support else "", self.protocol.errors), end='')
+                print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3}".format((i / blocks) * 100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression else "", self.protocol.errors), end='')
                 dump_pctg += 0.1
             if self.protocol.errors > 0:
                 # Dump last status (errors may not be visible)
-                print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3} - Aborting...".format((i / blocks) * 100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression_support else "", self.protocol.errors), end='')
+                print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3} - Aborting...".format((i / blocks) * 100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression else "", self.protocol.errors), end='')
                 print("") # New line to break the transfer speed line
                 self.close()
                 print("Transfer aborted due to protocol errors")
                 #raise Exception("Transfer aborted due to protocol errors")
-                return False;
-        print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3}".format(100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression_support else "", self.protocol.errors)) # no one likes transfers finishing at 99.8%
+                return False
+        print("\r{0:2.0f}% {1:4.2f}KiB/s {2} Errors: {3}".format(100, kibs, "[{0:4.2f}KiB/s]".format(kibs * cratio) if compression else "", self.protocol.errors)) # no one likes transfers finishing at 99.8%
 
         if not self.close():
             print("Transfer failed")
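The `kibs` expression is simply bytes-sent over elapsed wall time. Worked example with round numbers (all values illustrative):

```python
block_size = 512        # bytes per protocol block
i = 199                 # zero-based index of the block just written
elapsed_ms = 4000.0     # millis() + 1 - start_time
kibs = (((i + 1) * block_size) / 1024) / elapsed_ms * 1000
print(kibs)             # 25.0 KiB/s; scaled by cratio when compressing
```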
0  buildroot/share/scripts/__init__.py  Normal file
@@ -9,6 +9,29 @@
 # If no language codes are specified then all languages will be checked
 #
 
+langname() {
+  case "$1" in
+    an   ) echo "Aragonese" ;;            bg   ) echo "Bulgarian" ;;
+    ca   ) echo "Catalan" ;;              cz   ) echo "Czech" ;;
+    da   ) echo "Danish" ;;               de   ) echo "German" ;;
+    el   ) echo "Greek" ;;                el_CY) echo "Greek (Cyprus)" ;;
+    el_gr) echo "Greek (Greece)" ;;       en   ) echo "English" ;;
+    es   ) echo "Spanish" ;;              eu   ) echo "Basque-Euskera" ;;
+    fi   ) echo "Finnish" ;;              fr   ) echo "French" ;;
+    fr_na) echo "French (no accent)" ;;   gl   ) echo "Galician" ;;
+    hr   ) echo "Croatian (Hrvatski)" ;;  hu   ) echo "Hungarian / Magyar" ;;
+    it   ) echo "Italian" ;;              jp_kana) echo "Japanese (Kana)" ;;
+    ko_KR) echo "Korean" ;;               nl   ) echo "Dutch" ;;
+    pl   ) echo "Polish" ;;               pt   ) echo "Portuguese" ;;
+    pt_br) echo "Portuguese (Brazil)" ;;  ro   ) echo "Romanian" ;;
+    ru   ) echo "Russian" ;;              sk   ) echo "Slovak" ;;
+    sv   ) echo "Swedish" ;;              tr   ) echo "Turkish" ;;
+    uk   ) echo "Ukrainian" ;;            vi   ) echo "Vietnamese" ;;
+    zh_CN) echo "Simplified Chinese" ;;   zh_TW) echo "Traditional Chinese" ;;
+    *    ) echo "<unknown>" ;;
+  esac
+}
+
 LANGHOME="Marlin/src/lcd/language"
 
 [ -d $LANGHOME ] && cd $LANGHOME
@@ -20,7 +43,7 @@ TEST_LANGS=""
|
||||
if [[ -n $@ ]]; then
|
||||
for K in "$@"; do
|
||||
for F in $FILES; do
|
||||
[[ "$F" != "${F%$K*}" ]] && TEST_LANGS+="$F "
|
||||
[[ $F == $K ]] && TEST_LANGS+="$F "
|
||||
done
|
||||
done
|
||||
[[ -z $TEST_LANGS ]] && { echo "No languages matching $@." ; exit 0 ; }
|
||||
@@ -28,20 +51,54 @@ else
   TEST_LANGS=$FILES
 fi
 
-echo "Missing strings for $TEST_LANGS..."
+echo "Finding all missing strings for $TEST_LANGS..."
+
+WORD_LINES=()  # Complete lines for all words (or, grep out of en at the end instead)
+ALL_MISSING=() # All missing languages for each missing word
+#NEED_WORDS=() # All missing words across all specified languages
+
+WORD_COUNT=0
 
+# Go through all strings in the English language file
+# For each word, query all specified languages for the word
+# If the word is missing, add its language to the list
 for WORD in $(awk '/LSTR/{print $2}' language_en.h); do
+  # Skip MSG_MARLIN
   [[ $WORD == "MSG_MARLIN" ]] && break
   LANG_LIST=""
+
+  ((WORD_COUNT++))
+
+  # Find all selected languages that lack the string
+  LANG_MISSING=" "
   for LANG in $TEST_LANGS; do
     if [[ $(grep -c -E "^ *LSTR +$WORD\b" language_${LANG}.h) -eq 0 ]]; then
       INHERIT=$(awk '/using namespace/{print $3}' language_${LANG}.h | sed -E 's/Language_([a-zA-Z_]+)\s*;/\1/')
       if [[ -z $INHERIT || $INHERIT == "en" ]]; then
         LANG_LIST+=" $LANG"
+        LANG_MISSING+="$LANG "
       elif [[ $(grep -c -E "^ *LSTR +$WORD\b" language_${INHERIT}.h) -eq 0 ]]; then
         LANG_LIST+=" $LANG"
+        LANG_MISSING+="$LANG "
       fi
     fi
   done
   [[ -n $LANG_LIST ]] && printf "%-38s :%s\n" "$WORD" "$LANG_LIST"
+  # For each word store all the missing languages
+  if [[ $LANG_MISSING != " " ]]; then
+    WORD_LINES+=("$(grep -m 1 -E "$WORD\b" language_en.h)")
+    ALL_MISSING+=("$LANG_MISSING")
+    #NEED_WORDS+=($WORD)
+  fi
 done
+
+echo
+echo "${#WORD_LINES[@]} out of $WORD_COUNT LCD strings need translation"
+
+for LANG in $TEST_LANGS; do
+  HED=0 ; IND=0
+  for WORDLANGS in "${ALL_MISSING[@]}"; do
+    # If the current word is missing from the current language then print it
+    if [[ $WORDLANGS =~ " $LANG " ]]; then
+      [[ $HED == 0 ]] && { echo ; echo "Missing strings for language_$LANG.h ($(langname $LANG)):" ; HED=1 ; }
+      echo "${WORD_LINES[$IND]}"
+    fi
+    ((IND++))
+  done
+done
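A string only counts as missing when neither the language file nor the file it inherits from (its `using namespace` parent) defines it, and inheriting straight from English still counts as untranslated. The same check, sketched in Python under the assumption of the stock `language_XX.h` layout:

```python
import re
from pathlib import Path

LANGHOME = Path("Marlin/src/lcd/language")

def has_string(langfile: Path, word: str) -> bool:
    # Mirrors the grep: an own "LSTR <WORD>" definition in this file.
    pat = re.compile(r'^ *LSTR +%s\b' % re.escape(word), re.M)
    return bool(pat.search(langfile.read_text(encoding='utf-8')))

def is_missing(lang: str, word: str) -> bool:
    f = LANGHOME / ('language_%s.h' % lang)
    if has_string(f, word): return False
    m = re.search(r'using namespace\s+Language_(\w+)', f.read_text(encoding='utf-8'))
    parent = m.group(1) if m else 'en'
    if parent == 'en': return True   # only English to fall back on
    return not has_string(LANGHOME / ('language_%s.h' % parent), word)
```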
153  buildroot/share/scripts/languageExport.py  Executable file
@@ -0,0 +1,153 @@
#!/usr/bin/env python3
'''
languageExport.py

Export LCD language strings to CSV files for easier translation.
Use importTranslations.py to import CSV into the language files.

'''

import re
from pathlib import Path
from languageUtil import namebyid

LANGHOME = "Marlin/src/lcd/language"

# Write multiple sheets if true, otherwise write one giant sheet
MULTISHEET = True
OUTDIR = 'out-csv'

# Check for the path to the language files
if not Path(LANGHOME).is_dir():
    print("Error: Couldn't find the '%s' directory." % LANGHOME)
    print("Edit LANGHOME or cd to the root of the repo before running.")
    exit(1)

# A limit just for testing
LIMIT = 0

# A dictionary to contain strings for each language.
# Init with 'en' so English will always be first.
language_strings = { 'en': 0 }

# A dictionary to contain all distinct LCD string names
names = {}

# Get all "language_*.h" files
langfiles = sorted(list(Path(LANGHOME).glob('language_*.h')))

# Read each language file
for langfile in langfiles:
    # Get the language code from the filename
    langcode = langfile.name.replace('language_', '').replace('.h', '')

    # Skip 'test' and any others that we don't want
    if langcode in ['test']: continue

    # Open the file
    f = open(langfile, 'r', encoding='utf-8')
    if not f: continue

    # Flags to indicate a wide or tall section
    wideflag, tallflag = False, False
    # A counter for the number of strings in the file
    stringcount = 0
    # A dictionary to hold all the strings
    strings = { 'narrow': {}, 'wide': {}, 'tall': {} }
    # Read each line in the file
    for line in f:
        # Clean up the line for easier parsing
        line = line.split("//")[0].strip()
        if line.endswith(';'): line = line[:-1].strip()

        # Check for wide or tall sections, assume no complicated nesting
        if line.startswith("#endif") or line.startswith("#else"):
            wideflag, tallflag = False, False
        elif re.match(r'#if.*WIDTH\s*>=?\s*2[01].*', line): wideflag = True
        elif re.match(r'#if.*LCD_HEIGHT\s*>=?\s*4.*', line): tallflag = True

        # For string-defining lines capture the string data
        match = re.match(r'LSTR\s+([A-Z0-9_]+)\s*=\s*(.+)\s*', line)
        if match:
            # Name and quote-sanitized value
            name, value = match.group(1), match.group(2).replace('\\"', '$$$')

            # Remove all _UxGT wrappers from the value in a non-greedy way
            value = re.sub(r'_UxGT\((".*?")\)', r'\1', value)

            # Multi-line strings get one or more bars | for identification
            multiline = 0
            multimatch = re.match(r'.*MSG_(\d)_LINE\s*\(\s*(.+?)\s*\).*', value)
            if multimatch:
                multiline = int(multimatch.group(1))
                value = '|' + re.sub(r'"\s*,\s*"', '|', multimatch.group(2))

            # Wrap inline defines in parentheses
            value = re.sub(r' *([A-Z0-9]+_[A-Z0-9_]+) *', r'(\1)', value)
            # Remove quotes around strings
            value = re.sub(r'"(.*?)"', r'\1', value).replace('$$$', '""')
            # Store all unique names as dictionary keys
            names[name] = 1
            # Store the string as narrow or wide
            strings['tall' if tallflag else 'wide' if wideflag else 'narrow'][name] = value

        # Increment the string counter
        stringcount += 1
        # Break for testing
        if LIMIT and stringcount >= LIMIT: break

    # Close the file
    f.close()
    # Store the array in the dict
    language_strings[langcode] = strings

# Get the language codes from the dictionary
langcodes = list(language_strings.keys())

# Print the array
#print(language_strings)

# Report the total number of unique strings
print("Found %s distinct LCD strings." % len(names))

# Write a single language entry to the CSV file with narrow, wide, and tall strings
def write_csv_lang(f, strings, name):
    f.write(',')
    if name in strings['narrow']: f.write('"%s"' % strings['narrow'][name])
    f.write(',')
    if name in strings['wide']: f.write('"%s"' % strings['wide'][name])
    f.write(',')
    if name in strings['tall']: f.write('"%s"' % strings['tall'][name])

if MULTISHEET:
    #
    # Export a separate sheet for each language
    #
    Path.mkdir(Path(OUTDIR), exist_ok=True)

    for lang in langcodes:
        with open("%s/language_%s.csv" % (OUTDIR, lang), 'w', encoding='utf-8') as f:
            lname = lang + ' ' + namebyid(lang)
            header = ['name', lname, lname + ' (wide)', lname + ' (tall)']
            f.write('"' + '","'.join(header) + '"\n')

            for name in names.keys():
                f.write('"' + name + '"')
                write_csv_lang(f, language_strings[lang], name)
                f.write('\n')

else:
    #
    # Export one large sheet containing all languages
    #
    with open("languages.csv", 'w', encoding='utf-8') as f:
        header = ['name']
        for lang in langcodes:
            lname = lang + ' ' + namebyid(lang)
            header += [lname, lname + ' (wide)', lname + ' (tall)']
        f.write('"' + '","'.join(header) + '"\n')

        for name in names.keys():
            f.write('"' + name + '"')
            for lang in langcodes: write_csv_lang(f, language_strings[lang], name)
            f.write('\n')
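The exporter's core transform is the `LSTR` regex plus `_UxGT` unwrapping. Tracing one representative line by hand (sample string invented for the demo):

```python
import re
line = 'LSTR MSG_INFO_SCREEN = _UxGT("Info Screen");'.split('//')[0].strip()
if line.endswith(';'): line = line[:-1].strip()
m = re.match(r'LSTR\s+([A-Z0-9_]+)\s*=\s*(.+)\s*', line)
name, value = m.group(1), m.group(2).replace('\\"', '$$$')
value = re.sub(r'_UxGT\((".*?")\)', r'\1', value)   # strip the macro
value = re.sub(r'"(.*?)"', r'\1', value).replace('$$$', '""')
print(name, '->', value)   # MSG_INFO_SCREEN -> Info Screen
```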
219  buildroot/share/scripts/languageImport.py  Executable file
@@ -0,0 +1,219 @@
#!/usr/bin/env python3
"""
languageImport.py

Import LCD language strings from a CSV file or Google Sheets
and write Marlin LCD language files based on the data.

Use languageExport.py to export CSV from the language files.

Google Sheets Link:
https://docs.google.com/spreadsheets/d/12yiy-kS84ajKFm7oQIrC4CF8ZWeu9pAR4zrgxH4ruk4/edit#gid=84528699

TODO: Use the defines and comments above the namespace from existing language files.
      Get the 'constexpr uint8_t CHARSIZE' from existing language files.
      Get the correct 'using namespace' for languages that don't inherit from English.

"""

import sys, re, requests, csv, datetime
from languageUtil import namebyid

LANGHOME = "Marlin/src/lcd/language"
OUTDIR = 'out-language'

# Get the file path from the command line
FILEPATH = sys.argv[1] if len(sys.argv) > 1 else None

download = FILEPATH == 'download'

if not FILEPATH or download:
    SHEETID = "12yiy-kS84ajKFm7oQIrC4CF8ZWeu9pAR4zrgxH4ruk4"
    FILEPATH = 'https://docs.google.com/spreadsheet/ccc?key=%s&output=csv' % SHEETID

if FILEPATH.startswith('http'):
    response = requests.get(FILEPATH)
    assert response.status_code == 200, 'GET failed for %s' % FILEPATH
    csvdata = response.content.decode('utf-8')
else:
    if not FILEPATH.endswith('.csv'): FILEPATH += '.csv'
    with open(FILEPATH, 'r', encoding='utf-8') as f: csvdata = f.read()

if not csvdata:
    print("Error: couldn't read CSV data from %s" % FILEPATH)
    exit(1)

if download:
    DLNAME = sys.argv[2] if len(sys.argv) > 2 else 'languages.csv'
    if not DLNAME.endswith('.csv'): DLNAME += '.csv'
    with open(DLNAME, 'w', encoding='utf-8') as f: f.write(csvdata)
    print("Downloaded %s from %s" % (DLNAME, FILEPATH))
    exit(0)

lines = csvdata.splitlines()
print(lines)
reader = csv.reader(lines, delimiter=',')
gothead = False
columns = ['']
numcols = 0
strings_per_lang = {}
for row in reader:
    if not gothead:
        gothead = True
        numcols = len(row)
        if row[0] != 'name':
            print('Error: first column should be "name"')
            exit(1)
        # The rest of the columns are language codes and names
        for i in range(1, numcols):
            elms = row[i].split(' ')
            lang = elms[0]
            style = ('Wide' if elms[-1] == '(wide)' else 'Tall' if elms[-1] == '(tall)' else 'Narrow')
            columns.append({ 'lang': lang, 'style': style })
            if not lang in strings_per_lang: strings_per_lang[lang] = {}
            if not style in strings_per_lang[lang]: strings_per_lang[lang][style] = {}
        continue
    # Add the named string for all the included languages
    name = row[0]
    for i in range(1, numcols):
        str = row[i]
        if str:
            col = columns[i]
            strings_per_lang[col['lang']][col['style']][name] = str

# Create a folder for the imported language outfiles
from pathlib import Path
Path.mkdir(Path(OUTDIR), exist_ok=True)

FILEHEADER = '''
/**
 * Marlin 3D Printer Firmware
 * Copyright (c) 2023 MarlinFirmware [https://github.com/MarlinFirmware/Marlin]
 *
 * Based on Sprinter and grbl.
 * Copyright (c) 2011 Camiel Gubbels / Erik van der Zalm
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
#pragma once

/**
 * %s
 *
 * LCD Menu Messages
 * See also https://marlinfw.org/docs/development/lcd_language.html
 *
 * Substitutions are applied for the following characters when used in menu items titles:
 *
 *   $ displays an inserted string
 *   { displays  '0'....'10' for indexes 0 - 10
 *   ~ displays  '1'....'11' for indexes 0 - 10
 *   * displays 'E1'...'E11' for indexes 0 - 10 (By default. Uses LCD_FIRST_TOOL)
 *   @ displays an axis name such as XYZUVW, or E for an extruder
 */

'''

# Iterate over the languages which correspond to the columns
# The columns are assumed to be grouped by language in the order Narrow, Wide, Tall
# TODO: Go through lang only, then impose the order Narrow, Wide, Tall.
#       So if something is missing or out of order everything still gets built correctly.

f = None
gotlang = {}
for i in range(1, numcols):
    #if i > 6: break # Testing

    col = columns[i]
    lang, style = col['lang'], col['style']

    # If we haven't already opened a file for this language, do so now
    if not lang in gotlang:
        gotlang[lang] = {}
        if f: f.close()
        fn = "%s/language_%s.h" % (OUTDIR, lang)
        f = open(fn, 'w', encoding='utf-8')
        if not f:
            print("Failed to open %s." % fn)
            exit(1)

        # Write the opening header for the new language file
        #f.write(FILEHEADER % namebyid(lang))
        f.write('/**\n * Imported from %s on %s at %s\n */\n' % (FILEPATH, datetime.date.today(), datetime.datetime.now().strftime("%H:%M:%S")))

    # Start a namespace for the language and style
    f.write('\nnamespace Language%s_%s {\n' % (style, lang))

    # Wide and tall namespaces inherit from the others
    if style == 'Wide':
        f.write('  using namespace LanguageNarrow_%s;\n' % lang)
        f.write('  #if LCD_WIDTH >= 20 || HAS_DWIN_E3V2\n')
    elif style == 'Tall':
        f.write('  using namespace LanguageWide_%s;\n' % lang)
        f.write('  #if LCD_HEIGHT >= 4\n')
    elif lang != 'en':
        f.write('  using namespace Language_en; // Inherit undefined strings from English\n')

    # Formatting for the lines
    indent = '  ' if style == 'Narrow' else '    '
    width = 34 if style == 'Narrow' else 32
    lstr_fmt = '%sLSTR %%-%ds = %%s;%%s\n' % (indent, width)

    # Emit all the strings for this language and style
    for name in strings_per_lang[lang][style].keys():
        # Get the raw string value
        val = strings_per_lang[lang][style][name]
        # Count the number of bars
        if val.startswith('|'):
            bars = val.count('|')
            val = val[1:]
        else:
            bars = 0
        # Escape backslashes, substitute quotes, and wrap in _UxGT("...")
        val = '_UxGT("%s")' % val.replace('\\', '\\\\').replace('"', '$$$')
        # Move named references outside of the macro
        val = re.sub(r'\(([A-Z0-9]+_[A-Z0-9_]+)\)', r'") \1 _UxGT("', val)
        # Remove all empty _UxGT("") that result from the above
        val = re.sub(r'\s*_UxGT\(""\)\s*', '', val)
        # No wrapper needed for just spaces
        val = re.sub(r'_UxGT\((" +")\)', r'\1', val)
        # Multi-line strings start with a bar...
        if bars:
            # Wrap the string in MSG_#_LINE(...) and split on bars
            val = re.sub(r'^_UxGT\((.+)\)', r'_UxGT(MSG_%s_LINE(\1))' % bars, val)
            val = val.replace('|', '", "')
        # Restore quotes inside the string
        val = val.replace('$$$', '\\"')
        # Add a comment with the English string for reference
        comm = ''
        if lang != 'en' and 'en' in strings_per_lang:
            en = strings_per_lang['en']
            if name in en[style]: str = en[style][name]
            elif name in en['Narrow']: str = en['Narrow'][name]
            if str:
                cfmt = '%%%ss// %%s' % (50 - len(val) if len(val) < 50 else 1)
                comm = cfmt % (' ', str)

        # Write out the string definition
        f.write(lstr_fmt % (name, val, comm))

    if style == 'Wide' or style == 'Tall': f.write('  #endif\n')

    f.write('}\n') # End namespace

    # Assume the 'Tall' namespace comes last
    if style == 'Tall': f.write('\nnamespace Language_%s {\n  using namespace LanguageTall_%s;\n}\n' % (lang, lang))

# Close the last-opened output file
if f: f.close()
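Multi-line strings travel through the CSV with one leading bar per extra source line, which the importer turns back into a `MSG_#_LINE(...)` wrapper. The round trip for a made-up two-line value:

```python
import re
val = '|Line one|Line two'   # as produced by languageExport.py
bars = val.count('|')        # 2 -> becomes MSG_2_LINE(...)
val = val[1:]
val = '_UxGT("%s")' % val
val = re.sub(r'^_UxGT\((.+)\)', r'_UxGT(MSG_%s_LINE(\1))' % bars, val)
val = val.replace('|', '", "')
print(val)                   # _UxGT(MSG_2_LINE("Line one", "Line two"))
```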
41  buildroot/share/scripts/languageUtil.py  Executable file
@@ -0,0 +1,41 @@
#!/usr/bin/env python3
#
# languageUtil.py
#

# A dictionary to contain language names
LANGNAME = {
    'an': "Aragonese",
    'bg': "Bulgarian",
    'ca': "Catalan",
    'cz': "Czech",
    'da': "Danish",
    'de': "German",
    'el': "Greek", 'el_CY': "Greek (Cyprus)", 'el_gr': "Greek (Greece)",
    'en': "English",
    'es': "Spanish",
    'eu': "Basque-Euskera",
    'fi': "Finnish",
    'fr': "French", 'fr_na': "French (no accent)",
    'gl': "Galician",
    'hr': "Croatian (Hrvatski)",
    'hu': "Hungarian / Magyar",
    'it': "Italian",
    'jp_kana': "Japanese (Kana)",
    'ko_KR': "Korean",
    'nl': "Dutch",
    'pl': "Polish",
    'pt': "Portuguese", 'pt_br': "Portuguese (Brazil)",
    'ro': "Romanian",
    'ru': "Russian",
    'sk': "Slovak",
    'sv': "Swedish",
    'tr': "Turkish",
    'uk': "Ukrainian",
    'vi': "Vietnamese",
    'zh_CN': "Simplified Chinese", 'zh_TW': "Traditional Chinese"
}

def namebyid(id):
    if id in LANGNAME: return LANGNAME[id]
    return '<unknown>'
@@ -10,6 +10,11 @@
 
 const fs = require("fs");
 
+var do_log = false
+function logmsg(msg, line='') {
+  if (do_log) console.log(msg, line);
+}
+
 // String lpad / rpad
 String.prototype.lpad = function(len, chr) {
   if (!len) return this;
@@ -27,8 +32,17 @@ String.prototype.rpad = function(len, chr) {
   return s;
 };
 
+// Concatenate a string, adding a space if necessary
+// to avoid merging two words
+String.prototype.concat_with_space = function(str) {
+  const c = this.substr(-1), d = str.charAt(0);
+  if (c !== ' ' && c !== '' && d !== ' ' && d !== '')
+    str = ' ' + str;
+  return this + str;
+};
+
 const mpatt = [ '-?\\d{1,3}', 'P[A-I]\\d+', 'P\\d_\\d+', 'Pin[A-Z]\\d\\b' ],
-      definePatt = new RegExp(`^\\s*(//)?#define\\s+[A-Z_][A-Z0-9_]+\\s+(${mpatt[0]}|${mpatt[1]}|${mpatt[2]}|${mpatt[3]})\\s*(//.*)?$`, 'gm'),
+      definePatt = new RegExp(`^\\s*(//)?#define\\s+[A-Z_][A-Z0-9_]+\\s+(${mpatt.join('|')})\\s*(//.*)?$`, 'gm'),
       ppad = [ 3, 4, 5, 5 ],
       col_comment = 50,
       col_value_rj = col_comment - 3;
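`concat_with_space` only adds a separator when neither side already supplies one, so re-padded fragments can never fuse into a single token. The same guard in Python (hypothetical helper, for illustration only):

```python
def concat_with_space(left: str, right: str) -> str:
    c, d = left[-1:], right[:1]   # last char of left, first char of right
    if c not in (' ', '') and d not in (' ', ''):
        right = ' ' + right
    return left + right

assert concat_with_space('#define X_PIN', '5') == '#define X_PIN 5'
assert concat_with_space('#define X_PIN ', '5') == '#define X_PIN 5'
```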
@@ -38,11 +52,11 @@ for (let m of mpatt) mexpr.push(new RegExp('^' + m + '$'));
 
 const argv = process.argv.slice(2), argc = argv.length;
 
-var src_file = 0, src_name = 'STDIN', dst_file, do_log = false;
+var src_file = 0, dst_file;
 if (argc > 0) {
   let ind = 0;
   if (argv[0] == '-v') { do_log = true; ind++; }
-  dst_file = src_file = src_name = argv[ind++];
+  dst_file = src_file = argv[ind++];
   if (ind < argc) dst_file = argv[ind];
 }
 
@@ -56,6 +70,7 @@ else
 // Find the pin pattern so non-pin defines can be skipped
 function get_pin_pattern(txt) {
   var r, m = 0, match_count = [ 0, 0, 0, 0 ];
+  var max_match_count = 0, max_match_index = -1;
   definePatt.lastIndex = 0;
   while ((r = definePatt.exec(txt)) !== null) {
     let ind = -1;
@@ -65,12 +80,15 @@ function get_pin_pattern(txt) {
         return r[2].match(p);
       }) ) {
       const m = ++match_count[ind];
-      if (m >= 5) {
-        return { match: mpatt[ind], pad:ppad[ind] };
+      if (m > max_match_count) {
+        max_match_count = m;
+        max_match_index = ind;
       }
     }
   }
-  return null;
+  if (max_match_index === -1) return null;
+
+  return { match:mpatt[max_match_index], pad:ppad[max_match_index] };
 }
 
 function process_text(txt) {
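The old logic returned the first pin style to reach five matches; the revised version tallies every `#define` and returns whichever style is most common, which is more robust for mixed files. The selection step, sketched in Python with made-up tallies:

```python
mpatt = [ r'-?\d{1,3}', r'P[A-I]\d+', r'P\d_\d+', r'Pin[A-Z]\d\b' ]
ppad  = [ 3, 4, 5, 5 ]
match_count = [37, 2, 0, 0]   # hypothetical per-style tallies
best = max(range(len(mpatt)), key=lambda i: match_count[i])
patt = None if match_count[best] == 0 else { 'match': mpatt[best], 'pad': ppad[best] }
print(patt)                   # {'match': '-?\\d{1,3}', 'pad': 3}
```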
@@ -79,13 +97,14 @@ function process_text(txt) {
   if (!patt) return txt;
   const pindefPatt = new RegExp(`^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s+(${patt.match})\\s*(//.*)?$`),
         noPinPatt = new RegExp(`^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s+(-1)\\s*(//.*)?$`),
-        skipPatt1 = new RegExp('^(\\s*(//)?#define)\\s+(AT90USB|USBCON|(BOARD|DAC|FLASH|HAS|IS|USE)_.+|.+_(ADDRESS|AVAILABLE|BAUDRATE|CLOCK|CONNECTION|DEFAULT|FREQ|ITEM|MODULE|NAME|ONLY|PERIOD|RANGE|RATE|SERIAL|SIZE|SPI|STATE|STEP|TIMER))\\s+(.+)\\s*(//.*)?$'),
+        skipPatt1 = new RegExp('^(\\s*(//)?#define)\\s+(AT90USB|USBCON|(BOARD|DAC|FLASH|HAS|IS|USE)_.+|.+_(ADDRESS|AVAILABLE|BAUDRATE|CLOCK|CONNECTION|DEFAULT|ERROR|EXTRUDERS|FREQ|ITEM|MKS_BASE_VERSION|MODULE|NAME|ONLY|ORIENTATION|PERIOD|RANGE|RATE|READ_RETRIES|SERIAL|SIZE|SPI|STATE|STEP|TIMER|VERSION))\\s+(.+)\\s*(//.*)?$'),
         skipPatt2 = new RegExp('^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s+(0x[0-9A-Fa-f]+|\\d+|.+[a-z].+)\\s*(//.*)?$'),
+        skipPatt3 = /^\s*#e(lse|ndif)\b.*$/,
         aliasPatt = new RegExp('^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s+([A-Z_][A-Z0-9_()]+)\\s*(//.*)?$'),
         switchPatt = new RegExp('^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s*(//.*)?$'),
         undefPatt = new RegExp('^(\\s*(//)?#undef)\\s+([A-Z_][A-Z0-9_]+)\\s*(//.*)?$'),
         defPatt = new RegExp('^(\\s*(//)?#define)\\s+([A-Z_][A-Z0-9_]+)\\s+([-_\\w]+)\\s*(//.*)?$'),
-        condPatt = new RegExp('^(\\s*(//)?#(if|ifn?def|else|elif)(\\s+\\S+)*)\\s+(//.*)$'),
+        condPatt = new RegExp('^(\\s*(//)?#(if|ifn?def|elif)(\\s+\\S+)*)\\s+(//.*)$'),
         commPatt = new RegExp('^\\s{20,}(//.*)?$');
   const col_value_lj = col_comment - patt.pad - 2;
   var r, out = '', check_comment_next = false;
@@ -101,74 +120,75 @@ function process_text(txt) {
       //
       // #define SKIP_ME
       //
-      if (do_log) console.log("skip:", line);
+      logmsg("skip:", line);
     }
     else if ((r = pindefPatt.exec(line)) !== null) {
       //
       // #define MY_PIN [pin]
       //
-      if (do_log) console.log("pin:", line);
+      logmsg("pin:", line);
       const pinnum = r[4].charAt(0) == 'P' ? r[4] : r[4].lpad(patt.pad);
       line = r[1] + ' ' + r[3];
-      line = line.rpad(col_value_lj) + pinnum;
-      if (r[5]) line = line.rpad(col_comment) + r[5];
+      line = line.rpad(col_value_lj).concat_with_space(pinnum);
+      if (r[5]) line = line.rpad(col_comment).concat_with_space(r[5]);
     }
     else if ((r = noPinPatt.exec(line)) !== null) {
       //
       // #define MY_PIN -1
       //
-      if (do_log) console.log("pin -1:", line);
+      logmsg("pin -1:", line);
       line = r[1] + ' ' + r[3];
-      line = line.rpad(col_value_lj) + '-1';
-      if (r[5]) line = line.rpad(col_comment) + r[5];
+      line = line.rpad(col_value_lj).concat_with_space('-1');
+      if (r[5]) line = line.rpad(col_comment).concat_with_space(r[5]);
     }
-    else if (skipPatt2.exec(line) !== null) {
+    else if (skipPatt2.exec(line) !== null || skipPatt3.exec(line) !== null) {
       //
       // #define SKIP_ME
+      // #else, #endif
       //
-      if (do_log) console.log("skip:", line);
+      logmsg("skip:", line);
     }
     else if ((r = aliasPatt.exec(line)) !== null) {
       //
       // #define ALIAS OTHER
       //
-      if (do_log) console.log("alias:", line);
+      logmsg("alias:", line);
       line = r[1] + ' ' + r[3];
-      line += r[4].lpad(col_value_rj + 1 - line.length);
-      if (r[5]) line = line.rpad(col_comment) + r[5];
+      line = line.concat_with_space(r[4].lpad(col_value_rj + 1 - line.length));
+      if (r[5]) line = line.rpad(col_comment).concat_with_space(r[5]);
     }
     else if ((r = switchPatt.exec(line)) !== null) {
       //
       // #define SWITCH
       //
-      if (do_log) console.log("switch:", line);
+      logmsg("switch:", line);
       line = r[1] + ' ' + r[3];
-      if (r[4]) line = line.rpad(col_comment) + r[4];
+      if (r[4]) line = line.rpad(col_comment).concat_with_space(r[4]);
       check_comment_next = true;
     }
     else if ((r = defPatt.exec(line)) !== null) {
       //
       // #define ...
       //
-      if (do_log) console.log("def:", line);
+      logmsg("def:", line);
       line = r[1] + ' ' + r[3] + ' ';
-      line += r[4].lpad(col_value_rj + 1 - line.length);
+      line = line.concat_with_space(r[4].lpad(col_value_rj + 1 - line.length));
       if (r[5]) line = line.rpad(col_comment - 1) + ' ' + r[5];
     }
     else if ((r = undefPatt.exec(line)) !== null) {
       //
       // #undef ...
       //
-      if (do_log) console.log("undef:", line);
+      logmsg("undef:", line);
       line = r[1] + ' ' + r[3];
-      if (r[4]) line = line.rpad(col_comment) + r[4];
+      if (r[4]) line = line.rpad(col_comment).concat_with_space(r[4]);
     }
     else if ((r = condPatt.exec(line)) !== null) {
       //
-      // #if ...
+      // #if, #ifdef, #ifndef, #elif ...
      //
-      if (do_log) console.log("cond:", line);
-      line = r[1].rpad(col_comment) + r[5];
+      logmsg("cond:", line);
+      line = r[1].rpad(col_comment).concat_with_space(r[5]);
       check_comment_next = true;
     }
     out += line + '\n';
272  buildroot/share/scripts/pinsformat.py  Executable file
@@ -0,0 +1,272 @@
#!/usr/bin/env python3

#
# Formatter script for pins_MYPINS.h files
#
# Usage: pinsformat.py [infile] [outfile]
#
# With no parameters convert STDIN to STDOUT
#

import sys, re

do_log = False
def logmsg(msg, line):
    if do_log: print(msg, line)

col_comment = 50

# String lpad / rpad
def lpad(astr, fill, c=' '):
    if not fill: return astr
    need = fill - len(astr)
    return astr if need <= 0 else (need * c) + astr

def rpad(astr, fill, c=' '):
    if not fill: return astr
    need = fill - len(astr)
    return astr if need <= 0 else astr + (need * c)

# Pin patterns
mpatt = [ r'-?\d{1,3}', r'P[A-I]\d+', r'P\d_\d+', r'Pin[A-Z]\d\b' ]
mstr = '|'.join(mpatt)
mexpr = [ re.compile(f'^{m}$') for m in mpatt ]

# Corresponding padding for each pattern
ppad = [ 3, 4, 5, 5 ]

# Match a define line
definePatt = re.compile(rf'^\s*(//)?#define\s+[A-Z_][A-Z0-9_]+\s+({mstr})\s*(//.*)?$')

def format_pins(argv):
    src_file = 'stdin'
    dst_file = None

    scnt = 0
    for arg in argv:
        if arg == '-v':
            do_log = True
        elif scnt == 0:
            # Get a source file if specified. Default destination is the same file
            src_file = dst_file = arg
            scnt += 1
        elif scnt == 1:
            # Get destination file if specified
            dst_file = arg
            scnt += 1

    # No text to process yet
    file_text = ''

    if src_file == 'stdin':
        # If no source file specified read from STDIN
        file_text = sys.stdin.read()
    else:
        # Open and read the file src_file
        with open(src_file, 'r') as rf: file_text = rf.read()

    if len(file_text) == 0:
        print('No text to process')
        return

    # Read from file or STDIN until it terminates
    filtered = process_text(file_text)
    if dst_file:
        with open(dst_file, 'w') as wf: wf.write(filtered)
    else:
        print(filtered)

# Find the pin pattern so non-pin defines can be skipped
def get_pin_pattern(txt):
    r = ''
    m = 0
    match_count = [ 0, 0, 0, 0 ]

    # Find the most common matching pattern
    match_threshold = 5
    for line in txt.split('\n'):
        r = definePatt.match(line)
        if r == None: continue
        ind = -1
        for p in mexpr:
            ind += 1
            if not p.match(r[2]): continue
            match_count[ind] += 1
            if match_count[ind] >= match_threshold:
                return { 'match': mpatt[ind], 'pad':ppad[ind] }
    return None

def process_text(txt):
    if len(txt) == 0: return '(no text)'
    patt = get_pin_pattern(txt)
    if patt == None: return txt

    pmatch = patt['match']
    pindefPatt = re.compile(rf'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s+({pmatch})\s*(//.*)?$')
    noPinPatt  = re.compile(r'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s+(-1)\s*(//.*)?$')
    skipPatt1  = re.compile(r'^(\s*(//)?#define)\s+(AT90USB|USBCON|(BOARD|DAC|FLASH|HAS|IS|USE)_.+|.+_(ADDRESS|AVAILABLE|BAUDRATE|CLOCK|CONNECTION|DEFAULT|ERROR|EXTRUDERS|FREQ|ITEM|MKS_BASE_VERSION|MODULE|NAME|ONLY|ORIENTATION|PERIOD|RANGE|RATE|READ_RETRIES|SERIAL|SIZE|SPI|STATE|STEP|TIMER|VERSION))\s+(.+)\s*(//.*)?$')
    skipPatt2  = re.compile(r'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s+(0x[0-9A-Fa-f]+|\d+|.+[a-z].+)\s*(//.*)?$')
    skipPatt3  = re.compile(r'^\s*#e(lse|ndif)\b.*$')
    aliasPatt  = re.compile(r'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s+([A-Z_][A-Z0-9_()]+)\s*(//.*)?$')
    switchPatt = re.compile(r'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s*(//.*)?$')
    undefPatt  = re.compile(r'^(\s*(//)?#undef)\s+([A-Z_][A-Z0-9_]+)\s*(//.*)?$')
    defPatt    = re.compile(r'^(\s*(//)?#define)\s+([A-Z_][A-Z0-9_]+)\s+([-_\w]+)\s*(//.*)?$')
    condPatt   = re.compile(r'^(\s*(//)?#(if|ifn?def|elif)(\s+\S+)*)\s+(//.*)$')
    commPatt   = re.compile(r'^\s{20,}(//.*)?$')

    col_value_lj = col_comment - patt['pad'] - 2
    col_value_rj = col_comment - 3

    #
    # #define SKIP_ME
    #
    def trySkip1(d):
        if skipPatt1.match(d['line']) == None: return False
        logmsg("skip:", d['line'])
        return True

    #
    # #define MY_PIN [pin]
    #
    def tryPindef(d):
        line = d['line']
        r = pindefPatt.match(line)
        if r == None: return False
        logmsg("pin:", line)
        pinnum = r[4] if r[4][0] == 'P' else lpad(r[4], patt['pad'])
        line = f'{r[1]} {r[3]}'
        line = rpad(line, col_value_lj) + pinnum
        if r[5]: line = rpad(line, col_comment) + r[5]
        d['line'] = line
        return True

    #
    # #define MY_PIN -1
    #
    def tryNoPin(d):
        line = d['line']
        r = noPinPatt.match(line)
        if r == None: return False
        logmsg("pin -1:", line)
        line = f'{r[1]} {r[3]}'
        line = rpad(line, col_value_lj) + '-1'
        if r[5]: line = rpad(line, col_comment) + r[5]
        d['line'] = line
        return True

    #
    # #define SKIP_ME_TOO
    #
    def trySkip2(d):
        if skipPatt2.match(d['line']) == None: return False
        logmsg("skip:", d['line'])
        return True

    #
    # #else|endif
    #
    def trySkip3(d):
        if skipPatt3.match(d['line']) == None: return False
        logmsg("skip:", d['line'])
        return True

    #
    # #define ALIAS OTHER
    #
    def tryAlias(d):
        line = d['line']
        r = aliasPatt.match(line)
        if r == None: return False
        logmsg("alias:", line)
        line = f'{r[1]} {r[3]}'
        line += lpad(r[4], col_value_rj + 1 - len(line))
        if r[5]: line = rpad(line, col_comment) + r[5]
        d['line'] = line
        return True

    #
    # #define SWITCH
    #
    def trySwitch(d):
        line = d['line']
        r = switchPatt.match(line)
        if r == None: return False
        logmsg("switch:", line)
        line = f'{r[1]} {r[3]}'
        if r[4]: line = rpad(line, col_comment) + r[4]
        d['line'] = line
        d['check_comment_next'] = True
        return True

    #
    # #define ...
    #
    def tryDef(d):
        line = d['line']
        r = defPatt.match(line)
        if r == None: return False
        logmsg("def:", line)
        line = f'{r[1]} {r[3]} '
        line += lpad(r[4], col_value_rj + 1 - len(line))
        if r[5]: line = rpad(line, col_comment - 1) + ' ' + r[5]
        d['line'] = line
        return True

    #
    # #undef ...
    #
    def tryUndef(d):
        line = d['line']
        r = undefPatt.match(line)
        if r == None: return False
        logmsg("undef:", line)
        line = f'{r[1]} {r[3]}'
        if r[4]: line = rpad(line, col_comment) + r[4]
        d['line'] = line
        return True

    #
    # #if|ifdef|ifndef|elif ...
    #
    def tryCond(d):
        line = d['line']
        r = condPatt.match(line)
        if r == None: return False
        logmsg("cond:", line)
        line = rpad(r[1], col_comment) + r[5]
        d['line'] = line
        d['check_comment_next'] = True
        return True

    out = ''
    wDict = { 'check_comment_next': False }

    # Transform each line and add it to the output
    for line in txt.split('\n'):
        wDict['line'] = line
        if wDict['check_comment_next']:
            r = commPatt.match(line)
            wDict['check_comment_next'] = (r != None)

        if wDict['check_comment_next']:
            # Comments in column 50
            line = rpad('', col_comment) + r[1]

        elif trySkip1(wDict): pass      #define SKIP_ME
        elif tryPindef(wDict): pass     #define MY_PIN [pin]
        elif tryNoPin(wDict): pass      #define MY_PIN -1
        elif trySkip2(wDict): pass      #define SKIP_ME_TOO
        elif trySkip3(wDict): pass      #else|endif
        elif tryAlias(wDict): pass      #define ALIAS OTHER
        elif trySwitch(wDict): pass     #define SWITCH
        elif tryDef(wDict): pass        #define ...
        elif tryUndef(wDict): pass      #undef ...
        elif tryCond(wDict): pass       #if|ifdef|ifndef|elif ...

        out += wDict['line'] + '\n'

    return re.sub('\n\n$', '\n', re.sub(r'\n\n+', '\n\n', out))

# Python standard startup for command line with arguments
if __name__ == '__main__':
    format_pins(sys.argv[1:])
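Since `process_text()` is module-level, the formatter can be exercised without a real pins file (assuming the script is importable as `pinsformat`; the sample defines are invented, and at least five matching values are needed to satisfy the pattern threshold):

```python
import pinsformat

sample = "\n".join([
    "#define X_STEP_PIN 26 // X step",
    "#define X_DIR_PIN 27",
    "#define X_ENABLE_PIN 28",
    "#define Y_STEP_PIN 29",
    "#define Y_DIR_PIN 30",
])
# Pin values land in a common column, trailing comments at column 50.
print(pinsformat.process_text(sample))
```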
142  buildroot/share/scripts/rle16_compress_cpp_image_data.py  Executable file
@@ -0,0 +1,142 @@
#!/usr/bin/env python3
#
# Utility to compress Marlin RGB565 TFT data to RLE16 format.
# Reads the existing Marlin RGB565 cpp file and generates a new file with the additional RLE16 data.
#
# Usage: rle16_compress_cpp_image_data.py INPUT_FILE.cpp OUTPUT_FILE.cpp
#
import sys,struct
import re

def addCompressedData(input_file, output_file):
    ofile = open(output_file, 'wt')

    c_data_section = False
    c_skip_data = False
    c_footer = False
    raw_data = []
    rle_value = []
    rle_count = []
    arrname = ''

    line = input_file.readline()
    while line:
        if not c_footer:
            if not c_skip_data: ofile.write(line)

        if "};" in line:
            c_skip_data = False
            c_data_section = False
            c_footer = True

        if c_data_section:
            cleaned = re.sub(r"\s|,|\n", "", line)
            as_list = cleaned.split("0x")
            as_list.pop(0)
            raw_data += [int(x, 16) for x in as_list]

        if "const uint" in line:
            # e.g.: const uint16_t marlin_logo_480x320x16[153600] = {
            if "_rle16" in line:
                c_skip_data = True
            else:
                c_data_section = True
                arrname = line.split('[')[0].split(' ')[-1]
                print("Found data array", arrname)

        line = input_file.readline()

    input_file.close()

    #
    # RLE16 (run length 16) encoding
    # Convert data from raw RGB565 to a simple run-length-encoded format for each word of data.
    # - Each sequence begins with a count byte N.
    # - If the high bit is set in N the run contains N & 0x7F + 1 unique words.
    # - Otherwise it repeats the following word N + 1 times.
    # - Each RGB565 word is stored in MSB / LSB order.
    #
    def rle_encode(data):
        warn = "This may take a while" if len(data) > 300000 else ""
        print("Compressing image data...", warn)
        rledata = []
        distinct = []
        i = 0
        while i < len(data):
            v = data[i]
            i += 1
            rsize = 1
            for j in range(i, len(data)):
                if v != data[j]: break
                i += 1
                rsize += 1
                if rsize >= 128: break

            # If the run is one, add to the distinct values
            if rsize == 1: distinct.append(v)

            # If distinct length >= 127, or the repeat run is 2 or more,
            # store the distinct run.
            nr = len(distinct)
            if nr and (nr >= 128 or rsize > 1 or i >= len(data)):
                rledata += [(nr - 1) | 0x80] + distinct
                distinct = []

            # If the repeat run is 2 or more, store the repeat run.
            if rsize > 1: rledata += [rsize - 1, v]

        return rledata

    def append_byte(data, byte, cols=240):
        if data == '': data = ' '
        data += ('0x{0:02X}, '.format(byte)) # 6 characters
        if len(data) % (cols * 6 + 2) == 0: data = data.rstrip() + "\n "
        return data

    def rle_emit(ofile, arrname, rledata, rawsize):
        col = 0
        i = 0
        outstr = ''
        size = 0
        while i < len(rledata):
            rval = rledata[i]
            i += 1
            if rval & 0x80:
                count = (rval & 0x7F) + 1
                outstr = append_byte(outstr, rval)
                size += 1
                for j in range(count):
                    outstr = append_byte(outstr, rledata[i + j] >> 8)
                    outstr = append_byte(outstr, rledata[i + j] & 0xFF)
                    size += 2
                i += count
            else:
                outstr = append_byte(outstr, rval)
                outstr = append_byte(outstr, rledata[i] >> 8)
                outstr = append_byte(outstr, rledata[i] & 0xFF)
                i += 1
                size += 3

        outstr = outstr.rstrip()[:-1]
        ofile.write("\n// Saves %i bytes\nconst uint8_t %s_rle16[%d] = {\n%s\n};\n" % (rawsize - size, arrname, size, outstr))

        (w, h, d) = arrname.split("_")[-1].split('x')
        ofile.write("\nconst tImage MarlinLogo{0}x{1}x16 = MARLIN_LOGO_CHOSEN({0}, {1});\n".format(w, h))
        ofile.write("\n#endif // HAS_GRAPHICAL_TFT && SHOW_BOOTSCREEN\n".format(w, h))

    # Encode the data, write it out, close the file
    rledata = rle_encode(raw_data)
    rle_emit(ofile, arrname, rledata, len(raw_data) * 2)
    ofile.close()

if len(sys.argv) <= 2:
    print("Utility to compress Marlin RGB565 TFT data to RLE16 format.")
    print("Reads a Marlin RGB565 cpp file and generates a new file with the additional RLE16 data.")
    print("Usage: rle16_compress_cpp_image_data.py INPUT_FILE.cpp OUTPUT_FILE.cpp")
    exit(1)

output_cpp = sys.argv[2]
inname = sys.argv[1].replace('//', '/')
input_cpp = open(inname)
print("Processing", inname, "...")
addCompressedData(input_cpp, output_cpp)
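A hand-worked example of the RLE16 scheme described in the script's comments (sample words invented; assumes `rle_encode` is lifted out of `addCompressedData` for testing):

```python
data = [0x1234, 0x1234, 0x1234, 0xAAAA, 0xBBBB]
# Three repeats of 0x1234 -> repeat run: count byte 2 (N + 1 = 3 words).
# Two one-off words       -> distinct run: count byte (2 - 1) | 0x80 = 0x81.
assert rle_encode(data) == [0x02, 0x1234, 0x81, 0xAAAA, 0xBBBB]
# rle_emit() then writes each word MSB first: 02 12 34 81 AA AA BB BB
```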
200  buildroot/share/scripts/rle_compress_bitmap.py  Executable file
@@ -0,0 +1,200 @@
#!/usr/bin/env python3
#
# Bitwise RLE compress a Marlin mono DOGM bitmap.
# Input: An existing Marlin mono DOGM bitmap .cpp or .h file.
# Output: A new file with the original and compressed data.
#
# Usage: rle_compress_bitmap.py INPUT_FILE OUTPUT_FILE
#
import sys,struct
import re

def addCompressedData(input_file, output_file):
    ofile = open(output_file, 'wt')

    datatype = "uint8_t"
    bytewidth = 16
    raw_data = []
    arrname = ''

    c_data_section = False ; c_skip_data = False ; c_footer = False
    while True:
        line = input_file.readline()
        if not line: break

        if not c_footer:
            if not c_skip_data: ofile.write(line)

        mat = re.match(r'.+CUSTOM_BOOTSCREEN_BMPWIDTH\s+(\d+)', line)
        if mat: bytewidth = (int(mat[1]) + 7) // 8

        if "};" in line:
            c_skip_data = False
            c_data_section = False
            c_footer = True

        if c_data_section:
            cleaned = re.sub(r"\s|,|\n", "", line)
            mat = re.match(r'(0b|B)[01]{8}', cleaned)
            if mat:
                as_list = cleaned.split(mat[1])
                as_list.pop(0)
                raw_data += [int(x, 2) for x in as_list]
            else:
                as_list = cleaned.split("0x")
                as_list.pop(0)
                raw_data += [int(x, 16) for x in as_list]

        mat = re.match(r'const (uint\d+_t|unsigned char)', line)
        if mat:
            # e.g.: const unsigned char custom_start_bmp[] PROGMEM = {
            datatype = mat[0]
            if "_rle" in line:
                c_skip_data = True
            else:
                c_data_section = True
                arrname = line.split('[')[0].split(' ')[-1]
                print("Found data array", arrname)

    input_file.close()

    #print("\nRaw Bitmap Data", raw_data)

    #
    # Bitwise RLE (run length) encoding
    # Convert data from raw mono bitmap to a bitwise run-length-encoded format.
    # - The first nybble is the starting bit state. Changing this nybble inverts the bitmap.
    # - The following bytes provide the runs for alternating on/off bits.
    # - A value of 0-14 encodes a run of 1-15.
    # - A value of 15 indicates a run of 16-270 calculated using the next two nybbles.
    #
    def bitwise_rle_encode(data):

        def get_bit(data, n): return 1 if (data[n // 8] & (0x80 >> (n & 7))) else 0

        def try_encode(data, isext):
            bitslen = len(data) * 8
            bitstate = get_bit(data, 0)
            rledata = [ bitstate ]
            bigrun = 256 if isext else 272
            medrun = False

            i = 0
            runlen = -1
            while i <= bitslen:
                if i < bitslen: b = get_bit(data, i)
                runlen += 1
                if bitstate != b or i == bitslen:
                    if runlen >= bigrun:
                        isext = True
                        if medrun: return [], isext
                        rem = runlen & 0xFF
                        rledata += [ 15, 15, rem // 16, rem % 16 ]
                    elif runlen >= 16:
                        rledata += [ 15, runlen // 16 - 1, runlen % 16 ]
                        if runlen >= 256: medrun = True
                    else:
                        rledata += [ runlen - 1 ]
                    bitstate ^= 1
                    runlen = 0
                i += 1

            #print("\nrledata", rledata)

            encoded = []
            ri = 0
            rlen = len(rledata)
            while ri < rlen:
                v = rledata[ri] << 4
                if (ri < rlen - 1): v |= rledata[ri + 1]
                encoded += [ v ]
                ri += 2

            #print("\nencoded", encoded)
            return encoded, isext

        # Try to encode with the original isext flag
        warn = "This may take a while" if len(data) > 300000 else ""
        print("Compressing image data...", warn)
        isext = False
        encoded, isext = try_encode(data, isext)
        if len(encoded) == 0:
            encoded, isext = try_encode(data, True)
        return encoded, isext

    def bitwise_rle_decode(isext, rledata, invert=0):
        expanded = []
        for n in rledata: expanded += [ n >> 4, n & 0xF ]

        decoded = []
        bitstate = 0 ; workbyte = 0 ; outindex = 0
        i = 0
        while i < len(expanded):
            c = expanded[i]
            i += 1

            if i == 1: bitstate = c ; continue

            if c == 15:
                d = expanded[i] ; e = expanded[i + 1]
                if isext and d == 15:
                    c = 256 + 16 * e + expanded[i + 2] - 1
                    i += 1
                else:
                    c = 16 * d + e + 15
                i += 2

            for _ in range(c, -1, -1):
                bitval = 0x80 >> (outindex & 7)
                if bitstate: workbyte |= bitval
                if bitval == 1:
                    decoded += [ workbyte ]
                    workbyte = 0
                outindex += 1

            bitstate ^= 1

        print("\nDecoded RLE data:")
        pretty = [ '{0:08b}'.format(v) for v in decoded ]
        rows = [pretty[i:i+bytewidth] for i in range(0, len(pretty), bytewidth)]
        for row in rows: print(f"{''.join(row)}")

        return decoded

    def rle_emit(ofile, arrname, rledata, rawsize, isext):

        outstr = ''
        rows = [ rledata[i:i+16] for i in range(0, len(rledata), 16) ]
        for i in range(0, len(rows)):
            rows[i] = [ '0x{0:02X}'.format(v) for v in rows[i] ]
            outstr += f"  {', '.join(rows[i])},\n"

        outstr = outstr[:-2]
        size = len(rledata)
        defname = 'COMPACT_CUSTOM_BOOTSCREEN_EXT' if isext else 'COMPACT_CUSTOM_BOOTSCREEN'
        ofile.write(f"\n// Saves {rawsize - size} bytes\n#define {defname}\n{datatype} {arrname}_rle[{size}] PROGMEM = {{\n{outstr}\n}};\n")

    # Encode the data, write it out, close the file
    rledata, isext = bitwise_rle_encode(raw_data)
    rle_emit(ofile, arrname, rledata, len(raw_data), isext)
    ofile.close()

    # Validate that code properly compressed (and decompressed) the data
    checkdata = bitwise_rle_decode(isext, rledata)
    for i in range(0, len(checkdata)):
        if raw_data[i] != checkdata[i]:
            print(f'Data mismatch at byte offset {i} (should be {raw_data[i]} but got {checkdata[i]})')
            break

if len(sys.argv) <= 2:
    print('Usage: rle_compress_bitmap.py INPUT_FILE OUTPUT_FILE')
    exit(1)

output_cpp = sys.argv[2]
inname = sys.argv[1].replace('//', '/')
try:
    input_cpp = open(inname)
    print("Processing", inname, "...")
    addCompressedData(input_cpp, output_cpp)
except OSError:
    print("Can't find input file", inname)
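And a hand-worked trace of the bitwise scheme (two bitmap bytes, nybbles computed by hand):

```python
data = [0b11100000, 0b00000000]   # a run of 3 one-bits, then 13 zero-bits
# bitwise_rle_encode(data) builds the nybbles [1, 2, 12]:
#   1  -> the bitmap starts with a set bit
#   2  -> first run is 3 bits long (stored as length - 1)
#   12 -> second run is 13 bits long
# Packed two nybbles per byte, high nybble first: [0x12, 0xC0]
```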
@@ -7,17 +7,6 @@ import serial
 
 Import("env")
 
-# Needed (only) for compression, but there are problems with pip install heatshrink
-#try:
-#    import heatshrink
-#except ImportError:
-#    # Install heatshrink
-#    print("Installing 'heatshrink' python module...")
-#    env.Execute(env.subst("$PYTHONEXE -m pip install heatshrink"))
-#
-# Not tested: If it's safe to install python libraries in PIO python try:
-#    env.Execute(env.subst("$PYTHONEXE -m pip install https://github.com/p3p/pyheatshrink/releases/download/0.3.3/pyheatshrink-pip.zip"))
-
 import MarlinBinaryProtocol
 
 #-----------------#
@@ -168,7 +157,8 @@ def Upload(source, target, env):
     marlin_string_config_h_author = _GetMarlinEnv(MarlinEnv, 'STRING_CONFIG_H_AUTHOR')
 
     # Get firmware upload params
-    upload_firmware_source_name = str(source[0])    # Source firmware filename
+    upload_firmware_source_name = env['PROGNAME'] + '.bin' if 'PROGNAME' in env else str(source[0])
+                                                    # Source firmware filename
     upload_speed = env['UPLOAD_SPEED'] if 'UPLOAD_SPEED' in env else 115200
                                                     # baud rate of serial connection
     upload_port = _GetUploadPort(env)               # Serial port to use
@@ -191,6 +181,21 @@ def Upload(source, target, env):
     # "upload_random_name": generate a random 8.3 firmware filename to upload
     upload_random_filename = upload_delete_old_bins and not marlin_long_filename_host_support
 
+    # Heatshrink module is needed (only) for compression
+    if upload_compression:
+        if sys.version_info[0] > 2:
+            try:
+                import heatshrink2
+            except ImportError:
+                print("Installing 'heatshrink2' python module...")
+                env.Execute(env.subst("$PYTHONEXE -m pip install heatshrink2"))
+        else:
+            try:
+                import heatshrink
+            except ImportError:
+                print("Installing 'heatshrink' python module...")
+                env.Execute(env.subst("$PYTHONEXE -m pip install heatshrink"))
+
     try:
 
         # Start upload job