ZTWHHH committed on
Commit
3ddea28
·
verified ·
1 Parent(s): e3c673c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. deepseekvl2/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 +3 -0
  3. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/CFF2ToCFF.py +203 -0
  4. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/CFFToCFF2.py +305 -0
  5. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__init__.py +0 -0
  6. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/CFF2ToCFF.cpython-310.pyc +0 -0
  7. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/CFFToCFF2.cpython-310.pyc +0 -0
  8. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-310.pyc +0 -0
  9. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/specializer.cpython-310.pyc +0 -0
  10. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/transforms.cpython-310.pyc +0 -0
  11. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/width.cpython-310.pyc +0 -0
  12. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/specializer.py +924 -0
  13. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/transforms.py +490 -0
  14. infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/width.py +210 -0
  15. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__init__.py +15 -0
  16. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__main__.py +6 -0
  17. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/__init__.cpython-310.pyc +0 -0
  18. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/__main__.cpython-310.pyc +0 -0
  19. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/benchmark.cpython-310.pyc +0 -0
  20. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/cli.cpython-310.pyc +0 -0
  21. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/cu2qu.cpython-310.pyc +0 -0
  22. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/errors.cpython-310.pyc +0 -0
  23. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/ufo.cpython-310.pyc +0 -0
  24. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/benchmark.py +54 -0
  25. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cli.py +198 -0
  26. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cu2qu.c +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cu2qu.py +531 -0
  28. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/errors.py +77 -0
  29. infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/ufo.py +349 -0
  30. infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/__main__.cpython-310.pyc +0 -0
  31. infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/split.cpython-310.pyc +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/statNames.py +253 -0
  33. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/MacRoman.py +258 -0
  34. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/StandardEncoding.py +258 -0
  35. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__init__.py +1 -0
  36. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/MacRoman.cpython-310.pyc +0 -0
  37. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/StandardEncoding.cpython-310.pyc +0 -0
  38. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/__init__.cpython-310.pyc +0 -0
  39. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/codecs.cpython-310.pyc +0 -0
  40. infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/codecs.py +135 -0
  41. infer_4_47_1/lib/python3.10/site-packages/fontTools/mtiLib/__main__.py +5 -0
  42. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__init__.py +1 -0
  43. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-310.pyc +0 -0
  44. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/error.cpython-310.pyc +0 -0
  45. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/builder.py +0 -0
  46. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/error.py +11 -0
  47. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/maxContextCalc.py +96 -0
  48. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py +53 -0
  49. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__main__.py +6 -0
  50. infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1577,3 +1577,4 @@ evalkit_tf449/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 f
1577
  infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/lite/dist/assets/gradio_client-1.5.3-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
1578
  evalkit_cambrian/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1579
  infer_4_47_1/lib/python3.10/site-packages/fontTools/pens/momentsPen.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
1577
  infer_4_47_1/lib/python3.10/site-packages/gradio/_frontend_code/lite/dist/assets/gradio_client-1.5.3-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
1578
  evalkit_cambrian/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1579
  infer_4_47_1/lib/python3.10/site-packages/fontTools/pens/momentsPen.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1580
+ deepseekvl2/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
deepseekvl2/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5e5d9d2280dd4e34ab57f542a32a3fb6d009fe4ce84c074df3eae99b008c72d
3
+ size 240931768
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/CFF2ToCFF.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """CFF2 to CFF converter."""
2
+
3
+ from fontTools.ttLib import TTFont, newTable
4
+ from fontTools.misc.cliTools import makeOutputFileName
5
+ from fontTools.cffLib import (
6
+ TopDictIndex,
7
+ buildOrder,
8
+ buildDefaults,
9
+ topDictOperators,
10
+ privateDictOperators,
11
+ )
12
+ from .width import optimizeWidths
13
+ from collections import defaultdict
14
+ import logging
15
+
16
+
17
+ __all__ = ["convertCFF2ToCFF", "main"]
18
+
19
+
20
+ log = logging.getLogger("fontTools.cffLib")
21
+
22
+
23
def _convertCFF2ToCFF(cff, otFont):
    """Converts this object from CFF2 format to CFF format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    The CFF2 font cannot be variable. (TODO Accept those and convert to the
    default instance?)

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)

    :param cff: decompiled CFFFontSet taken from the font's ``CFF2`` table.
    :param otFont: the owning TTFont; glyph widths are read from its ``hmtx``.
    :raises ValueError: if the CFF2 font is variable (has a VarStore).
    """

    cff.major = 1

    topDictData = TopDictIndex(None)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        item.cff2GetGlyphOrder = None
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]

    if hasattr(topDict, "VarStore"):
        raise ValueError("Variable CFF2 font cannot be converted to CFF format.")

    # Drop TopDict operators that do not exist in CFF 1.0.
    # NOTE: iterate over a snapshot of the keys — deleting from a dict while
    # iterating its live .keys() view raises RuntimeError in Python 3.
    opOrder = buildOrder(topDictOperators)
    topDict.order = opOrder
    for key in list(topDict.rawDict.keys()):
        if key not in opOrder:
            del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    fdArray = topDict.FDArray
    charStrings = topDict.CharStrings

    defaults = buildDefaults(privateDictOperators)
    order = buildOrder(privateDictOperators)
    for fd in fdArray:
        fd.setCFF2(False)
        privateDict = fd.Private
        privateDict.order = order
        # Fill in CFF1 defaults for operators that CFF2 omits...
        for key in order:
            if key not in privateDict.rawDict and key in defaults:
                privateDict.rawDict[key] = defaults[key]
        # ...and drop operators CFF1 does not know (snapshot keys, as above).
        for key in list(privateDict.rawDict.keys()):
            if key not in order:
                del privateDict.rawDict[key]
                if hasattr(privateDict, key):
                    delattr(privateDict, key)

    # CFF charstrings/subroutines are explicitly terminated; CFF2 ones are not.
    for cs in charStrings.values():
        cs.decompile()
        cs.program.append("endchar")
    for subrSets in [cff.GlobalSubrs] + [
        getattr(fd.Private, "Subrs", []) for fd in fdArray
    ]:
        for cs in subrSets:
            cs.program.append("return")

    # Add (optimal) width to CharStrings that need it.
    widths = defaultdict(list)
    metrics = otFont["hmtx"].metrics
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
            fdIndex = 0
        widths[fdIndex].append(metrics[glyphName][0])
    for fdIndex, widthList in widths.items():
        bestDefault, bestNominal = optimizeWidths(widthList)
        private = fdArray[fdIndex].Private
        private.defaultWidthX = bestDefault
        private.nominalWidthX = bestNominal
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
            fdIndex = 0
        private = fdArray[fdIndex].Private
        width = metrics[glyphName][0]
        if width != private.defaultWidthX:
            # Explicit widths are encoded relative to nominalWidthX.
            cs.program.insert(0, width - private.nominalWidthX)

    # Rename glyphs to the CID scheme ("cidN"; glyph 0 stays ".notdef").
    mapping = {
        name: ("cid" + str(n) if n else ".notdef")
        for n, name in enumerate(topDict.charset)
    }
    topDict.charset = [
        "cid" + str(n) if n else ".notdef" for n in range(len(topDict.charset))
    ]
    charStrings.charStrings = {
        mapping[name]: v for name, v in charStrings.charStrings.items()
    }

    # I'm not sure why the following is *not* necessary. And it breaks
    # the output if I add it.
    # topDict.ROS = ("Adobe", "Identity", 0)
119
def convertCFF2ToCFF(font, *, updatePostTable=True):
    """Replace the ``CFF2`` table of *font* with an equivalent ``CFF `` table,
    in place.  When *updatePostTable* is true, a version-2.0 ``post`` table is
    bumped to 3.0, the only other version allowed alongside a CFF table."""
    cff = font["CFF2"].cff
    _convertCFF2ToCFF(cff, font)
    del font["CFF2"]
    cffTable = newTable("CFF ")
    cffTable.cff = cff
    font["CFF "] = cffTable

    if not (updatePostTable and "post" in font):
        return
    # Only version supported for fonts with CFF table is 0x00030000 not 0x20000
    post = font["post"]
    if post.formatType == 2.0:
        post.formatType = 3.0
133
def main(args=None):
    """Convert a (non-variable) CFF2 OTF font to a CFF OTF font."""
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse

    # NOTE: prog/description/help previously said "CFFToCFF2" / "Upgrade a
    # CFF font to CFF2" / "INPUT-CFF2.ttf" — copy-paste from the companion
    # tool.  This tool goes the other way (CFF2 -> CFF, suffix "-CFF").
    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFF2ToCFF",
        description="Convert a non-variable CFF2 font to CFF.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF2 table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    loggingGroup = parser.add_mutually_exclusive_group(required=False)
    loggingGroup.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    loggingGroup.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    options = parser.parse_args(args)

    from fontTools import configLogger

    configLogger(
        level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
    )

    import os

    infile = options.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    outfile = (
        makeOutputFileName(infile, overWrite=True, suffix="-CFF")
        if not options.output
        else options.output
    )

    font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)

    convertCFF2ToCFF(font)

    log.info(
        "Saving %s",
        outfile,
    )
    font.save(outfile)


if __name__ == "__main__":
    import sys

    sys.exit(main(sys.argv[1:]))
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/CFFToCFF2.py ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """CFF to CFF2 converter."""
2
+
3
+ from fontTools.ttLib import TTFont, newTable
4
+ from fontTools.misc.cliTools import makeOutputFileName
5
+ from fontTools.misc.psCharStrings import T2WidthExtractor
6
+ from fontTools.cffLib import (
7
+ TopDictIndex,
8
+ FDArrayIndex,
9
+ FontDict,
10
+ buildOrder,
11
+ topDictOperators,
12
+ privateDictOperators,
13
+ topDictOperators2,
14
+ privateDictOperators2,
15
+ )
16
+ from io import BytesIO
17
+ import logging
18
+
19
+ __all__ = ["convertCFFToCFF2", "main"]
20
+
21
+
22
+ log = logging.getLogger("fontTools.cffLib")
23
+
24
+
25
class _NominalWidthUsedError(Exception):
    # Sentinel passed to T2WidthExtractor in place of nominalWidthX.  A
    # charstring that encodes an explicit width causes the extractor to add
    # to nominalWidthX; any addition involving this object raises the object
    # itself, signalling "this glyph has an explicit width" to the caller.
    def __add__(self, other):
        raise self

    def __radd__(self, other):
        raise self
33
def _convertCFFToCFF2(cff, otFont):
    """Converts this object from CFF format to CFF2 format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)"""

    # Clean up T2CharStrings

    topDict = cff.topDictIndex[0]
    fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None
    charStrings = topDict.CharStrings
    globalSubrs = cff.GlobalSubrs
    # Per-FD local subrs for CID fonts; a single list for plain CFF fonts.
    localSubrs = (
        [getattr(fd.Private, "Subrs", []) for fd in fdArray]
        if fdArray
        else (
            [topDict.Private.Subrs]
            if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs")
            else []
        )
    )

    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        cs.decompile()

    # Clean up subroutines first
    # CFF2 has no "return"/"endchar"; truncate each subr at whichever
    # terminator appears first.
    for subrs in [globalSubrs] + localSubrs:
        for subr in subrs:
            program = subr.program
            i = j = len(program)
            try:
                i = program.index("return")
            except ValueError:
                pass
            try:
                j = program.index("endchar")
            except ValueError:
                pass
            program[min(i, j) :] = []

    # Clean up glyph charstrings
    removeUnusedSubrs = False
    nominalWidthXError = _NominalWidthUsedError()
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        program = cs.program

        thisLocalSubrs = (
            localSubrs[fdIndex]
            if fdIndex is not None
            else (
                getattr(topDict.Private, "Subrs", [])
                if hasattr(topDict, "Private")
                else []
            )
        )

        # Intentionally use custom type for nominalWidthX, such that any
        # CharString that has an explicit width encoded will throw back to us.
        extractor = T2WidthExtractor(
            thisLocalSubrs,
            globalSubrs,
            nominalWidthXError,
            0,
        )
        try:
            extractor.execute(cs)
        except _NominalWidthUsedError:
            # Program has explicit width. We want to drop it, but can't
            # just pop the first number since it may be a subroutine call.
            # Instead, when seeing that, we embed the subroutine and recurse.
            # If this ever happened, we later prune unused subroutines.
            while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]:
                removeUnusedSubrs = True
                subrNumber = program.pop(0)
                assert isinstance(subrNumber, int), subrNumber
                op = program.pop(0)
                # Subr indices in the program are biased; undo the bias to index
                # into the subr list.
                bias = extractor.localBias if op == "callsubr" else extractor.globalBias
                subrNumber += bias
                subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs
                subrProgram = subrSet[subrNumber].program
                program[:0] = subrProgram
            # Now pop the actual width
            assert len(program) >= 1, program
            program.pop(0)

        if program and program[-1] == "endchar":
            program.pop()

    if removeUnusedSubrs:
        cff.remove_unused_subroutines()

    # Upconvert TopDict

    cff.major = 2
    cff2GetGlyphOrder = cff.otFont.getGlyphOrder
    topDictData = TopDictIndex(None, cff2GetGlyphOrder)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]
    if hasattr(topDict, "Private"):
        privateDict = topDict.Private
    else:
        privateDict = None
    opOrder = buildOrder(topDictOperators2)
    topDict.order = opOrder
    topDict.cff2GetGlyphOrder = cff2GetGlyphOrder

    if not hasattr(topDict, "FDArray"):
        # Plain (non-CID) CFF font: synthesize the FDArray CFF2 requires,
        # wrapping the font's single PrivateDict in one FontDict.
        fdArray = topDict.FDArray = FDArrayIndex()
        fdArray.strings = None
        fdArray.GlobalSubrs = topDict.GlobalSubrs
        topDict.GlobalSubrs.fdArray = fdArray
        charStrings = topDict.CharStrings
        if charStrings.charStringsAreIndexed:
            charStrings.charStringsIndex.fdArray = fdArray
        else:
            charStrings.fdArray = fdArray
        fontDict = FontDict()
        fontDict.setCFF2(True)
        fdArray.append(fontDict)
        fontDict.Private = privateDict
        privateOpOrder = buildOrder(privateDictOperators2)
        if privateDict is not None:
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in privateDict.rawDict:
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    else:
        # clean up the PrivateDicts in the fdArray
        fdArray = topDict.FDArray
        privateOpOrder = buildOrder(privateDictOperators2)
        for fontDict in fdArray:
            fontDict.setCFF2(True)
            for key in list(fontDict.rawDict.keys()):
                if key not in fontDict.order:
                    del fontDict.rawDict[key]
                    if hasattr(fontDict, key):
                        delattr(fontDict, key)

            privateDict = fontDict.Private
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in list(privateDict.rawDict.keys()):
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key

    # Now delete the deprecated topDict operators from CFF 1.0
    for entry in topDictOperators:
        key = entry[1]
        # We seem to need to keep the charset operator for now,
        # or we fail to compile with some fonts, like AdditionFont.otf.
        # I don't know which kind of CFF font those are. But keeping
        # charset seems to work. It will be removed when we save and
        # read the font again.
        #
        # AdditionFont.otf has <Encoding name="StandardEncoding"/>.
        if key == "charset":
            continue
        if key not in opOrder:
            if key in topDict.rawDict:
                del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    # TODO(behdad): What does the following comment even mean? Both CFF and CFF2
    # use the same T2Charstring class. I *think* what it means is that the CharStrings
    # were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc
    # on them. At least that's what I understand. It's probably safe to remove this
    # and just set vstore where needed.
    #
    # See comment above about charset as well.

    # At this point, the Subrs and Charstrings are all still T2Charstring class
    # easiest to fix this by compiling, then decompiling again
    file = BytesIO()
    cff.compile(file, otFont, isCFF2=True)
    file.seek(0)
    cff.decompile(file, otFont, isCFF2=True)
227
def convertCFFToCFF2(font):
    """Replace the ``CFF `` table of *font* with an equivalent ``CFF2`` table,
    in place."""
    cff = font["CFF "].cff
    del font["CFF "]
    _convertCFFToCFF2(cff, font)
    cff2Table = newTable("CFF2")
    cff2Table.cff = cff
    font["CFF2"] = cff2Table
235
def main(args=None):
    """Convert CFF OTF font to CFF2 OTF font"""
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFFToCFF2",
        description="Upgrade a CFF font to CFF2.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF2.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    verbosity = parser.add_mutually_exclusive_group(required=False)
    verbosity.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    verbosity.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    options = parser.parse_args(args)

    from fontTools import configLogger

    # Map the mutually-exclusive verbosity flags onto a logging level.
    if options.verbose:
        level = "DEBUG"
    elif options.quiet:
        level = "ERROR"
    else:
        level = "INFO"
    configLogger(level=level)

    import os

    infile = options.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    if options.output:
        outfile = options.output
    else:
        outfile = makeOutputFileName(infile, overWrite=True, suffix="-CFF2")

    font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)

    convertCFFToCFF2(font)

    log.info("Saving %s", outfile)
    font.save(outfile)


if __name__ == "__main__":
    import sys

    sys.exit(main(sys.argv[1:]))
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/CFF2ToCFF.cpython-310.pyc ADDED
Binary file (5.12 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/CFFToCFF2.cpython-310.pyc ADDED
Binary file (5.94 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (79.2 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/specializer.cpython-310.pyc ADDED
Binary file (18.2 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/transforms.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/__pycache__/width.cpython-310.pyc ADDED
Binary file (6.41 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/specializer.py ADDED
@@ -0,0 +1,924 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """T2CharString operator specializer and generalizer.
4
+
5
+ PostScript glyph drawing operations can be expressed in multiple different
6
+ ways. For example, as well as the ``lineto`` operator, there is also a
7
+ ``hlineto`` operator which draws a horizontal line, removing the need to
8
+ specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
9
+ vertical line, removing the need to specify a ``dy`` coordinate. As well
10
+ as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
11
+ into lists of operations, this module allows for conversion between general
12
+ and specific forms of the operation.
13
+
14
+ """
15
+
16
+ from fontTools.cffLib import maxStackLimit
17
+
18
+
19
def stringToProgram(string):
    """Turn a whitespace-separated string (or an iterable of tokens) into a
    T2CharString program list, converting numeric tokens to int or float and
    leaving everything else (operator names) untouched."""

    def parse(token):
        # Try the narrower type first; "2.5" fails int() but passes float().
        for convert in (int, float):
            try:
                return convert(token)
            except ValueError:
                pass
        return token

    tokens = string.split() if isinstance(string, str) else string
    return [parse(token) for token in tokens]
35
def programToString(program):
    """Render a T2CharString program list as a single space-separated string."""
    return " ".join(map(str, program))
39
def programToCommands(program, getNumRegions=None):
    """Takes a T2CharString program list and returns list of commands.
    Each command is a two-tuple of commandname,arg-list. The commandname might
    be empty string if no commandname shall be emitted (used for glyph width,
    hintmask/cntrmask argument, as well as stray arguments at the end of the
    program (🤷).
    'getNumRegions' may be None, or a callable object. It must return the
    number of regions. 'getNumRegions' takes a single argument, vsindex. It
    returns the numRegions for the vsindex.
    The Charstring may or may not start with a width value. If the first
    non-blend operator has an odd number of arguments, then the first argument is
    a width, and is popped off. This is complicated with blend operators, as
    there may be more than one before the first hint or moveto operator, and each
    one reduces several arguments to just one list argument. We have to sum the
    number of arguments that are not part of the blend arguments, and all the
    'numBlends' values. We could instead have said that by definition, if there
    is a blend operator, there is no width value, since CFF2 Charstrings don't
    have width values. I discussed this with Behdad, and we are allowing for an
    initial width value in this case because developers may assemble a CFF2
    charstring from CFF Charstrings, which could have width values.
    """

    seenWidthOp = False
    vsIndex = 0
    lenBlendStack = 0
    lastBlendIndex = 0
    commands = []
    stack = []
    it = iter(program)

    for token in it:
        if not isinstance(token, str):
            # Numeric arguments accumulate until an operator consumes them.
            stack.append(token)
            continue

        if token == "blend":
            assert getNumRegions is not None
            numSourceFonts = 1 + getNumRegions(vsIndex)
            # replace the blend op args on the stack with a single list
            # containing all the blend op args.
            numBlends = stack[-1]
            numBlendArgs = numBlends * numSourceFonts + 1
            # replace first blend op by a list of the blend ops.
            stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
            lenStack = len(stack)
            lenBlendStack += numBlends + lenStack - 1
            lastBlendIndex = lenStack
            # if a blend op exists, this is or will be a CFF2 charstring.
            continue

        elif token == "vsindex":
            vsIndex = stack[-1]
            assert type(vsIndex) is int

        elif (not seenWidthOp) and token in {
            "hstem",
            "hstemhm",
            "vstem",
            "vstemhm",
            "cntrmask",
            "hintmask",
            "hmoveto",
            "vmoveto",
            "rmoveto",
            "endchar",
        }:
            # First hint/moveto/endchar operator: this is the only place an
            # optional leading width can appear.
            seenWidthOp = True
            parity = token in {"hmoveto", "vmoveto"}
            if lenBlendStack:
                # lenBlendStack has the number of args represented by the last blend
                # arg and all the preceding args. We need to now add the number of
                # args following the last blend arg.
                numArgs = lenBlendStack + len(stack[lastBlendIndex:])
            else:
                numArgs = len(stack)
            if numArgs and (numArgs % 2) ^ parity:
                width = stack.pop(0)
                commands.append(("", [width]))

        if token in {"hintmask", "cntrmask"}:
            if stack:
                commands.append(("", stack))
            commands.append((token, []))
            # The mask byte(s) follow the operator in the program stream.
            commands.append(("", [next(it)]))
        else:
            commands.append((token, stack))
        stack = []
    if stack:
        commands.append(("", stack))
    return commands
131
def _flattenBlendArgs(args):
    """Expand each list-valued (blend) argument back into its raw tokens
    followed by a "blend" operator; pass all other arguments through."""
    tokens = []
    for arg in args:
        if isinstance(arg, list):
            tokens += arg
            tokens.append("blend")
        else:
            tokens.append(arg)
    return tokens
142
def commandsToProgram(commands):
    """Takes a commands list as returned by programToCommands() and converts
    it back to a T2CharString program list."""
    program = []
    for op, args in commands:
        hasBlends = any(isinstance(arg, list) for arg in args)
        program.extend(_flattenBlendArgs(args) if hasBlends else args)
        if op:
            program.append(op)
    return program
155
def _everyN(el, n):
    """Yield successive groups of size *n* from the list *el*.

    Raises ValueError if len(el) is not an exact multiple of n.  (This is a
    generator, so the error surfaces on first iteration, not at call time.)
    """
    total = len(el)
    if total % n != 0:
        raise ValueError(el)
    for start in range(0, total, n):
        yield el[start : start + n]
164
class _GeneralizerDecombinerCommandsMap(object):
    """Decombiner used by generalizeCommands().

    Each staticmethod is named after a specialized Type 2 operator and is a
    generator that yields equivalent generic commands ("rmoveto", "rlineto",
    "rrcurveto"), one per segment, for that operator's argument list.
    Invalid argument counts raise ValueError — lazily, when the generator
    is iterated.
    """

    @staticmethod
    def rmoveto(args):
        # Already generic; just validate the arg count.
        if len(args) != 2:
            raise ValueError(args)
        yield ("rmoveto", args)

    @staticmethod
    def hmoveto(args):
        if len(args) != 1:
            raise ValueError(args)
        yield ("rmoveto", [args[0], 0])

    @staticmethod
    def vmoveto(args):
        if len(args) != 1:
            raise ValueError(args)
        yield ("rmoveto", [0, args[0]])

    @staticmethod
    def rlineto(args):
        if not args:
            raise ValueError(args)
        for args in _everyN(args, 2):
            yield ("rlineto", args)

    @staticmethod
    def hlineto(args):
        if not args:
            raise ValueError(args)
        it = iter(args)
        try:
            # hlineto alternates horizontal, vertical, horizontal, ...
            while True:
                yield ("rlineto", [next(it), 0])
                yield ("rlineto", [0, next(it)])
        except StopIteration:
            pass

    @staticmethod
    def vlineto(args):
        if not args:
            raise ValueError(args)
        it = iter(args)
        try:
            # vlineto alternates vertical, horizontal, vertical, ...
            while True:
                yield ("rlineto", [0, next(it)])
                yield ("rlineto", [next(it), 0])
        except StopIteration:
            pass

    @staticmethod
    def rrcurveto(args):
        if not args:
            raise ValueError(args)
        for args in _everyN(args, 6):
            yield ("rrcurveto", args)

    @staticmethod
    def hhcurveto(args):
        l = len(args)
        if l < 4 or l % 4 > 1:
            raise ValueError(args)
        # An odd arg count means the first curve carries an extra dy1.
        if l % 2 == 1:
            yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
            args = args[5:]
        for args in _everyN(args, 4):
            yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])

    @staticmethod
    def vvcurveto(args):
        l = len(args)
        if l < 4 or l % 4 > 1:
            raise ValueError(args)
        # An odd arg count means the first curve carries an extra dx1.
        if l % 2 == 1:
            yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
            args = args[5:]
        for args in _everyN(args, 4):
            yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])

    @staticmethod
    def hvcurveto(args):
        l = len(args)
        if l < 4 or l % 8 not in {0, 1, 4, 5}:
            raise ValueError(args)
        last_args = None
        # An odd arg count means the final curve has both end deltas.
        if l % 2 == 1:
            lastStraight = l % 8 == 5
            args, last_args = args[:-5], args[-5:]
        it = _everyN(args, 4)
        try:
            # Curves alternate: start-horizontal then start-vertical.
            while True:
                args = next(it)
                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
                args = next(it)
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
        except StopIteration:
            pass
        if last_args:
            args = last_args
            if lastStraight:
                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
            else:
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])

    @staticmethod
    def vhcurveto(args):
        l = len(args)
        if l < 4 or l % 8 not in {0, 1, 4, 5}:
            raise ValueError(args)
        last_args = None
        # An odd arg count means the final curve has both end deltas.
        if l % 2 == 1:
            lastStraight = l % 8 == 5
            args, last_args = args[:-5], args[-5:]
        it = _everyN(args, 4)
        try:
            # Curves alternate: start-vertical then start-horizontal.
            while True:
                args = next(it)
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
                args = next(it)
                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
        except StopIteration:
            pass
        if last_args:
            args = last_args
            if lastStraight:
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
            else:
                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])

    @staticmethod
    def rcurveline(args):
        l = len(args)
        if l < 8 or l % 6 != 2:
            raise ValueError(args)
        # All curves, then one trailing line (the last two args).
        args, last_args = args[:-2], args[-2:]
        for args in _everyN(args, 6):
            yield ("rrcurveto", args)
        yield ("rlineto", last_args)

    @staticmethod
    def rlinecurve(args):
        l = len(args)
        if l < 8 or l % 2 != 0:
            raise ValueError(args)
        # All lines, then one trailing curve (the last six args).
        args, last_args = args[:-6], args[-6:]
        for args in _everyN(args, 2):
            yield ("rlineto", args)
        yield ("rrcurveto", last_args)
312
+
313
+
314
+ def _convertBlendOpToArgs(blendList):
315
+ # args is list of blend op args. Since we are supporting
316
+ # recursive blend op calls, some of these args may also
317
+ # be a list of blend op args, and need to be converted before
318
+ # we convert the current list.
319
+ if any([isinstance(arg, list) for arg in blendList]):
320
+ args = [
321
+ i
322
+ for e in blendList
323
+ for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
324
+ ]
325
+ else:
326
+ args = blendList
327
+
328
+ # We now know that blendList contains a blend op argument list, even if
329
+ # some of the args are lists that each contain a blend op argument list.
330
+ # Convert from:
331
+ # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
332
+ # to:
333
+ # [ [x0] + [delta tuple for x0],
334
+ # ...,
335
+ # [xn] + [delta tuple for xn] ]
336
+ numBlends = args[-1]
337
+ # Can't use args.pop() when the args are being used in a nested list
338
+ # comprehension. See calling context
339
+ args = args[:-1]
340
+
341
+ l = len(args)
342
+ numRegions = l // numBlends - 1
343
+ if not (numBlends * (numRegions + 1) == l):
344
+ raise ValueError(blendList)
345
+
346
+ defaultArgs = [[arg] for arg in args[:numBlends]]
347
+ deltaArgs = args[numBlends:]
348
+ numDeltaValues = len(deltaArgs)
349
+ deltaList = [
350
+ deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
351
+ ]
352
+ blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
353
+ return blend_args
354
+
355
+
356
def generalizeCommands(commands, ignoreErrors=False):
    """Rewrite a commands list to use only generic operators.

    Specialized operators (hlineto, hhcurveto, rcurveline, ...) are
    decombined into rmoveto/rlineto/rrcurveto commands; blend operator
    argument runs are first converted to per-value blend lists.

    If ignoreErrors is True, commands with invalid argument counts are
    kept as raw data entries (op "") instead of raising ValueError.
    """
    result = []
    mapping = _GeneralizerDecombinerCommandsMap
    for op, args in commands:
        # First, generalize any blend args in the arg list.
        # (generator expression: no need to materialize a list for any())
        if any(isinstance(arg, list) for arg in args):
            try:
                args = [
                    n
                    for arg in args
                    for n in (
                        _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
                    )
                ]
            except ValueError:
                if ignoreErrors:
                    # Store op as data, such that consumers of commands do not have to
                    # deal with incorrect number of arguments.
                    result.append(("", args))
                    result.append(("", [op]))
                else:
                    raise

        func = getattr(mapping, op, None)
        if func is None:
            # Not a combinable operator; pass through untouched.
            result.append((op, args))
            continue
        try:
            for command in func(args):
                result.append(command)
        except ValueError:
            if ignoreErrors:
                # Store op as data, such that consumers of commands do not have to
                # deal with incorrect number of arguments.
                result.append(("", args))
                result.append(("", [op]))
            else:
                raise
    return result
395
+
396
+
397
def generalizeProgram(program, getNumRegions=None, **kwargs):
    """Parse a T2 program, generalize its commands, and re-emit a program."""
    commands = programToCommands(program, getNumRegions)
    generalized = generalizeCommands(commands, **kwargs)
    return commandsToProgram(generalized)
401
+
402
+
403
+ def _categorizeVector(v):
404
+ """
405
+ Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
406
+ of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
407
+ it returns a single zero still.
408
+
409
+ >>> _categorizeVector((0,0))
410
+ ('0', (0,))
411
+ >>> _categorizeVector((1,0))
412
+ ('h', (1,))
413
+ >>> _categorizeVector((0,2))
414
+ ('v', (2,))
415
+ >>> _categorizeVector((1,2))
416
+ ('r', (1, 2))
417
+ """
418
+ if not v[0]:
419
+ if not v[1]:
420
+ return "0", v[:1]
421
+ else:
422
+ return "v", v[1:]
423
+ else:
424
+ if not v[1]:
425
+ return "h", v[:1]
426
+ else:
427
+ return "r", v
428
+
429
+
430
+ def _mergeCategories(a, b):
431
+ if a == "0":
432
+ return b
433
+ if b == "0":
434
+ return a
435
+ if a == b:
436
+ return a
437
+ return None
438
+
439
+
440
+ def _negateCategory(a):
441
+ if a == "h":
442
+ return "v"
443
+ if a == "v":
444
+ return "h"
445
+ assert a in "0r"
446
+ return a
447
+
448
+
449
def _convertToBlendCmds(args):
    """Collapse runs of adjacent blend lists in *args* into CFF2 'blend'
    operator argument lists (defaults + deltas + num_blends), respecting
    the CFF2 maximum stack depth (module-global maxStackLimit)."""
    # return a list of blend commands, and
    # the remaining non-blended args, if any.
    num_args = len(args)
    stack_use = 0
    new_args = []
    i = 0
    while i < num_args:
        arg = args[i]
        i += 1
        if not isinstance(arg, list):
            # Plain (non-blended) value: passes through, costs one slot.
            new_args.append(arg)
            stack_use += 1
        else:
            prev_stack_use = stack_use
            # The arg is a tuple of blend values.
            # These are each (master 0,delta 1..delta n, 1)
            # Combine as many successive tuples as we can,
            # up to the max stack limit.
            num_sources = len(arg) - 1
            blendlist = [arg]
            stack_use += 1 + num_sources  # 1 for the num_blends arg

            # if we are here, max stack is the CFF2 max stack.
            # I use the CFF2 max stack limit here rather than
            # the 'maxstack' chosen by the client, as the default
            # maxstack may have been used unintentionally. For all
            # the other operators, this just produces a little less
            # optimization, but here it puts a hard (and low) limit
            # on the number of source fonts that can be used.
            #
            # Make sure the stack depth does not exceed (maxstack - 1), so
            # that subroutinizer can insert subroutine calls at any point.
            while (
                (i < num_args)
                and isinstance(args[i], list)
                and stack_use + num_sources < maxStackLimit
            ):
                blendlist.append(args[i])
                i += 1
                stack_use += num_sources
            # blendList now contains as many single blend tuples as can be
            # combined without exceeding the CFF2 stack limit.
            num_blends = len(blendlist)
            # append the 'num_blends' default font values
            blend_args = []
            for arg in blendlist:
                blend_args.append(arg[0])
            for arg in blendlist:
                # Each blend list carries a trailing sentinel of 1.
                assert arg[-1] == 1
                blend_args.extend(arg[1:-1])
            blend_args.append(num_blends)
            new_args.append(blend_args)
            # After the blend op executes, only num_blends values remain
            # on the stack.
            stack_use = prev_stack_use + num_blends

    return new_args
505
+
506
+
507
+ def _addArgs(a, b):
508
+ if isinstance(b, list):
509
+ if isinstance(a, list):
510
+ if len(a) != len(b) or a[-1] != b[-1]:
511
+ raise ValueError()
512
+ return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
513
+ else:
514
+ a, b = b, a
515
+ if isinstance(a, list):
516
+ assert a[-1] == 1
517
+ return [_addArgs(a[0], b)] + a[1:]
518
+ return a + b
519
+
520
+
521
+ def _argsStackUse(args):
522
+ stackLen = 0
523
+ maxLen = 0
524
+ for arg in args:
525
+ if type(arg) is list:
526
+ # Blended arg
527
+ maxLen = max(maxLen, stackLen + _argsStackUse(arg))
528
+ stackLen += arg[-1]
529
+ else:
530
+ stackLen += 1
531
+ return max(stackLen, maxLen)
532
+
533
+
534
def specializeCommands(
    commands,
    ignoreErrors=False,
    generalizeFirst=True,
    preserveTopology=False,
    maxstack=48,
):
    """Rewrite a generalized commands list into its most compact specialized
    form (hlineto/vvcurveto/... operators, merged operator runs) and return
    the modified commands list."""
    # We perform several rounds of optimizations.  They are carefully ordered and are:
    #
    # 0. Generalize commands.
    #    This ensures that they are in our expected simple form, with each line/curve only
    #    having arguments for one segment, and using the generic form (rlineto/rrcurveto).
    #    If caller is sure the input is in this form, they can turn off generalization to
    #    save time.
    #
    # 1. Combine successive rmoveto operations.
    #
    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #    We specialize into some, made-up, variants as well, which simplifies following
    #    passes.
    #
    # 3. Merge or delete redundant operations, to the extent requested.
    #    OpenType spec declares point numbers in CFF undefined.  As such, we happily
    #    change topology.  If client relies on point numbers (in GPOS anchors, or for
    #    hinting purposes(what?)) they can turn this off.
    #
    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    #
    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    #
    # 6. Resolve any remaining made-up operators into real operators.
    #
    # I have convinced myself that this produces optimal bytecode (except for, possibly
    # one byte each time maxstack size prohibits combining.)  YMMV, but you'd be wrong. :-)
    # A dynamic-programming approach can do the same but would be significantly slower.
    #
    # 7. For any args which are blend lists, convert them to a blend command.

    # 0. Generalize commands.
    if generalizeFirst:
        commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
    else:
        commands = list(commands)  # Make copy since we modify in-place later.

    # 1. Combine successive rmoveto operations.
    for i in range(len(commands) - 1, 0, -1):
        if "rmoveto" == commands[i][0] == commands[i - 1][0]:
            v1, v2 = commands[i - 1][1], commands[i][1]
            commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
            del commands[i]

    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #
    # We, in fact, specialize into more, made-up, variants that special-case when both
    # X and Y components are zero.  This simplifies the following optimization passes.
    # This case is rare, but OCD does not let me skip it.
    #
    # After this round, we will have four variants that use the following mnemonics:
    #
    #  - 'r' for relative,   ie. non-zero X and non-zero Y,
    #  - 'h' for horizontal, ie. zero X and non-zero Y,
    #  - 'v' for vertical,   ie. non-zero X and zero Y,
    #  - '0' for zeros,      ie. zero X and zero Y.
    #
    # The '0' pseudo-operators are not part of the spec, but help simplify the following
    # optimization rounds.  We resolve them at the end.  So, after this, we will have four
    # moveto and four lineto variants:
    #
    #  - 0moveto, 0lineto
    #  - hmoveto, hlineto
    #  - vmoveto, vlineto
    #  - rmoveto, rlineto
    #
    # and sixteen curveto variants.  For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3.
    # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
    #
    # There are nine different variants of curves without the '0'.  Those nine map exactly
    # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
    # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
    # arguments and one without.  Eg. an hhcurveto with an extra argument (odd number of
    # arguments) is in fact an rhcurveto.  The operators in the spec are designed such that
    # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
    #
    # Of the curve types with '0', the 00curveto is equivalent to a lineto variant.  The rest
    # of the curve types with a 0 need to be encoded as a h or v variant.  Ie. a '0' can be
    # thought of a "don't care" and can be used as either an 'h' or a 'v'.  As such, we always
    # encode a number 0 as argument when we use a '0' variant.  Later on, we can just substitute
    # the '0' with either 'h' or 'v' and it works.
    #
    # When we get to curve splines however, things become more complicated...  XXX finish this.
    # There's one more complexity with splines.  If one side of the spline is not horizontal or
    # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
    # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
    # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
    # This limits our merge opportunities later.
    #
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"rmoveto", "rlineto"}:
            c, args = _categorizeVector(args)
            commands[i] = c + op[1:], args
            continue

        if op == "rrcurveto":
            c1, args1 = _categorizeVector(args[:2])
            c2, args2 = _categorizeVector(args[-2:])
            commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
            continue

    # 3. Merge or delete redundant operations, to the extent requested.
    #
    # TODO
    # A 0moveto that comes before all other path operations can be removed.
    # though I find conflicting evidence for this.
    #
    # TODO
    # "If hstem and vstem hints are both declared at the beginning of a
    # CharString, and this sequence is followed directly by the hintmask or
    # cntrmask operators, then the vstem hint operator (or, if applicable,
    # the vstemhm operator) need not be included."
    #
    # "The sequence and form of a CFF2 CharString program may be represented as:
    # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
    #
    # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
    #
    # For Type2 CharStrings the sequence is:
    # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"

    # Some other redundancies change topology (point numbers).
    if not preserveTopology:
        for i in range(len(commands) - 1, -1, -1):
            op, args = commands[i]

            # A 00curveto is demoted to a (specialized) lineto.
            if op == "00curveto":
                assert len(args) == 4
                c, args = _categorizeVector(args[1:3])
                op = c + "lineto"
                commands[i] = op, args
                # and then...

            # A 0lineto can be deleted.
            if op == "0lineto":
                del commands[i]
                continue

            # Merge adjacent hlineto's and vlineto's.
            # In CFF2 charstrings from variable fonts, each
            # arg item may be a list of blendable values, one from
            # each source font.
            if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
                _, other_args = commands[i - 1]
                assert len(args) == 1 and len(other_args) == 1
                try:
                    new_args = [_addArgs(args[0], other_args[0])]
                except ValueError:
                    continue
                commands[i - 1] = (op, new_args)
                del commands[i]
                continue

    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    for i in range(1, len(commands) - 1):
        op, args = commands[i]
        prv, nxt = commands[i - 1][0], commands[i + 1][0]

        if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
            assert len(args) == 1
            args = [0, args[0]] if op[0] == "v" else [args[0], 0]
            commands[i] = ("rlineto", args)
            continue

        if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
            assert (op[0] == "r") ^ (op[1] == "r")
            if op[0] == "v":
                pos = 0
            elif op[0] != "r":
                pos = 1
            elif op[1] == "v":
                pos = 4
            else:
                pos = 5
            # Insert, while maintaining the type of args (can be tuple or list).
            args = args[:pos] + type(args)((0,)) + args[pos:]
            commands[i] = ("rrcurveto", args)
            continue

    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    stackUse = _argsStackUse(commands[-1][1]) if commands else 0
    for i in range(len(commands) - 1, 0, -1):
        op1, args1 = commands[i - 1]
        op2, args2 = commands[i]
        new_op = None

        # Merge logic...
        if {op1, op2} <= {"rlineto", "rrcurveto"}:
            if op1 == op2:
                new_op = op1
            else:
                l = len(args2)
                if op2 == "rrcurveto" and l == 6:
                    new_op = "rlinecurve"
                elif l == 2:
                    new_op = "rcurveline"

        elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
            new_op = op2

        elif {op1, op2} == {"vlineto", "hlineto"}:
            new_op = op1

        elif "curveto" == op1[2:] == op2[2:]:
            d0, d1 = op1[:2]
            d2, d3 = op2[:2]

            if d1 == "r" or d2 == "r" or d0 == d3 == "r":
                continue

            d = _mergeCategories(d1, d2)
            if d is None:
                continue
            if d0 == "r":
                d = _mergeCategories(d, d3)
                if d is None:
                    continue
                new_op = "r" + d + "curveto"
            elif d3 == "r":
                d0 = _mergeCategories(d0, _negateCategory(d))
                if d0 is None:
                    continue
                new_op = d0 + "r" + "curveto"
            else:
                d0 = _mergeCategories(d0, d3)
                if d0 is None:
                    continue
                new_op = d0 + d + "curveto"

        # Make sure the stack depth does not exceed (maxstack - 1), so
        # that subroutinizer can insert subroutine calls at any point.
        args1StackUse = _argsStackUse(args1)
        combinedStackUse = max(args1StackUse, len(args1) + stackUse)
        if new_op and combinedStackUse < maxstack:
            commands[i - 1] = (new_op, args1 + args2)
            del commands[i]
            stackUse = combinedStackUse
        else:
            stackUse = args1StackUse

    # 6. Resolve any remaining made-up operators into real operators.
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"0moveto", "0lineto"}:
            commands[i] = "h" + op[1:], args
            continue

        if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
            l = len(args)

            op0, op1 = op[:2]
            if (op0 == "r") ^ (op1 == "r"):
                assert l % 2 == 1
            if op0 == "0":
                op0 = "h"
            if op1 == "0":
                op1 = "h"
            if op0 == "r":
                op0 = op1
            if op1 == "r":
                op1 = _negateCategory(op0)
            assert {op0, op1} <= {"h", "v"}, (op0, op1)

            if l % 2:
                if op0 != op1:  # vhcurveto / hvcurveto
                    if (op0 == "h") ^ (l % 8 == 1):
                        # Swap last two args order
                        args = args[:-2] + args[-1:] + args[-2:-1]
                else:  # hhcurveto / vvcurveto
                    if op0 == "h":  # hhcurveto
                        # Swap first two args order
                        args = args[1:2] + args[:1] + args[2:]

            commands[i] = op0 + op1 + "curveto", args
            continue

    # 7. For any series of args which are blend lists, convert the series to a single blend arg.
    for i in range(len(commands)):
        op, args = commands[i]
        if any(isinstance(arg, list) for arg in args):
            commands[i] = op, _convertToBlendCmds(args)

    return commands
831
+
832
+
833
def specializeProgram(program, getNumRegions=None, **kwargs):
    """Parse a T2 program, specialize its commands, and re-emit a program."""
    commands = programToCommands(program, getNumRegions)
    specialized = specializeCommands(commands, **kwargs)
    return commandsToProgram(specialized)
837
+
838
+
839
+ if __name__ == "__main__":
840
+ import sys
841
+
842
+ if len(sys.argv) == 1:
843
+ import doctest
844
+
845
+ sys.exit(doctest.testmod().failed)
846
+
847
+ import argparse
848
+
849
+ parser = argparse.ArgumentParser(
850
+ "fonttools cffLib.specializer",
851
+ description="CFF CharString generalizer/specializer",
852
+ )
853
+ parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
854
+ parser.add_argument(
855
+ "--num-regions",
856
+ metavar="NumRegions",
857
+ nargs="*",
858
+ default=None,
859
+ help="Number of variable-font regions for blend opertaions.",
860
+ )
861
+ parser.add_argument(
862
+ "--font",
863
+ metavar="FONTFILE",
864
+ default=None,
865
+ help="CFF2 font to specialize.",
866
+ )
867
+ parser.add_argument(
868
+ "-o",
869
+ "--output-file",
870
+ type=str,
871
+ help="Output font file name.",
872
+ )
873
+
874
+ options = parser.parse_args(sys.argv[1:])
875
+
876
+ if options.program:
877
+ getNumRegions = (
878
+ None
879
+ if options.num_regions is None
880
+ else lambda vsIndex: int(
881
+ options.num_regions[0 if vsIndex is None else vsIndex]
882
+ )
883
+ )
884
+
885
+ program = stringToProgram(options.program)
886
+ print("Program:")
887
+ print(programToString(program))
888
+ commands = programToCommands(program, getNumRegions)
889
+ print("Commands:")
890
+ print(commands)
891
+ program2 = commandsToProgram(commands)
892
+ print("Program from commands:")
893
+ print(programToString(program2))
894
+ assert program == program2
895
+ print("Generalized program:")
896
+ print(programToString(generalizeProgram(program, getNumRegions)))
897
+ print("Specialized program:")
898
+ print(programToString(specializeProgram(program, getNumRegions)))
899
+
900
+ if options.font:
901
+ from fontTools.ttLib import TTFont
902
+
903
+ font = TTFont(options.font)
904
+ cff2 = font["CFF2"].cff.topDictIndex[0]
905
+ charstrings = cff2.CharStrings
906
+ for glyphName in charstrings.keys():
907
+ charstring = charstrings[glyphName]
908
+ charstring.decompile()
909
+ getNumRegions = charstring.private.getNumRegions
910
+ charstring.program = specializeProgram(
911
+ charstring.program, getNumRegions, maxstack=maxStackLimit
912
+ )
913
+
914
+ if options.output_file is None:
915
+ from fontTools.misc.cliTools import makeOutputFileName
916
+
917
+ outfile = makeOutputFileName(
918
+ options.font, overWrite=True, suffix=".specialized"
919
+ )
920
+ else:
921
+ outfile = options.output_file
922
+ if outfile:
923
+ print("Saving", outfile)
924
+ font.save(outfile)
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/transforms.py ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.psCharStrings import (
2
+ SimpleT2Decompiler,
3
+ T2WidthExtractor,
4
+ calcSubrBias,
5
+ )
6
+
7
+
8
+ def _uniq_sort(l):
9
+ return sorted(set(l))
10
+
11
+
12
class StopHintCountEvent(Exception):
    """Raised to abort charstring execution once hint counting is done."""
14
+
15
+
16
class _DesubroutinizingT2Decompiler(SimpleT2Decompiler):
    """SimpleT2Decompiler subclass that computes, for each charstring it
    executes, a fully inlined ("desubroutinized") program with all
    callsubr/callgsubr calls expanded, cached on the charstring as
    ``_desubroutinized``.
    """

    # Operators that mark the end of the hint-declaration section; once one
    # of these is seen, no further stem hints can follow.
    stop_hintcount_ops = (
        "op_hintmask",
        "op_cntrmask",
        "op_rmoveto",
        "op_hmoveto",
        "op_vmoveto",
    )

    def __init__(self, localSubrs, globalSubrs, private=None):
        SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)

    def execute(self, charString):
        self.need_hintcount = True  # until proven otherwise
        # Instance attributes shadow the class methods, so these handlers
        # intercept the corresponding operators during execution.
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, self.stop_hint_count)

        if hasattr(charString, "_desubroutinized"):
            # If a charstring has already been desubroutinized, we will still
            # need to execute it if we need to count hints in order to
            # compute the byte length for mask arguments, and haven't finished
            # counting hints pairs.
            if self.need_hintcount and self.callingStack:
                try:
                    SimpleT2Decompiler.execute(self, charString)
                except StopHintCountEvent:
                    del self.callingStack[-1]
            return

        charString._patches = []
        SimpleT2Decompiler.execute(self, charString)
        desubroutinized = charString.program[:]
        # Splice each recorded subroutine expansion in place of its call,
        # working backwards so earlier indices stay valid.
        for idx, expansion in reversed(charString._patches):
            assert idx >= 2
            assert desubroutinized[idx - 1] in [
                "callsubr",
                "callgsubr",
            ], desubroutinized[idx - 1]
            assert type(desubroutinized[idx - 2]) == int
            if expansion[-1] == "return":
                expansion = expansion[:-1]
            desubroutinized[idx - 2 : idx] = expansion
        if not self.private.in_cff2:
            if "endchar" in desubroutinized:
                # Cut off after first endchar
                desubroutinized = desubroutinized[
                    : desubroutinized.index("endchar") + 1
                ]

        charString._desubroutinized = desubroutinized
        del charString._patches

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1] + self.localBias]
        SimpleT2Decompiler.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
        SimpleT2Decompiler.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def stop_hint_count(self, *args):
        self.need_hintcount = False
        # Remove the instance-attribute overrides so the class methods
        # handle these operators again.
        for op_name in self.stop_hintcount_ops:
            setattr(self, op_name, None)
        cs = self.callingStack[-1]
        if hasattr(cs, "_desubroutinized"):
            raise StopHintCountEvent()

    def op_hintmask(self, index):
        SimpleT2Decompiler.op_hintmask(self, index)
        if self.need_hintcount:
            self.stop_hint_count()

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        if not hasattr(cs, "_desubroutinized"):
            # Record the call site so execute() can splice in the subr's
            # already-computed expansion.
            cs._patches.append((index, subr._desubroutinized))
95
+
96
+
97
def desubroutinize(cff):
    """Inline all subroutine calls in every charstring of *cff* (in place)
    and remove the now-unused local and global subroutine indexes."""
    for fontName in cff.fontNames:
        font = cff[fontName]
        cs = font.CharStrings
        for c in cs.values():
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)
            c.program = c._desubroutinized
            del c._desubroutinized
        # Delete all the local subrs
        if hasattr(font, "FDArray"):
            # CID-keyed / CFF2: one Private dict per FD.
            for fd in font.FDArray:
                pd = fd.Private
                if hasattr(pd, "Subrs"):
                    del pd.Subrs
                if "Subrs" in pd.rawDict:
                    del pd.rawDict["Subrs"]
        else:
            pd = font.Private
            if hasattr(pd, "Subrs"):
                del pd.Subrs
            if "Subrs" in pd.rawDict:
                del pd.rawDict["Subrs"]
    # as well as the global subrs
    cff.GlobalSubrs.clear()
124
+
125
+
126
class _MarkingT2Decompiler(SimpleT2Decompiler):
    """SimpleT2Decompiler subclass that records which local/global
    subroutines are actually called, by adding each called (biased)
    index to a ``_used`` set attached to the subr index object.
    """

    def __init__(self, localSubrs, globalSubrs, private):
        SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
        for subrs in [localSubrs, globalSubrs]:
            if subrs and not hasattr(subrs, "_used"):
                subrs._used = set()

    def op_callsubr(self, index):
        self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
        SimpleT2Decompiler.op_callsubr(self, index)

    def op_callgsubr(self, index):
        self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
        SimpleT2Decompiler.op_callgsubr(self, index)
140
+
141
+
142
class _DehintingT2Decompiler(T2WidthExtractor):
    """T2WidthExtractor that analyzes charstrings for hint removal.

    Executing a charstring attaches a ``Hints`` record to it (and to every
    subroutine it calls, recursively) describing where hint-related
    operators end and whether the charstring could still contribute an
    implicit vstem to its caller.  Charstrings that need stripping are
    collected into the ``css`` set passed to the constructor.
    """

    class Hints(object):
        def __init__(self):
            # Whether calling this charstring produces any hint stems.
            # Note that if a charstring starts with hintmask, it will
            # have has_hint set to True, because it *might* produce an
            # implicit vstem if called under certain conditions.
            self.has_hint = False
            # Program index to start at to drop all hints.
            self.last_hint = 0
            # Program index up to which we know more hints are possible.
            # Only relevant if status is 0 or 1.
            self.last_checked = 0
            # The status means:
            # 0: after dropping hints, this charstring is empty
            # 1: after dropping hints, there may be more hints
            #    continuing after this, or there might be
            #    other things.  Not clear yet.
            # 2: no more hints possible after this charstring
            self.status = 0
            # Has hintmask instructions; not recursive.
            self.has_hintmask = False
            # Indices of calls to empty subroutines, to be removed.
            self.deletions = []

    def __init__(
        self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
    ):
        # css: set collecting every charstring that needs hint stripping.
        self._css = css
        T2WidthExtractor.__init__(
            self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
        )
        self.private = private

    def execute(self, charString):
        # Keep any previous analysis so we can sanity-check idempotence below.
        old_hints = getattr(charString, "_hints", None)
        charString._hints = self.Hints()

        T2WidthExtractor.execute(self, charString)

        hints = charString._hints

        if hints.has_hint or hints.has_hintmask:
            self._css.add(charString)

        if hints.status != 2:
            # Check from last_checked; make sure we didn't have any operators.
            for i in range(hints.last_checked, len(charString.program) - 1):
                if isinstance(charString.program[i], str):
                    hints.status = 2
                    break
            else:
                hints.status = 1  # There's *something* here
            hints.last_checked = len(charString.program)

        if old_hints:
            # Re-running the analysis must reach the same conclusions.
            assert hints.__dict__ == old_hints.__dict__

    def op_callsubr(self, index):
        subr = self.localSubrs[self.operandStack[-1] + self.localBias]
        T2WidthExtractor.op_callsubr(self, index)
        self.processSubr(index, subr)

    def op_callgsubr(self, index):
        subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
        T2WidthExtractor.op_callgsubr(self, index)
        self.processSubr(index, subr)

    def op_hstem(self, index):
        T2WidthExtractor.op_hstem(self, index)
        self.processHint(index)

    def op_vstem(self, index):
        T2WidthExtractor.op_vstem(self, index)
        self.processHint(index)

    def op_hstemhm(self, index):
        T2WidthExtractor.op_hstemhm(self, index)
        self.processHint(index)

    def op_vstemhm(self, index):
        T2WidthExtractor.op_vstemhm(self, index)
        self.processHint(index)

    def op_hintmask(self, index):
        rv = T2WidthExtractor.op_hintmask(self, index)
        self.processHintmask(index)
        return rv

    def op_cntrmask(self, index):
        rv = T2WidthExtractor.op_cntrmask(self, index)
        self.processHintmask(index)
        return rv

    def processHintmask(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hintmask = True
        if hints.status != 2:
            # Check from last_checked, see if we may be an implicit vstem.
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            else:
                # We are an implicit vstem
                hints.has_hint = True
                hints.last_hint = index + 1
                hints.status = 0
            hints.last_checked = index + 1

    def processHint(self, index):
        cs = self.callingStack[-1]
        hints = cs._hints
        hints.has_hint = True
        hints.last_hint = index
        hints.last_checked = index

    def processSubr(self, index, subr):
        cs = self.callingStack[-1]
        hints = cs._hints
        subr_hints = subr._hints

        # Check from last_checked, make sure we didn't have
        # any operators.
        if hints.status != 2:
            for i in range(hints.last_checked, index - 1):
                if isinstance(cs.program[i], str):
                    hints.status = 2
                    break
            hints.last_checked = index

        if hints.status != 2:
            if subr_hints.has_hint:
                hints.has_hint = True

                # Decide where to chop off from
                if subr_hints.status == 0:
                    hints.last_hint = index
                else:
                    hints.last_hint = index - 2  # Leave the subr call in

        elif subr_hints.status == 0:
            # Calling an empty subroutine after hints ended; mark the call
            # itself for deletion.
            hints.deletions.append(index)

        hints.status = max(hints.status, subr_hints.status)
+ def _cs_subset_subroutines(charstring, subrs, gsubrs):
293
+ p = charstring.program
294
+ for i in range(1, len(p)):
295
+ if p[i] == "callsubr":
296
+ assert isinstance(p[i - 1], int)
297
+ p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
298
+ elif p[i] == "callgsubr":
299
+ assert isinstance(p[i - 1], int)
300
+ p[i - 1] = (
301
+ gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
302
+ )
303
+
304
+
305
+ def _cs_drop_hints(charstring):
306
+ hints = charstring._hints
307
+
308
+ if hints.deletions:
309
+ p = charstring.program
310
+ for idx in reversed(hints.deletions):
311
+ del p[idx - 2 : idx]
312
+
313
+ if hints.has_hint:
314
+ assert not hints.deletions or hints.last_hint <= hints.deletions[0]
315
+ charstring.program = charstring.program[hints.last_hint :]
316
+ if not charstring.program:
317
+ # TODO CFF2 no need for endchar.
318
+ charstring.program.append("endchar")
319
+ if hasattr(charstring, "width"):
320
+ # Insert width back if needed
321
+ if charstring.width != charstring.private.defaultWidthX:
322
+ # For CFF2 charstrings, this should never happen
323
+ assert (
324
+ charstring.private.defaultWidthX is not None
325
+ ), "CFF2 CharStrings must not have an initial width value"
326
+ charstring.program.insert(
327
+ 0, charstring.width - charstring.private.nominalWidthX
328
+ )
329
+
330
+ if hints.has_hintmask:
331
+ i = 0
332
+ p = charstring.program
333
+ while i < len(p):
334
+ if p[i] in ["hintmask", "cntrmask"]:
335
+ assert i + 1 <= len(p)
336
+ del p[i : i + 2]
337
+ continue
338
+ i += 1
339
+
340
+ assert len(charstring.program)
341
+
342
+ del charstring._hints
343
+
344
+
345
def remove_hints(cff, *, removeUnusedSubrs: bool = True):
    """Drop all hinting from every font in *cff*, in place.

    Strips stem/hintmask operators from the charstrings and clears
    font-wide hinting values from the Private dict(s).  When
    *removeUnusedSubrs* is true, subroutines left unused by the stripping
    are pruned afterwards.
    """
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # This can be tricky, but doesn't have to.  What we do is:
        #
        # - Run all used glyph charstrings and recurse into subroutines,
        # - For each charstring (including subroutines), if it has any
        #   of the hint stem operators, we mark it as such.
        #   Upon returning, for each charstring we note all the
        #   subroutine calls it makes that (recursively) contain a stem,
        # - Dropping hinting then consists of the following two ops:
        #   * Drop the piece of the program in each charstring before the
        #     last call to a stem op or a stem-calling subroutine,
        #   * Drop all hintmask operations.
        # - It's trickier... A hintmask right after hints and a few numbers
        #   will act as an implicit vstemhm.  As such, we track whether
        #   we have seen any non-hint operators so far and do the right
        #   thing, recursively...  Good luck understanding that :(
        css = set()
        for c in cs.values():
            c.decompile()
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _DehintingT2Decompiler(
                css,
                subrs,
                c.globalSubrs,
                c.private.nominalWidthX,
                c.private.defaultWidthX,
                c.private,
            )
            decompiler.execute(c)
            c.width = decompiler.width
        for charstring in css:
            _cs_drop_hints(charstring)
        del css

        # Drop font-wide hinting values
        all_privs = []
        if hasattr(font, "FDArray"):
            all_privs.extend(fd.Private for fd in font.FDArray)
        else:
            all_privs.append(font.Private)
        for priv in all_privs:
            for key in (
                "BlueValues",
                "OtherBlues",
                "FamilyBlues",
                "FamilyOtherBlues",
                "BlueScale",
                "BlueShift",
                "BlueFuzz",
                "StemSnapH",
                "StemSnapV",
                "StdHW",
                "StdVW",
                "ForceBold",
                "LanguageGroup",
                "ExpansionFactor",
            ):
                if hasattr(priv, key):
                    setattr(priv, key, None)
    if removeUnusedSubrs:
        remove_unused_subroutines(cff)
+ def _pd_delete_empty_subrs(private_dict):
412
+ if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
413
+ if "Subrs" in private_dict.rawDict:
414
+ del private_dict.rawDict["Subrs"]
415
+ del private_dict.Subrs
416
+
417
+
418
def remove_unused_subroutines(cff):
    """Remove local/global subroutines that are never called, renumbering
    the surviving ones (and every call operand) accordingly, in place."""
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        # Renumber subroutines to remove unused ones

        # Mark all used subroutines
        for c in cs.values():
            subrs = getattr(c.private, "Subrs", [])
            decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
            decompiler.execute(c)

        all_subrs = [font.GlobalSubrs]
        if hasattr(font, "FDArray"):
            all_subrs.extend(
                fd.Private.Subrs
                for fd in font.FDArray
                if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
            )
        elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
            all_subrs.append(font.Private.Subrs)

        # NOTE: a leftover no-op statement (``subrs = set(subrs)``) was
        # removed here; its result was immediately overwritten by the
        # ``for subrs in all_subrs`` loop below.

        # Prepare
        for subrs in all_subrs:
            if not hasattr(subrs, "_used"):
                subrs._used = set()
            subrs._used = _uniq_sort(subrs._used)
            subrs._old_bias = calcSubrBias(subrs)
            subrs._new_bias = calcSubrBias(subrs._used)

        # Renumber glyph charstrings
        for c in cs.values():
            subrs = getattr(c.private, "Subrs", None)
            _cs_subset_subroutines(c, subrs, font.GlobalSubrs)

        # Renumber subroutines themselves
        for subrs in all_subrs:
            if subrs == font.GlobalSubrs:
                if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
                    local_subrs = font.Private.Subrs
                elif (
                    hasattr(font, "FDArray")
                    and len(font.FDArray) == 1
                    and hasattr(font.FDArray[0].Private, "Subrs")
                ):
                    # Technically we shouldn't do this. But I've run into fonts that do it.
                    local_subrs = font.FDArray[0].Private.Subrs
                else:
                    local_subrs = None
            else:
                local_subrs = subrs

            subrs.items = [subrs.items[i] for i in subrs._used]
            # Invalidate any lazy-loading state so the pruned index is
            # re-serialized from scratch.
            if hasattr(subrs, "file"):
                del subrs.file
            if hasattr(subrs, "offsets"):
                del subrs.offsets

            for subr in subrs.items:
                _cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs)

        # Delete local SubrsIndex if empty
        if hasattr(font, "FDArray"):
            for fd in font.FDArray:
                _pd_delete_empty_subrs(fd.Private)
        else:
            _pd_delete_empty_subrs(font.Private)

        # Cleanup
        for subrs in all_subrs:
            del subrs._used, subrs._old_bias, subrs._new_bias
infer_4_47_1/lib/python3.10/site-packages/fontTools/cffLib/width.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """T2CharString glyph width optimizer.
4
+
5
+ CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
6
+ value do not need to specify their width in their charstring, saving bytes.
7
+ This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
8
+ values for a font, when provided with a list of glyph widths."""
9
+
10
+ from fontTools.ttLib import TTFont
11
+ from collections import defaultdict
12
+ from operator import add
13
+ from functools import reduce
14
+
15
+
16
+ __all__ = ["optimizeWidths", "main"]
17
+
18
+
19
class missingdict(dict):
    """A dict that computes values for absent keys via *missing_func*.

    Unlike ``collections.defaultdict``, the computed value is returned
    but NOT stored in the dictionary.
    """

    def __init__(self, missing_func):
        self.missing_func = missing_func

    def __missing__(self, v):
        # Called by dict.__getitem__ for absent keys; result is not cached.
        return self.missing_func(v)
+
27
def cumSum(f, op=add, start=0, decreasing=False):
    """Return a running accumulation of *f*, a mapping ``x -> value``.

    The result maps every integer x in [min(f), max(f)] to the op-fold of
    all values at keys <= x (or >= x when *decreasing*).  Lookups outside
    that range yield *start* on the "empty" side and the grand total on
    the other.  *f* must tolerate lookups of absent in-range keys (e.g. a
    ``defaultdict``), since the whole integer range is scanned.
    """
    keys = sorted(f.keys())
    lo, hi = keys[0], keys[-1]

    total = reduce(op, f.values(), start)

    if decreasing:
        fallback = lambda x: start if x > hi else total
        domain = range(hi, lo - 1, -1)
    else:
        fallback = lambda x: start if x < lo else total
        domain = range(lo, hi + 1)

    out = missingdict(fallback)

    acc = start
    for x in domain:
        acc = op(acc, f[x])
        out[x] = acc

    return out
+
50
def byteCost(widths, default, nominal):
    """Return the number of bytes needed to encode *widths* given the
    chosen *default* and *nominal* width values.

    *widths* may be a list of widths or a mapping width -> frequency.
    Widths equal to *default* cost nothing; others cost 1, 2 or 5 bytes
    depending on the distance from *nominal* (T2 charstring number
    encoding thresholds 107 and 1131).
    """
    if not hasattr(widths, "items"):
        freqs = defaultdict(int)
        for w in widths:
            freqs[w] += 1
        widths = freqs

    cost = 0
    for w, freq in widths.items():
        if w == default:
            continue
        delta = abs(w - nominal)
        if delta <= 107:
            cost += freq  # one-byte number
        elif delta <= 1131:
            cost += freq * 2  # two-byte number
        else:
            cost += freq * 5  # five-byte number
    return cost
+
71
def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
    # NOTE: an unused ``bestCostWithoutDefault`` computation (a full
    # O(|domain|) pass of byteCost whose result was never read) was removed.

    d = defaultdict(int)
    for w in widths:
        d[w] += 1

    # Maximum number of bytes using default can possibly save
    maxDefaultAdvantage = 5 * max(d.values())

    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))

    # Upper bound: every width encoded with the worst (5-byte) cost.
    bestCost = len(widths) * 5 + 1
    for nominal in domain:
        # Prune: even the best possible default cannot recover this much.
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal

    return bestDefault, bestNominal
+
100
def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.

    This algorithm is linear in UPEM+numGlyphs."""
    # NOTE: an unused local (``bestC = bestCost[nominal]``) was removed; the
    # algorithm itself is unchanged.

    if not hasattr(widths, "items"):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))

    # Cumulative sum/max forward/backward.
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)

    # Cost per nominal choice, without default consideration.
    # Thresholds 108/1132 are one past the 1-byte/2-byte encoding limits.
    nomnCostU = missingdict(
        lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
    )
    nomnCostD = missingdict(
        lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
    )
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])

    # Cost-saving per nominal choice, by best default choice.
    dfltCostU = missingdict(
        lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
    )
    dfltCostD = missingdict(
        lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
    )
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))

    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])

    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])

    # Work back the best default.
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        # The best-saving default lies at or below the nominal.
        starts = [nominal, nominal - 108, nominal - 1132]
        for start in starts:
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        starts = [nominal, nominal + 108, nominal + 1132]
        for start in starts:
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))

    return default, nominal
+
167
def main(args=None):
    """Calculate optimum defaultWidthX/nominalWidthX values"""

    import argparse

    # NOTE: the docstring doubles as the CLI description below.
    parser = argparse.ArgumentParser(
        "fonttools cffLib.width",
        description=main.__doc__,
    )
    parser.add_argument(
        "inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
    )
    parser.add_argument(
        "-b",
        "--brute-force",
        dest="brute",
        action="store_true",
        help="Use brute-force approach (VERY slow)",
    )

    args = parser.parse_args(args)

    optimizer = optimizeWidthsBruteforce if args.brute else optimizeWidths
    for path in args.inputs:
        font = TTFont(path)
        hmtx = font["hmtx"]
        # Advance widths only; left side bearings are irrelevant here.
        widths = [m[0] for m in hmtx.metrics.values()]
        default, nominal = optimizer(widths)
        print(
            "glyphs=%d default=%d nominal=%d byteCost=%d"
            % (len(widths), default, nominal, byteCost(widths, default, nominal))
        )
+
202
+
203
if __name__ == "__main__":
    import sys

    # With no arguments, run the module doctests instead of the CLI.
    if len(sys.argv) == 1:
        import doctest

        sys.exit(doctest.testmod().failed)
    main()
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .cu2qu import *
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__main__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
import sys

from .cli import _main as main

if __name__ == "__main__":
    # Delegate to the cu2qu command-line entry point; its return value
    # becomes the process exit status.
    sys.exit(main())
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (288 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/benchmark.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/cli.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/cu2qu.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/errors.cpython-310.pyc ADDED
Binary file (3.59 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/__pycache__/ufo.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/benchmark.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Benchmark the cu2qu algorithm performance."""
2
+
3
+ from .cu2qu import *
4
+ import random
5
+ import timeit
6
+
7
+ MAX_ERR = 0.05
8
+
9
+
10
def generate_curve():
    """Return a random cubic Bezier: four (x, y) points whose coordinates
    are integer-valued floats in [0, 2048]."""
    return [
        tuple(float(random.randint(0, 2048)) for _ in range(2)) for _ in range(4)
    ]
17
def setup_curve_to_quadratic():
    """Build the argument tuple for one curve_to_quadratic call."""
    return generate_curve(), MAX_ERR
21
def setup_curves_to_quadratic():
    """Build the argument tuple for a curves_to_quadratic call: three
    random curves with one max-error value each."""
    count = 3
    curves = [generate_curve() for _ in range(count)]
    return curves, [MAX_ERR] * count
26
def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
    """Time *function* (resolved via globals()) fed by its matching
    ``setup_<function>[_<suffix>]`` helper, printing the best per-call
    time in microseconds."""
    setup_func = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_func += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    # Resolve the names once, then hand timeit a zero-argument callable.
    target = globals()[function]
    make_args = globals()[setup_func]

    def timed():
        return target(*make_args())

    results = timeit.repeat(timed, repeat=repeat, number=number)
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))
47
def main():
    """Run both cu2qu conversion benchmarks."""
    for func in ("curve_to_quadratic", "curves_to_quadratic"):
        run_benchmark("cu2qu", func)


if __name__ == "__main__":
    # Fixed seed so successive benchmark runs use identical curves.
    random.seed(1)
    main()
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cli.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import argparse
3
+ import logging
4
+ import shutil
5
+ import multiprocessing as mp
6
+ from contextlib import closing
7
+ from functools import partial
8
+
9
+ import fontTools
10
+ from .ufo import font_to_quadratic, fonts_to_quadratic
11
+
12
# Prefer ufoLib2, fall back to defcon; leave ufo_module as None if neither
# is installed (checked at runtime in _main, which reports a clear error).
# NOTE: the unused ``as e`` binding on the inner ImportError was removed.
ufo_module = None
try:
    import ufoLib2 as ufo_module
except ImportError:
    try:
        import defcon as ufo_module
    except ImportError:
        pass
+
21
+
22
+ logger = logging.getLogger("fontTools.cu2qu")
23
+
24
+
25
+ def _cpu_count():
26
+ try:
27
+ return mp.cpu_count()
28
+ except NotImplementedError: # pragma: no cover
29
+ return 1
30
+
31
+
32
def open_ufo(path):
    """Open a UFO at *path* with whichever UFO library is available."""
    # ufoLib2 exposes a classmethod ``open``; defcon takes the path in
    # its constructor.
    font_class = ufo_module.Font
    if hasattr(font_class, "open"):  # ufoLib2
        return font_class.open(path)
    return font_class(path)  # defcon
+
37
+
38
def _font_to_quadratic(input_path, output_path=None, **kwargs):
    """Convert one UFO's curves; save in place or into *output_path*.

    When nothing was converted but an explicit *output_path* was given,
    the input tree is copied there unchanged.
    """
    ufo = open_ufo(input_path)
    logger.info("Converting curves for %s", input_path)
    if not font_to_quadratic(ufo, **kwargs):
        if output_path:
            _copytree(input_path, output_path)
        return
    logger.info("Saving %s", output_path)
    if output_path:
        ufo.save(output_path)
    else:
        ufo.save()  # save in-place
+
50
+
51
+ def _samepath(path1, path2):
52
+ # TODO on python3+, there's os.path.samefile
53
+ path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
54
+ path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
55
+ return path1 == path2
56
+
57
+
58
def _copytree(input_path, output_path):
    """Replace *output_path* with a fresh copy of the *input_path* tree.

    A no-op when both arguments resolve to the same location.
    """
    if _samepath(input_path, output_path):
        logger.debug("input and output paths are the same file; skipped copy")
        return
    # copytree refuses to overwrite, so clear the destination first.
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    shutil.copytree(input_path, output_path)
+
66
+
67
def _main(args=None):
    """Convert one or more UFO fonts from cubic to quadratic curves.

    Parses *args* (or sys.argv), then converts either interpolatably
    (all fonts together, keeping compatibility) or each font
    independently, optionally across multiple processes.

    NOTE: fixed user-facing typos in help/error strings
    ("maxiumum" -> "maximum", "to used mixed" -> "to use mixed",
    "multile" -> "multiple").
    """
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=None,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-m",
        "--mixed",
        default=False,
        action="store_true",
        help="whether to use mixed quadratic and cubic curves",
    )
    parser.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="do not reverse the contour direction",
    )

    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility",
    )
    mode_parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        nargs="?",
        default=1,
        const=_cpu_count(),
        metavar="N",
        help="Convert using N multiple processes (default: %(default)s)",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=(
            "output filename for the converted UFO. By default fonts are "
            "modified in place. This only works with a single input."
        ),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs",
    )

    options = parser.parse_args(args)

    if ufo_module is None:
        parser.error("Either ufoLib2 or defcon are required to run this script.")

    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multiple inputs")

    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        # save in-place
        output_paths = [None] * len(options.infiles)

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        reverse_direction=options.reverse_direction,
        all_quadratic=False if options.mixed else True,
    )

    if options.interpolatable:
        logger.info("Converting curves compatibly")
        ufos = [open_ufo(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, **kwargs):
            for ufo, output_path in zip(ufos, output_paths):
                logger.info("Saving %s", output_path)
                if output_path:
                    ufo.save(output_path)
                else:
                    ufo.save()
        else:
            # Nothing was converted; still honor explicit output locations.
            for input_path, output_path in zip(options.infiles, output_paths):
                if output_path:
                    _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
        if jobs > 1:
            func = partial(_font_to_quadratic, **kwargs)
            logger.info("Running %d parallel processes", jobs)
            with closing(mp.Pool(jobs)) as pool:
                pool.starmap(func, zip(options.infiles, output_paths))
        else:
            for input_path, output_path in zip(options.infiles, output_paths):
                _font_to_quadratic(input_path, output_path, **kwargs)
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cu2qu.c ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/cu2qu.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=3
2
+ # distutils: define_macros=CYTHON_TRACE_NOGIL=1
3
+
4
+ # Copyright 2015 Google Inc. All Rights Reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ try:
19
+ import cython
20
+ except (AttributeError, ImportError):
21
+ # if cython not installed, use mock module with no-op decorators and types
22
+ from fontTools.misc import cython
23
+ COMPILED = cython.compiled
24
+
25
+ import math
26
+
27
+ from .errors import Error as Cu2QuError, ApproxNotFoundError
28
+
29
+
30
+ __all__ = ["curve_to_quadratic", "curves_to_quadratic"]
31
+
32
+ MAX_N = 100
33
+
34
+ NAN = float("NaN")
35
+
36
+
37
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex)
def dot(v1, v2):
    """Return the dot product of two vectors.

    Vectors are 2D points packed into complex numbers, so the dot product
    is the real part of ``v1 * conj(v2)``.

    Args:
        v1 (complex): First vector.
        v2 (complex): Second vector.

    Returns:
        double: Dot product.
    """
    return (v1 * v2.conjugate()).real
54
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
    """Convert cubic polynomial coefficients (a*t^3 + b*t^2 + c*t + d)
    back into the four Bezier control points.

    Inverse of calc_cubic_parameters.  (Local names _1.._4 are kept as
    declared in the @cython.locals typing above.)
    """
    _1 = d
    _2 = (c / 3.0) + d
    _3 = (b + c) / 3.0 + _2
    _4 = a + d + c + b
    return _1, _2, _3, _4
68
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
    """Convert Bezier control points p0..p3 into the coefficients of the
    cubic polynomial a*t^3 + b*t^2 + c*t + d.

    Inverse of calc_cubic_points.
    """
    c = (p1 - p0) * 3.0
    b = (p2 - p1) * 3.0 - c
    d = p0
    a = p3 - d - c - b
    return a, b, c, d
82
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
    """Split a cubic Bezier into n equal parts.

    Splits the curve into `n` equal parts by curve time.
    (t=0..1/n, t=1/n..2/n, ...)

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        An iterator yielding the control points (four complex values) of the
        subcurves.
    """
    # Hand-coded special-cases: 2 and 3 have closed forms, and 4 and 6 are
    # cheaper as recursive halvings/thirdings of those.
    if n == 2:
        return iter(split_cubic_into_two(p0, p1, p2, p3))
    if n == 3:
        return iter(split_cubic_into_three(p0, p1, p2, p3))
    if n == 4:
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_two(a[0], a[1], a[2], a[3])
            + split_cubic_into_two(b[0], b[1], b[2], b[3])
        )
    if n == 6:
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_three(a[0], a[1], a[2], a[3])
            + split_cubic_into_three(b[0], b[1], b[2], b[3])
        )

    # General case: polynomial re-parameterization, done in a generator.
    return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
124
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
    a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
    """Yield the control points of the n equal-time segments of the cubic.

    Works in polynomial space: each segment's coefficients are obtained by
    substituting t -> t1 + t*dt into a*t^3 + b*t^2 + c*t + d, then mapped
    back to Bezier control points.
    """
    a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
    dt = 1 / n
    delta_2 = dt * dt
    delta_3 = dt * delta_2
    for i in range(n):
        t1 = i * dt
        t1_2 = t1 * t1
        # calc new a, b, c and d
        a1 = a * delta_3
        b1 = (3 * a * t1 + b) * delta_2
        c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
        d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
        yield calc_cubic_points(a1, b1, c1, d1)
154
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
    """Split a cubic Bezier into two equal parts.

    Splits the curve into two equal parts at t = 0.5

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Two cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # mid is the on-curve point at t=0.5; deriv3 is the handle offset on
    # either side of it (closed-form subdivision at the midpoint).
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return (
        (p0, (p0 + p1) * 0.5, mid - deriv3, mid),
        (mid, mid + deriv3, (p2 + p3) * 0.5, p3),
    )
181
+
182
+
183
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(
    mid1=cython.complex,
    deriv1=cython.complex,
    mid2=cython.complex,
    deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3):
    """Split a cubic Bezier into three equal parts.

    Splits the curve into three equal parts at t = 1/3 and t = 2/3

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Three cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # mid1/mid2 are the on-curve points at t=1/3 and t=2/3; deriv1/deriv2 are
    # the handle offsets around them (closed-form subdivision).
    mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
    deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
    mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
    deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
    return (
        (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
        (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
        (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
    )
221
+
222
+
223
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
    """Approximate a cubic Bezier using a quadratic one.

    Args:
        t (double): Position of control point.
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        complex: Location of candidate control point on quadratic curve.
    """
    # _p1/_p2 are the quadratic handles that preserve the cubic's start and
    # end tangents respectively; blend between them by t.
    _p1 = p0 + (p1 - p0) * 1.5
    _p2 = p3 + (p2 - p3) * 1.5
    return _p1 + (_p2 - _p1) * t
250
+
251
+
252
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
def calc_intersect(a, b, c, d):
    """Calculate the intersection of two lines.

    Args:
        a (complex): Start point of first line.
        b (complex): End point of first line.
        c (complex): Start point of second line.
        d (complex): End point of second line.

    Returns:
        complex: Location of intersection if one present, ``complex(NaN,NaN)``
        if no intersection was found.
    """
    ab = b - a
    cd = d - c
    # Multiplying by 1j rotates ab by 90 degrees: p is normal to line a-b.
    p = ab * 1j
    try:
        # Ratio of projections onto the normal gives the parameter of the
        # intersection along line c-d.
        h = dot(p, a - c) / dot(p, cd)
    except ZeroDivisionError:
        # Parallel lines: no single intersection point.
        return complex(NAN, NAN)
    return c + cd * h
278
+
279
+
280
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that no
    checks are made on the start and end positions of the curve; this function
    only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # If both handles are within tolerance, the whole convex hull is too.
    # First check p2 then p1, as p2 has higher error early on.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    # Subdivide at t=0.5 (same closed form as split_cubic_into_two) and
    # recurse into both halves.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
320
+
321
+
322
@cython.cfunc
@cython.inline
@cython.locals(tolerance=cython.double)
@cython.locals(
    q1=cython.complex,
    c0=cython.complex,
    c1=cython.complex,
    c2=cython.complex,
    c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance):
    """Approximate a cubic Bezier with a single quadratic within a given tolerance.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        tolerance (double): Permitted deviation from the original curve.

    Returns:
        Three complex numbers representing control points of the quadratic
        curve if it fits within the given tolerance, or ``None`` if no suitable
        curve could be calculated.
    """

    # Candidate quadratic control point: intersection of the end tangents.
    q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
    if math.isnan(q1.imag):
        # Tangents are parallel: no single quadratic can fit.
        return None
    c0 = cubic[0]
    c3 = cubic[3]
    # c1/c2: cubic handles equivalent to the candidate quadratic, used to
    # measure its deviation from the original cubic.
    c1 = c0 + (q1 - c0) * (2 / 3)
    c2 = c3 + (q1 - c3) * (2 / 3)
    # Endpoints coincide (deltas 0), so only the handle deltas are checked.
    if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
        return None
    return c0, q1, c3
356
+
357
+
358
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
@cython.locals(all_quadratic=cython.int)
@cython.locals(
    c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
    q0=cython.complex,
    q1=cython.complex,
    next_q1=cython.complex,
    q2=cython.complex,
    d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        n (int): Number of quadratic Bezier curves in the spline.
        tolerance (double): Permitted deviation from the original curve.
        all_quadratic (bool): If False, the original cubic itself may be
            returned when a single quadratic does not fit.

    Returns:
        A list of ``n+2`` complex numbers, representing control points of the
        quadratic spline if it fits within the given tolerance, or ``None`` if
        no suitable spline could be calculated.
    """

    if n == 1:
        return cubic_approx_quadratic(cubic, tolerance)
    # Fixed: idiomatic truthiness test (was `all_quadratic == False`, PEP 8
    # E712); same semantics for the bool/int values this receives.
    if n == 2 and not all_quadratic:
        # Caller accepts cubics, so keep the exact original curve rather than
        # approximating it with two quadratics.
        return cubic

    cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)

    # calculate the spline of quadratics and check errors at the same time.
    next_cubic = next(cubics)
    next_q1 = cubic_approx_control(
        0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
    )
    q2 = cubic[0]
    d1 = 0j
    spline = [cubic[0], next_q1]
    for i in range(1, n + 1):
        # Current cubic to convert
        c0, c1, c2, c3 = next_cubic

        # Current quadratic approximation of current cubic
        q0 = q2
        q1 = next_q1
        if i < n:
            next_cubic = next(cubics)
            next_q1 = cubic_approx_control(
                i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
            )
            spline.append(next_q1)
            # Implied on-curve point: midpoint of consecutive handles.
            q2 = (q1 + next_q1) * 0.5
        else:
            q2 = c3

        # End-point deltas
        d0 = d1
        d1 = q2 - c3

        # Reject as soon as either the on-curve deviation or the deviation of
        # the difference curve (quadratic segment minus cubic slice, expressed
        # in cubic form) exceeds the tolerance.
        if abs(d1) > tolerance or not cubic_farthest_fit_inside(
            d0,
            q0 + (q1 - q0) * (2 / 3) - c1,
            q2 + (q1 - q2) * (2 / 3) - c2,
            d1,
            tolerance,
        ):
            return None
    spline.append(cubic[3])

    return spline
434
+
435
+
436
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
@cython.locals(all_quadratic=cython.int)
def curve_to_quadratic(curve, max_err, all_quadratic=True):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        cubic (sequence): Four 2D tuples representing control points of
            the cubic Bezier curve.
        max_err (double): Permitted deviation from the original curve.
        all_quadratic (bool): If True (default) returned value is a
            quadratic spline. If False, it's either a single quadratic
            curve or a single cubic curve.

    Returns:
        If all_quadratic is True: A list of 2D tuples, representing
        control points of the quadratic spline if it fits within the
        given tolerance, or ``None`` if no suitable spline could be
        calculated.

        If all_quadratic is False: Either a quadratic curve (if length
        of output is 3), or a cubic curve (if length of output is 4).
    """

    # Points are handled as complex numbers (x + y*1j) for fast 2D math.
    curve = [complex(*p) for p in curve]

    # Try progressively longer splines until one fits within max_err.
    for n in range(1, MAX_N + 1):
        spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
        if spline is not None:
            # done. go home
            return [(s.real, s.imag) for s in spline]

    raise ApproxNotFoundError(curve)
469
+
470
+
471
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
@cython.locals(all_quadratic=cython.int)
def curves_to_quadratic(curves, max_errors, all_quadratic=True):
    """Return quadratic Bezier splines approximating the input cubic Beziers.

    Args:
        curves: A sequence of *n* curves, each curve being a sequence of four
            2D tuples.
        max_errors: A sequence of *n* floats representing the maximum permissible
            deviation from each of the cubic Bezier curves.
        all_quadratic (bool): If True (default) returned values are a
            quadratic spline. If False, they are either a single quadratic
            curve or a single cubic curve.

    Example::

        >>> curves_to_quadratic( [
        ... [ (50,50), (100,100), (150,100), (200,50) ],
        ... [ (75,50), (120,100), (150,75), (200,60) ]
        ... ], [1,1] )
        [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]

    The returned splines have "implied oncurve points" suitable for use in
    TrueType ``glif`` outlines - i.e. in the first spline returned above,
    the first quadratic segment runs from (50,50) to
    ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).

    Returns:
        If all_quadratic is True, a list of splines, each spline being a list
        of 2D tuples.

        If all_quadratic is False, a list of curves, each curve being a quadratic
        (length 3), or cubic (length 4).

    Raises:
        fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
        can be found for all curves with the given parameters.
    """

    # Points are handled as complex numbers (x + y*1j) for fast 2D math.
    curves = [[complex(*p) for p in curve] for curve in curves]
    assert len(max_errors) == len(curves)

    l = len(curves)
    splines = [None] * l
    last_i = i = 0
    n = 1
    # Round-robin over the curves: whenever one curve fails at the current
    # spline length n, bump n and keep cycling until every curve in a row
    # succeeds at the same n, so results stay interpolation-compatible.
    while True:
        spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
        if spline is None:
            if n == MAX_N:
                break
            n += 1
            last_i = i
            continue
        splines[i] = spline
        i = (i + 1) % l
        if i == last_i:
            # done. go home
            return [[(s.real, s.imag) for s in spline] for spline in splines]

    raise ApproxNotFoundError(curves)
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/errors.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2016 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
class Error(Exception):
    """Base Cu2Qu exception class for all other errors.

    Catching this type catches every exception defined by this module.
    """
18
+
19
+
20
class ApproxNotFoundError(Error):
    """Raised when no quadratic approximation within tolerance exists."""

    def __init__(self, curve):
        # Keep the offending curve around so callers can inspect it.
        self.curve = curve
        super().__init__("no approximation found: %s" % curve)
25
+
26
+
27
class UnequalZipLengthsError(Error):
    """Raised when sequences zipped together have differing lengths."""

    pass
29
+
30
+
31
class IncompatibleGlyphsError(Error):
    """Raised when glyphs that must stay compatible have mismatched outlines."""

    def __init__(self, glyphs):
        assert len(glyphs) > 1
        self.glyphs = glyphs
        # Summarize all distinct glyph names into a single display name.
        names = {repr(glyph.name) for glyph in glyphs}
        if len(names) == 1:
            self.combined_name = names.pop()
        else:
            self.combined_name = "{%s}" % ", ".join(sorted(names))

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, self.combined_name)
43
+
44
+
45
class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
    """Raised when compatible glyphs have different segment counts."""

    def __str__(self):
        # combined_name is computed by IncompatibleGlyphsError.__init__.
        return "Glyphs named %s have different number of segments" % (
            self.combined_name
        )
50
+
51
+
52
class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
    """Raised when same-index segments across glyphs have different types."""

    def __init__(self, glyphs, segments):
        IncompatibleGlyphsError.__init__(self, glyphs)
        # Mapping of segment index -> list of the mismatched segment tags.
        self.segments = segments

    def __str__(self):
        lines = []
        # Right-justify indices so the per-segment report lines up.
        ndigits = len(str(max(self.segments)))
        for i, tags in sorted(self.segments.items()):
            lines.append(
                "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
            )
        return "Glyphs named %s have incompatible segment types:\n %s" % (
            self.combined_name,
            "\n ".join(lines),
        )
68
+
69
+
70
class IncompatibleFontsError(Error):
    """Raised when one or more same-named glyphs across fonts failed to convert."""

    def __init__(self, glyph_errors):
        # Mapping of glyph name -> the IncompatibleGlyphsError raised for it.
        self.glyph_errors = glyph_errors

    def __str__(self):
        names = sorted(self.glyph_errors.keys())
        return "fonts contains incompatible glyphs: %s" % (
            ", ".join(repr(name) for name in names)
        )
infer_4_47_1/lib/python3.10/site-packages/fontTools/cu2qu/ufo.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2015 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ """Converts cubic bezier curves to quadratic splines.
17
+
18
+ Conversion is performed such that the quadratic splines keep the same end-curve
19
+ tangents as the original cubics. The approach is iterative, increasing the
20
+ number of segments for a spline until the error gets below a bound.
21
+
22
+ Respective curves from multiple fonts will be converted at once to ensure that
23
+ the resulting splines are interpolation-compatible.
24
+ """
25
+
26
+ import logging
27
+ from fontTools.pens.basePen import AbstractPen
28
+ from fontTools.pens.pointPen import PointToSegmentPen
29
+ from fontTools.pens.reverseContourPen import ReverseContourPen
30
+
31
+ from . import curves_to_quadratic
32
+ from .errors import (
33
+ UnequalZipLengthsError,
34
+ IncompatibleSegmentNumberError,
35
+ IncompatibleSegmentTypesError,
36
+ IncompatibleGlyphsError,
37
+ IncompatibleFontsError,
38
+ )
39
+
40
+
41
+ __all__ = ["fonts_to_quadratic", "font_to_quadratic"]
42
+
43
+ # The default approximation error below is a relative value (1/1000 of the EM square).
44
+ # Later on, we convert it to absolute font units by multiplying it by a font's UPEM
45
+ # (see fonts_to_quadratic).
46
+ DEFAULT_MAX_ERR = 0.001
47
+ CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type"
48
+
49
+ logger = logging.getLogger(__name__)
50
+
51
+
52
+ _zip = zip
53
+
54
+
55
def zip(*args):
    """Strict zip: every argument must have the same length.

    Returns an eagerly-built list of tuples (python 2/3 compatibility).
    Raises UnequalZipLengthsError when the lengths differ.
    """
    lengths = {len(arg) for arg in args}
    if len(lengths) != 1:
        raise UnequalZipLengthsError(*args)
    return list(_zip(*args))
63
+
64
+
65
class GetSegmentsPen(AbstractPen):
    """Pen that records drawing commands as a flat list of segments.

    Each recorded segment is a ``(tag, points)`` tuple. Curve segments are
    stored together with their initial on-curve point, so some points are
    duplicated between consecutive segments.
    """

    def __init__(self):
        # Most recent on-curve point; prepended to curve/qcurve segments.
        self._last_pt = None
        self.segments = []

    def _add_segment(self, tag, *args):
        if tag in {"move", "line", "qcurve", "curve"}:
            # These tags end on an on-curve point; remember it.
            self._last_pt = args[-1]
        self.segments.append((tag, args))

    def moveTo(self, pt):
        self._add_segment("move", pt)

    def lineTo(self, pt):
        self._add_segment("line", pt)

    def qCurveTo(self, *points):
        self._add_segment("qcurve", self._last_pt, *points)

    def curveTo(self, *points):
        self._add_segment("curve", self._last_pt, *points)

    def closePath(self):
        self._add_segment("close")

    def endPath(self):
        self._add_segment("end")

    def addComponent(self, glyphName, transformation):
        # Components are intentionally ignored; only outlines are collected.
        pass
101
+
102
+
103
def _get_segments(glyph):
    """Get a glyph's segments as extracted by GetSegmentsPen.

    Returns a list of ``(tag, points)`` tuples.
    """

    pen = GetSegmentsPen()
    # glyph.draw(pen)
    # We can't simply draw the glyph with the pen, but we must initialize the
    # PointToSegmentPen explicitly with outputImpliedClosingLine=True.
    # By default PointToSegmentPen does not outputImpliedClosingLine -- unless
    # last and first point on closed contour are duplicated. Because we are
    # converting multiple glyphs at the same time, we want to make sure
    # this function returns the same number of segments, whether or not
    # the last and first point overlap.
    # https://github.com/googlefonts/fontmake/issues/572
    # https://github.com/fonttools/fonttools/pull/1720
    pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True)
    glyph.drawPoints(pointPen)
    return pen.segments
120
+
121
+
122
+ def _set_segments(glyph, segments, reverse_direction):
123
+ """Draw segments as extracted by GetSegmentsPen back to a glyph."""
124
+
125
+ glyph.clearContours()
126
+ pen = glyph.getPen()
127
+ if reverse_direction:
128
+ pen = ReverseContourPen(pen)
129
+ for tag, args in segments:
130
+ if tag == "move":
131
+ pen.moveTo(*args)
132
+ elif tag == "line":
133
+ pen.lineTo(*args)
134
+ elif tag == "curve":
135
+ pen.curveTo(*args[1:])
136
+ elif tag == "qcurve":
137
+ pen.qCurveTo(*args[1:])
138
+ elif tag == "close":
139
+ pen.closePath()
140
+ elif tag == "end":
141
+ pen.endPath()
142
+ else:
143
+ raise AssertionError('Unhandled segment type "%s"' % tag)
144
+
145
+
146
def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
    """Return quadratic (or short cubic) approximations of cubic segments.

    Also updates *stats*, a dict counting how many splines of each length
    were produced.
    """

    assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"

    new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
    n = len(new_points[0])
    assert all(len(pts) == n for pts in new_points[1:]), "Converted incompatibly"

    spline_length = str(n - 2)
    stats[spline_length] = stats.get(spline_length, 0) + 1

    # A 3-point result is a single quadratic even in mixed (cubic-allowed) mode.
    tag = "qcurve" if all_quadratic or n == 3 else "curve"
    return [(tag, pts) for pts in new_points]
162
+
163
+
164
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
    """Do the actual conversion of a set of compatible glyphs, after arguments
    have been set up.

    Return True if the glyphs were modified, else return False.
    """

    try:
        # Transpose: group the i-th segment of every glyph together; the
        # strict zip raises if the glyphs have differing segment counts.
        segments_by_location = zip(*[_get_segments(g) for g in glyphs])
    except UnequalZipLengthsError:
        raise IncompatibleSegmentNumberError(glyphs)
    if not any(segments_by_location):
        # All glyphs are empty; nothing to convert.
        return False

    # always modify input glyphs if reverse_direction is True
    glyphs_modified = reverse_direction

    new_segments_by_location = []
    incompatible = {}
    for i, segments in enumerate(segments_by_location):
        tag = segments[0][0]
        if not all(s[0] == tag for s in segments[1:]):
            # Record the mismatch but keep scanning, so the final error can
            # report every offending segment index at once.
            incompatible[i] = [s[0] for s in segments]
        elif tag == "curve":
            new_segments = _segments_to_quadratic(
                segments, max_err, stats, all_quadratic
            )
            if all_quadratic or new_segments != segments:
                glyphs_modified = True
            segments = new_segments
        new_segments_by_location.append(segments)

    if glyphs_modified:
        # Transpose back and write the converted segments into each glyph.
        new_segments_by_glyph = zip(*new_segments_by_location)
        for glyph, new_segments in zip(glyphs, new_segments_by_glyph):
            _set_segments(glyph, new_segments, reverse_direction)

    if incompatible:
        # Raised only after compatible segments have been written back above.
        raise IncompatibleSegmentTypesError(glyphs, segments=incompatible)
    return glyphs_modified
204
+
205
+
206
def glyphs_to_quadratic(
    glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
    """Convert the curves of a set of compatible of glyphs to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling glyphs_to_quadratic with one
    glyph at a time may yield slightly more optimized results.

    Return True if glyphs were modified, else return False.

    Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines.
    """
    stats = {} if stats is None else stats

    # Default to a reasonable absolute error, assuming a 1000-unit UPEM.
    if not max_err:
        max_err = DEFAULT_MAX_ERR * 1000

    if isinstance(max_err, (list, tuple)):
        max_errors = max_err
    else:
        # One uniform tolerance per glyph.
        max_errors = [max_err] * len(glyphs)
    assert len(max_errors) == len(glyphs)

    return _glyphs_to_quadratic(
        glyphs, max_errors, reverse_direction, stats, all_quadratic
    )
235
+
236
+
237
def fonts_to_quadratic(
    fonts,
    max_err_em=None,
    max_err=None,
    reverse_direction=False,
    stats=None,
    dump_stats=False,
    remember_curve_type=True,
    all_quadratic=True,
):
    """Convert the curves of a collection of fonts to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling fonts_to_quadratic with one
    font at a time may yield slightly more optimized results.

    Return the set of modified glyph names if any, else return an empty set.

    By default, cu2qu stores the curve type in the fonts' lib, under a private
    key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert
    them again if the curve type is already set to "quadratic".
    Setting 'remember_curve_type' to False disables this optimization.

    Raises IncompatibleFontsError if same-named glyphs from different fonts
    have non-interpolatable outlines.
    """

    if remember_curve_type:
        curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
        if len(curve_types) == 1:
            curve_type = next(iter(curve_types))
            if curve_type in ("quadratic", "mixed"):
                logger.info("Curves already converted to quadratic")
                # NOTE(review): returns False although the docstring promises
                # a set; both are falsy, but the types differ -- confirm no
                # caller relies on set operations on the result.
                return False
            elif curve_type == "cubic":
                pass # keep converting
            else:
                raise NotImplementedError(curve_type)
        elif len(curve_types) > 1:
            # going to crash later if they do differ
            logger.warning("fonts may contain different curve types")

    if stats is None:
        stats = {}

    if max_err_em and max_err:
        raise TypeError("Only one of max_err and max_err_em can be specified.")
    if not (max_err_em or max_err):
        max_err_em = DEFAULT_MAX_ERR

    # Absolute tolerances: one per font, either given directly...
    if isinstance(max_err, (list, tuple)):
        assert len(max_err) == len(fonts)
        max_errors = max_err
    elif max_err:
        max_errors = [max_err] * len(fonts)

    # ...or derived from a relative (per-em) error scaled by each font's UPEM.
    if isinstance(max_err_em, (list, tuple)):
        assert len(fonts) == len(max_err_em)
        max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
    elif max_err_em:
        max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]

    modified = set()
    glyph_errors = {}
    # Convert same-named glyphs from all fonts together so the resulting
    # outlines stay interpolation-compatible.
    for name in set().union(*(f.keys() for f in fonts)):
        glyphs = []
        cur_max_errors = []
        for font, error in zip(fonts, max_errors):
            if name in font:
                glyphs.append(font[name])
                cur_max_errors.append(error)
        try:
            if _glyphs_to_quadratic(
                glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
            ):
                modified.add(name)
        except IncompatibleGlyphsError as exc:
            # Collect all per-glyph failures before raising one combined error.
            logger.error(exc)
            glyph_errors[name] = exc

    if glyph_errors:
        raise IncompatibleFontsError(glyph_errors)

    if modified and dump_stats:
        spline_lengths = sorted(stats.keys())
        logger.info(
            "New spline lengths: %s"
            % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
        )

    if remember_curve_type:
        # Record the produced curve type so future runs can skip conversion.
        for font in fonts:
            curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
            new_curve_type = "quadratic" if all_quadratic else "mixed"
            if curve_type != new_curve_type:
                font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
    return modified
334
+
335
+
336
def glyph_to_quadratic(glyph, **kwargs):
    """Convenience wrapper around glyphs_to_quadratic, for just one glyph.
    Return True if the glyph was modified, else return False.

    Accepts the same keyword arguments as glyphs_to_quadratic.
    """

    return glyphs_to_quadratic([glyph], **kwargs)
342
+
343
+
344
def font_to_quadratic(font, **kwargs):
    """Convenience wrapper around fonts_to_quadratic, for just one font.
    Return the set of modified glyph names if any, else return empty set.

    Accepts the same keyword arguments as fonts_to_quadratic.
    """

    return fonts_to_quadratic([font], **kwargs)
infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (306 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/__pycache__/split.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/designspaceLib/statNames.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute name information for a given location in user-space coordinates
2
+ using STAT data. This can be used to fill-in automatically the names of an
3
+ instance:
4
+
5
+ .. code:: python
6
+
7
+ instance = doc.instances[0]
8
+ names = getStatNames(doc, instance.getFullUserLocation(doc))
9
+ print(names.styleNames)
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ from dataclasses import dataclass
15
+ from typing import Dict, Optional, Tuple, Union
16
+ import logging
17
+
18
+ from fontTools.designspaceLib import (
19
+ AxisDescriptor,
20
+ AxisLabelDescriptor,
21
+ DesignSpaceDocument,
22
+ DesignSpaceDocumentError,
23
+ DiscreteAxisDescriptor,
24
+ SimpleLocationDict,
25
+ SourceDescriptor,
26
+ )
27
+
28
+ LOGGER = logging.getLogger(__name__)
29
+
30
+ # TODO(Python 3.8): use Literal
31
+ # RibbiStyleName = Union[Literal["regular"], Literal["bold"], Literal["italic"], Literal["bold italic"]]
32
+ RibbiStyle = str
33
+ BOLD_ITALIC_TO_RIBBI_STYLE = {
34
+ (False, False): "regular",
35
+ (False, True): "italic",
36
+ (True, False): "bold",
37
+ (True, True): "bold italic",
38
+ }
39
+
40
+
41
@dataclass
class StatNames:
    """Name data generated from the STAT table information."""

    # Localized family names, keyed by language code (e.g. "en").
    familyNames: Dict[str, str]
    # Localized style names, keyed by language code.
    styleNames: Dict[str, str]
    # PostScript font name, or None when it could not be computed.
    postScriptFontName: Optional[str]
    # Localized styleMap family names, keyed by language code.
    styleMapFamilyNames: Dict[str, str]
    # RIBBI style ("regular", "italic", "bold", "bold italic") or None.
    styleMapStyleName: Optional[RibbiStyle]
51
+
52
+ def getStatNames(
53
+ doc: DesignSpaceDocument, userLocation: SimpleLocationDict
54
+ ) -> StatNames:
55
+ """Compute the family, style, PostScript names of the given ``userLocation``
56
+ using the document's STAT information.
57
+
58
+ Also computes localizations.
59
+
60
+ If not enough STAT data is available for a given name, either its dict of
61
+ localized names will be empty (family and style names), or the name will be
62
+ None (PostScript name).
63
+
64
+ .. versionadded:: 5.0
65
+ """
66
+ familyNames: Dict[str, str] = {}
67
+ defaultSource: Optional[SourceDescriptor] = doc.findDefault()
68
+ if defaultSource is None:
69
+ LOGGER.warning("Cannot determine default source to look up family name.")
70
+ elif defaultSource.familyName is None:
71
+ LOGGER.warning(
72
+ "Cannot look up family name, assign the 'familyname' attribute to the default source."
73
+ )
74
+ else:
75
+ familyNames = {
76
+ "en": defaultSource.familyName,
77
+ **defaultSource.localisedFamilyName,
78
+ }
79
+
80
+ styleNames: Dict[str, str] = {}
81
+ # If a free-standing label matches the location, use it for name generation.
82
+ label = doc.labelForUserLocation(userLocation)
83
+ if label is not None:
84
+ styleNames = {"en": label.name, **label.labelNames}
85
+ # Otherwise, scour the axis labels for matches.
86
+ else:
87
+ # Gather all languages in which at least one translation is provided
88
+ # Then build names for all these languages, but fallback to English
89
+ # whenever a translation is missing.
90
+ labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
91
+ if labels:
92
+ languages = set(
93
+ language for label in labels for language in label.labelNames
94
+ )
95
+ languages.add("en")
96
+ for language in languages:
97
+ styleName = " ".join(
98
+ label.labelNames.get(language, label.defaultName)
99
+ for label in labels
100
+ if not label.elidable
101
+ )
102
+ if not styleName and doc.elidedFallbackName is not None:
103
+ styleName = doc.elidedFallbackName
104
+ styleNames[language] = styleName
105
+
106
+ if "en" not in familyNames or "en" not in styleNames:
107
+ # Not enough information to compute PS names of styleMap names
108
+ return StatNames(
109
+ familyNames=familyNames,
110
+ styleNames=styleNames,
111
+ postScriptFontName=None,
112
+ styleMapFamilyNames={},
113
+ styleMapStyleName=None,
114
+ )
115
+
116
+ postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")
117
+
118
+ styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)
119
+
120
+ styleNamesForStyleMap = styleNames
121
+ if regularUserLocation != userLocation:
122
+ regularStatNames = getStatNames(doc, regularUserLocation)
123
+ styleNamesForStyleMap = regularStatNames.styleNames
124
+
125
+ styleMapFamilyNames = {}
126
+ for language in set(familyNames).union(styleNames.keys()):
127
+ familyName = familyNames.get(language, familyNames["en"])
128
+ styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
129
+ styleMapFamilyNames[language] = (familyName + " " + styleName).strip()
130
+
131
+ return StatNames(
132
+ familyNames=familyNames,
133
+ styleNames=styleNames,
134
+ postScriptFontName=postScriptFontName,
135
+ styleMapFamilyNames=styleMapFamilyNames,
136
+ styleMapStyleName=styleMapStyleName,
137
+ )
138
+
139
+
140
+ def _getSortedAxisLabels(
141
+ axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
142
+ ) -> Dict[str, list[AxisLabelDescriptor]]:
143
+ """Returns axis labels sorted by their ordering, with unordered ones appended as
144
+ they are listed."""
145
+
146
+ # First, get the axis labels with explicit ordering...
147
+ sortedAxes = sorted(
148
+ (axis for axis in axes if axis.axisOrdering is not None),
149
+ key=lambda a: a.axisOrdering,
150
+ )
151
+ sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
152
+ axis.name: axis.axisLabels for axis in sortedAxes
153
+ }
154
+
155
+ # ... then append the others in the order they appear.
156
+ # NOTE: This relies on Python 3.7+ dict's preserved insertion order.
157
+ for axis in axes:
158
+ if axis.axisOrdering is None:
159
+ sortedLabels[axis.name] = axis.axisLabels
160
+
161
+ return sortedLabels
162
+
163
+
164
+ def _getAxisLabelsForUserLocation(
165
+ axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
166
+ userLocation: SimpleLocationDict,
167
+ ) -> list[AxisLabelDescriptor]:
168
+ labels: list[AxisLabelDescriptor] = []
169
+
170
+ allAxisLabels = _getSortedAxisLabels(axes)
171
+ if allAxisLabels.keys() != userLocation.keys():
172
+ LOGGER.warning(
173
+ f"Mismatch between user location '{userLocation.keys()}' and available "
174
+ f"labels for '{allAxisLabels.keys()}'."
175
+ )
176
+
177
+ for axisName, axisLabels in allAxisLabels.items():
178
+ userValue = userLocation[axisName]
179
+ label: Optional[AxisLabelDescriptor] = next(
180
+ (
181
+ l
182
+ for l in axisLabels
183
+ if l.userValue == userValue
184
+ or (
185
+ l.userMinimum is not None
186
+ and l.userMaximum is not None
187
+ and l.userMinimum <= userValue <= l.userMaximum
188
+ )
189
+ ),
190
+ None,
191
+ )
192
+ if label is None:
193
+ LOGGER.debug(
194
+ f"Document needs a label for axis '{axisName}', user value '{userValue}'."
195
+ )
196
+ else:
197
+ labels.append(label)
198
+
199
+ return labels
200
+
201
+
202
+ def _getRibbiStyle(
203
+ self: DesignSpaceDocument, userLocation: SimpleLocationDict
204
+ ) -> Tuple[RibbiStyle, SimpleLocationDict]:
205
+ """Compute the RIBBI style name of the given user location,
206
+ return the location of the matching Regular in the RIBBI group.
207
+
208
+ .. versionadded:: 5.0
209
+ """
210
+ regularUserLocation = {}
211
+ axes_by_tag = {axis.tag: axis for axis in self.axes}
212
+
213
+ bold: bool = False
214
+ italic: bool = False
215
+
216
+ axis = axes_by_tag.get("wght")
217
+ if axis is not None:
218
+ for regular_label in axis.axisLabels:
219
+ if (
220
+ regular_label.linkedUserValue == userLocation[axis.name]
221
+ # In the "recursive" case where both the Regular has
222
+ # linkedUserValue pointing the Bold, and the Bold has
223
+ # linkedUserValue pointing to the Regular, only consider the
224
+ # first case: Regular (e.g. 400) has linkedUserValue pointing to
225
+ # Bold (e.g. 700, higher than Regular)
226
+ and regular_label.userValue < regular_label.linkedUserValue
227
+ ):
228
+ regularUserLocation[axis.name] = regular_label.userValue
229
+ bold = True
230
+ break
231
+
232
+ axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
233
+ if axis is not None:
234
+ for upright_label in axis.axisLabels:
235
+ if (
236
+ upright_label.linkedUserValue == userLocation[axis.name]
237
+ # In the "recursive" case where both the Upright has
238
+ # linkedUserValue pointing the Italic, and the Italic has
239
+ # linkedUserValue pointing to the Upright, only consider the
240
+ # first case: Upright (e.g. ital=0, slant=0) has
241
+ # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
242
+ # slant=12 for backwards italics, in any case higher than
243
+ # Upright in absolute value, hence the abs() below.
244
+ and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
245
+ ):
246
+ regularUserLocation[axis.name] = upright_label.userValue
247
+ italic = True
248
+ break
249
+
250
+ return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
251
+ **userLocation,
252
+ **regularUserLocation,
253
+ }
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/MacRoman.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MacRoman = [
2
+ "NUL",
3
+ "Eth",
4
+ "eth",
5
+ "Lslash",
6
+ "lslash",
7
+ "Scaron",
8
+ "scaron",
9
+ "Yacute",
10
+ "yacute",
11
+ "HT",
12
+ "LF",
13
+ "Thorn",
14
+ "thorn",
15
+ "CR",
16
+ "Zcaron",
17
+ "zcaron",
18
+ "DLE",
19
+ "DC1",
20
+ "DC2",
21
+ "DC3",
22
+ "DC4",
23
+ "onehalf",
24
+ "onequarter",
25
+ "onesuperior",
26
+ "threequarters",
27
+ "threesuperior",
28
+ "twosuperior",
29
+ "brokenbar",
30
+ "minus",
31
+ "multiply",
32
+ "RS",
33
+ "US",
34
+ "space",
35
+ "exclam",
36
+ "quotedbl",
37
+ "numbersign",
38
+ "dollar",
39
+ "percent",
40
+ "ampersand",
41
+ "quotesingle",
42
+ "parenleft",
43
+ "parenright",
44
+ "asterisk",
45
+ "plus",
46
+ "comma",
47
+ "hyphen",
48
+ "period",
49
+ "slash",
50
+ "zero",
51
+ "one",
52
+ "two",
53
+ "three",
54
+ "four",
55
+ "five",
56
+ "six",
57
+ "seven",
58
+ "eight",
59
+ "nine",
60
+ "colon",
61
+ "semicolon",
62
+ "less",
63
+ "equal",
64
+ "greater",
65
+ "question",
66
+ "at",
67
+ "A",
68
+ "B",
69
+ "C",
70
+ "D",
71
+ "E",
72
+ "F",
73
+ "G",
74
+ "H",
75
+ "I",
76
+ "J",
77
+ "K",
78
+ "L",
79
+ "M",
80
+ "N",
81
+ "O",
82
+ "P",
83
+ "Q",
84
+ "R",
85
+ "S",
86
+ "T",
87
+ "U",
88
+ "V",
89
+ "W",
90
+ "X",
91
+ "Y",
92
+ "Z",
93
+ "bracketleft",
94
+ "backslash",
95
+ "bracketright",
96
+ "asciicircum",
97
+ "underscore",
98
+ "grave",
99
+ "a",
100
+ "b",
101
+ "c",
102
+ "d",
103
+ "e",
104
+ "f",
105
+ "g",
106
+ "h",
107
+ "i",
108
+ "j",
109
+ "k",
110
+ "l",
111
+ "m",
112
+ "n",
113
+ "o",
114
+ "p",
115
+ "q",
116
+ "r",
117
+ "s",
118
+ "t",
119
+ "u",
120
+ "v",
121
+ "w",
122
+ "x",
123
+ "y",
124
+ "z",
125
+ "braceleft",
126
+ "bar",
127
+ "braceright",
128
+ "asciitilde",
129
+ "DEL",
130
+ "Adieresis",
131
+ "Aring",
132
+ "Ccedilla",
133
+ "Eacute",
134
+ "Ntilde",
135
+ "Odieresis",
136
+ "Udieresis",
137
+ "aacute",
138
+ "agrave",
139
+ "acircumflex",
140
+ "adieresis",
141
+ "atilde",
142
+ "aring",
143
+ "ccedilla",
144
+ "eacute",
145
+ "egrave",
146
+ "ecircumflex",
147
+ "edieresis",
148
+ "iacute",
149
+ "igrave",
150
+ "icircumflex",
151
+ "idieresis",
152
+ "ntilde",
153
+ "oacute",
154
+ "ograve",
155
+ "ocircumflex",
156
+ "odieresis",
157
+ "otilde",
158
+ "uacute",
159
+ "ugrave",
160
+ "ucircumflex",
161
+ "udieresis",
162
+ "dagger",
163
+ "degree",
164
+ "cent",
165
+ "sterling",
166
+ "section",
167
+ "bullet",
168
+ "paragraph",
169
+ "germandbls",
170
+ "registered",
171
+ "copyright",
172
+ "trademark",
173
+ "acute",
174
+ "dieresis",
175
+ "notequal",
176
+ "AE",
177
+ "Oslash",
178
+ "infinity",
179
+ "plusminus",
180
+ "lessequal",
181
+ "greaterequal",
182
+ "yen",
183
+ "mu",
184
+ "partialdiff",
185
+ "summation",
186
+ "product",
187
+ "pi",
188
+ "integral",
189
+ "ordfeminine",
190
+ "ordmasculine",
191
+ "Omega",
192
+ "ae",
193
+ "oslash",
194
+ "questiondown",
195
+ "exclamdown",
196
+ "logicalnot",
197
+ "radical",
198
+ "florin",
199
+ "approxequal",
200
+ "Delta",
201
+ "guillemotleft",
202
+ "guillemotright",
203
+ "ellipsis",
204
+ "nbspace",
205
+ "Agrave",
206
+ "Atilde",
207
+ "Otilde",
208
+ "OE",
209
+ "oe",
210
+ "endash",
211
+ "emdash",
212
+ "quotedblleft",
213
+ "quotedblright",
214
+ "quoteleft",
215
+ "quoteright",
216
+ "divide",
217
+ "lozenge",
218
+ "ydieresis",
219
+ "Ydieresis",
220
+ "fraction",
221
+ "currency",
222
+ "guilsinglleft",
223
+ "guilsinglright",
224
+ "fi",
225
+ "fl",
226
+ "daggerdbl",
227
+ "periodcentered",
228
+ "quotesinglbase",
229
+ "quotedblbase",
230
+ "perthousand",
231
+ "Acircumflex",
232
+ "Ecircumflex",
233
+ "Aacute",
234
+ "Edieresis",
235
+ "Egrave",
236
+ "Iacute",
237
+ "Icircumflex",
238
+ "Idieresis",
239
+ "Igrave",
240
+ "Oacute",
241
+ "Ocircumflex",
242
+ "apple",
243
+ "Ograve",
244
+ "Uacute",
245
+ "Ucircumflex",
246
+ "Ugrave",
247
+ "dotlessi",
248
+ "circumflex",
249
+ "tilde",
250
+ "macron",
251
+ "breve",
252
+ "dotaccent",
253
+ "ring",
254
+ "cedilla",
255
+ "hungarumlaut",
256
+ "ogonek",
257
+ "caron",
258
+ ]
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/StandardEncoding.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ StandardEncoding = [
2
+ ".notdef",
3
+ ".notdef",
4
+ ".notdef",
5
+ ".notdef",
6
+ ".notdef",
7
+ ".notdef",
8
+ ".notdef",
9
+ ".notdef",
10
+ ".notdef",
11
+ ".notdef",
12
+ ".notdef",
13
+ ".notdef",
14
+ ".notdef",
15
+ ".notdef",
16
+ ".notdef",
17
+ ".notdef",
18
+ ".notdef",
19
+ ".notdef",
20
+ ".notdef",
21
+ ".notdef",
22
+ ".notdef",
23
+ ".notdef",
24
+ ".notdef",
25
+ ".notdef",
26
+ ".notdef",
27
+ ".notdef",
28
+ ".notdef",
29
+ ".notdef",
30
+ ".notdef",
31
+ ".notdef",
32
+ ".notdef",
33
+ ".notdef",
34
+ "space",
35
+ "exclam",
36
+ "quotedbl",
37
+ "numbersign",
38
+ "dollar",
39
+ "percent",
40
+ "ampersand",
41
+ "quoteright",
42
+ "parenleft",
43
+ "parenright",
44
+ "asterisk",
45
+ "plus",
46
+ "comma",
47
+ "hyphen",
48
+ "period",
49
+ "slash",
50
+ "zero",
51
+ "one",
52
+ "two",
53
+ "three",
54
+ "four",
55
+ "five",
56
+ "six",
57
+ "seven",
58
+ "eight",
59
+ "nine",
60
+ "colon",
61
+ "semicolon",
62
+ "less",
63
+ "equal",
64
+ "greater",
65
+ "question",
66
+ "at",
67
+ "A",
68
+ "B",
69
+ "C",
70
+ "D",
71
+ "E",
72
+ "F",
73
+ "G",
74
+ "H",
75
+ "I",
76
+ "J",
77
+ "K",
78
+ "L",
79
+ "M",
80
+ "N",
81
+ "O",
82
+ "P",
83
+ "Q",
84
+ "R",
85
+ "S",
86
+ "T",
87
+ "U",
88
+ "V",
89
+ "W",
90
+ "X",
91
+ "Y",
92
+ "Z",
93
+ "bracketleft",
94
+ "backslash",
95
+ "bracketright",
96
+ "asciicircum",
97
+ "underscore",
98
+ "quoteleft",
99
+ "a",
100
+ "b",
101
+ "c",
102
+ "d",
103
+ "e",
104
+ "f",
105
+ "g",
106
+ "h",
107
+ "i",
108
+ "j",
109
+ "k",
110
+ "l",
111
+ "m",
112
+ "n",
113
+ "o",
114
+ "p",
115
+ "q",
116
+ "r",
117
+ "s",
118
+ "t",
119
+ "u",
120
+ "v",
121
+ "w",
122
+ "x",
123
+ "y",
124
+ "z",
125
+ "braceleft",
126
+ "bar",
127
+ "braceright",
128
+ "asciitilde",
129
+ ".notdef",
130
+ ".notdef",
131
+ ".notdef",
132
+ ".notdef",
133
+ ".notdef",
134
+ ".notdef",
135
+ ".notdef",
136
+ ".notdef",
137
+ ".notdef",
138
+ ".notdef",
139
+ ".notdef",
140
+ ".notdef",
141
+ ".notdef",
142
+ ".notdef",
143
+ ".notdef",
144
+ ".notdef",
145
+ ".notdef",
146
+ ".notdef",
147
+ ".notdef",
148
+ ".notdef",
149
+ ".notdef",
150
+ ".notdef",
151
+ ".notdef",
152
+ ".notdef",
153
+ ".notdef",
154
+ ".notdef",
155
+ ".notdef",
156
+ ".notdef",
157
+ ".notdef",
158
+ ".notdef",
159
+ ".notdef",
160
+ ".notdef",
161
+ ".notdef",
162
+ ".notdef",
163
+ "exclamdown",
164
+ "cent",
165
+ "sterling",
166
+ "fraction",
167
+ "yen",
168
+ "florin",
169
+ "section",
170
+ "currency",
171
+ "quotesingle",
172
+ "quotedblleft",
173
+ "guillemotleft",
174
+ "guilsinglleft",
175
+ "guilsinglright",
176
+ "fi",
177
+ "fl",
178
+ ".notdef",
179
+ "endash",
180
+ "dagger",
181
+ "daggerdbl",
182
+ "periodcentered",
183
+ ".notdef",
184
+ "paragraph",
185
+ "bullet",
186
+ "quotesinglbase",
187
+ "quotedblbase",
188
+ "quotedblright",
189
+ "guillemotright",
190
+ "ellipsis",
191
+ "perthousand",
192
+ ".notdef",
193
+ "questiondown",
194
+ ".notdef",
195
+ "grave",
196
+ "acute",
197
+ "circumflex",
198
+ "tilde",
199
+ "macron",
200
+ "breve",
201
+ "dotaccent",
202
+ "dieresis",
203
+ ".notdef",
204
+ "ring",
205
+ "cedilla",
206
+ ".notdef",
207
+ "hungarumlaut",
208
+ "ogonek",
209
+ "caron",
210
+ "emdash",
211
+ ".notdef",
212
+ ".notdef",
213
+ ".notdef",
214
+ ".notdef",
215
+ ".notdef",
216
+ ".notdef",
217
+ ".notdef",
218
+ ".notdef",
219
+ ".notdef",
220
+ ".notdef",
221
+ ".notdef",
222
+ ".notdef",
223
+ ".notdef",
224
+ ".notdef",
225
+ ".notdef",
226
+ ".notdef",
227
+ "AE",
228
+ ".notdef",
229
+ "ordfeminine",
230
+ ".notdef",
231
+ ".notdef",
232
+ ".notdef",
233
+ ".notdef",
234
+ "Lslash",
235
+ "Oslash",
236
+ "OE",
237
+ "ordmasculine",
238
+ ".notdef",
239
+ ".notdef",
240
+ ".notdef",
241
+ ".notdef",
242
+ ".notdef",
243
+ "ae",
244
+ ".notdef",
245
+ ".notdef",
246
+ ".notdef",
247
+ "dotlessi",
248
+ ".notdef",
249
+ ".notdef",
250
+ "lslash",
251
+ "oslash",
252
+ "oe",
253
+ "germandbls",
254
+ ".notdef",
255
+ ".notdef",
256
+ ".notdef",
257
+ ".notdef",
258
+ ]
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Empty __init__.py file to signal Python this directory is a package."""
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/MacRoman.cpython-310.pyc ADDED
Binary file (2.22 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/StandardEncoding.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (257 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/__pycache__/codecs.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/encodings/codecs.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Extend the Python codecs module with a few encodings that are used in OpenType (name table)
2
+ but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
3
+
4
+ import codecs
5
+ import encodings
6
+
7
+
8
+ class ExtendCodec(codecs.Codec):
9
+ def __init__(self, name, base_encoding, mapping):
10
+ self.name = name
11
+ self.base_encoding = base_encoding
12
+ self.mapping = mapping
13
+ self.reverse = {v: k for k, v in mapping.items()}
14
+ self.max_len = max(len(v) for v in mapping.values())
15
+ self.info = codecs.CodecInfo(
16
+ name=self.name, encode=self.encode, decode=self.decode
17
+ )
18
+ codecs.register_error(name, self.error)
19
+
20
+ def _map(self, mapper, output_type, exc_type, input, errors):
21
+ base_error_handler = codecs.lookup_error(errors)
22
+ length = len(input)
23
+ out = output_type()
24
+ while input:
25
+ # first try to use self.error as the error handler
26
+ try:
27
+ part = mapper(input, self.base_encoding, errors=self.name)
28
+ out += part
29
+ break # All converted
30
+ except exc_type as e:
31
+ # else convert the correct part, handle error as requested and continue
32
+ out += mapper(input[: e.start], self.base_encoding, self.name)
33
+ replacement, pos = base_error_handler(e)
34
+ out += replacement
35
+ input = input[pos:]
36
+ return out, length
37
+
38
+ def encode(self, input, errors="strict"):
39
+ return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
40
+
41
+ def decode(self, input, errors="strict"):
42
+ return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
43
+
44
+ def error(self, e):
45
+ if isinstance(e, UnicodeDecodeError):
46
+ for end in range(e.start + 1, e.end + 1):
47
+ s = e.object[e.start : end]
48
+ if s in self.mapping:
49
+ return self.mapping[s], end
50
+ elif isinstance(e, UnicodeEncodeError):
51
+ for end in range(e.start + 1, e.start + self.max_len + 1):
52
+ s = e.object[e.start : end]
53
+ if s in self.reverse:
54
+ return self.reverse[s], end
55
+ e.encoding = self.name
56
+ raise e
57
+
58
+
59
+ _extended_encodings = {
60
+ "x_mac_japanese_ttx": (
61
+ "shift_jis",
62
+ {
63
+ b"\xFC": chr(0x007C),
64
+ b"\x7E": chr(0x007E),
65
+ b"\x80": chr(0x005C),
66
+ b"\xA0": chr(0x00A0),
67
+ b"\xFD": chr(0x00A9),
68
+ b"\xFE": chr(0x2122),
69
+ b"\xFF": chr(0x2026),
70
+ },
71
+ ),
72
+ "x_mac_trad_chinese_ttx": (
73
+ "big5",
74
+ {
75
+ b"\x80": chr(0x005C),
76
+ b"\xA0": chr(0x00A0),
77
+ b"\xFD": chr(0x00A9),
78
+ b"\xFE": chr(0x2122),
79
+ b"\xFF": chr(0x2026),
80
+ },
81
+ ),
82
+ "x_mac_korean_ttx": (
83
+ "euc_kr",
84
+ {
85
+ b"\x80": chr(0x00A0),
86
+ b"\x81": chr(0x20A9),
87
+ b"\x82": chr(0x2014),
88
+ b"\x83": chr(0x00A9),
89
+ b"\xFE": chr(0x2122),
90
+ b"\xFF": chr(0x2026),
91
+ },
92
+ ),
93
+ "x_mac_simp_chinese_ttx": (
94
+ "gb2312",
95
+ {
96
+ b"\x80": chr(0x00FC),
97
+ b"\xA0": chr(0x00A0),
98
+ b"\xFD": chr(0x00A9),
99
+ b"\xFE": chr(0x2122),
100
+ b"\xFF": chr(0x2026),
101
+ },
102
+ ),
103
+ }
104
+
105
+ _cache = {}
106
+
107
+
108
+ def search_function(name):
109
+ name = encodings.normalize_encoding(name) # Rather undocumented...
110
+ if name in _extended_encodings:
111
+ if name not in _cache:
112
+ base_encoding, mapping = _extended_encodings[name]
113
+ assert name[-4:] == "_ttx"
114
+ # Python 2 didn't have any of the encodings that we are implementing
115
+ # in this file. Python 3 added aliases for the East Asian ones, mapping
116
+ # them "temporarily" to the same base encoding as us, with a comment
117
+ # suggesting that full implementation will appear some time later.
118
+ # As such, try the Python version of the x_mac_... first, if that is found,
119
+ # use *that* as our base encoding. This would make our encoding upgrade
120
+ # to the full encoding when and if Python finally implements that.
121
+ # http://bugs.python.org/issue24041
122
+ base_encodings = [name[:-4], base_encoding]
123
+ for base_encoding in base_encodings:
124
+ try:
125
+ codecs.lookup(base_encoding)
126
+ except LookupError:
127
+ continue
128
+ _cache[name] = ExtendCodec(name, base_encoding, mapping)
129
+ break
130
+ return _cache[name].info
131
+
132
+ return None
133
+
134
+
135
+ codecs.register(search_function)
infer_4_47_1/lib/python3.10/site-packages/fontTools/mtiLib/__main__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import sys
2
+ from fontTools.mtiLib import main
3
+
4
+ if __name__ == "__main__":
5
+ sys.exit(main())
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """OpenType Layout-related functionality."""
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (224 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/error.cpython-310.pyc ADDED
Binary file (705 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/builder.py ADDED
The diff for this file is too large to render. See raw diff
 
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/error.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ class OpenTypeLibError(Exception):
2
+ def __init__(self, message, location):
3
+ Exception.__init__(self, message)
4
+ self.location = location
5
+
6
+ def __str__(self):
7
+ message = Exception.__str__(self)
8
+ if self.location:
9
+ return f"{self.location}: {message}"
10
+ else:
11
+ return message
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/maxContextCalc.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__ = ["maxCtxFont"]
2
+
3
+
4
+ def maxCtxFont(font):
5
+ """Calculate the usMaxContext value for an entire font."""
6
+
7
+ maxCtx = 0
8
+ for tag in ("GSUB", "GPOS"):
9
+ if tag not in font:
10
+ continue
11
+ table = font[tag].table
12
+ if not table.LookupList:
13
+ continue
14
+ for lookup in table.LookupList.Lookup:
15
+ for st in lookup.SubTable:
16
+ maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)
17
+ return maxCtx
18
+
19
+
20
+ def maxCtxSubtable(maxCtx, tag, lookupType, st):
21
+ """Calculate usMaxContext based on a single lookup table (and an existing
22
+ max value).
23
+ """
24
+
25
+ # single positioning, single / multiple substitution
26
+ if (tag == "GPOS" and lookupType == 1) or (
27
+ tag == "GSUB" and lookupType in (1, 2, 3)
28
+ ):
29
+ maxCtx = max(maxCtx, 1)
30
+
31
+ # pair positioning
32
+ elif tag == "GPOS" and lookupType == 2:
33
+ maxCtx = max(maxCtx, 2)
34
+
35
+ # ligatures
36
+ elif tag == "GSUB" and lookupType == 4:
37
+ for ligatures in st.ligatures.values():
38
+ for ligature in ligatures:
39
+ maxCtx = max(maxCtx, ligature.CompCount)
40
+
41
+ # context
42
+ elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5):
43
+ maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub")
44
+
45
+ # chained context
46
+ elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6):
47
+ maxCtx = maxCtxContextualSubtable(
48
+ maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain"
49
+ )
50
+
51
+ # extensions
52
+ elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7):
53
+ maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
54
+
55
+ # reverse-chained context
56
+ elif tag == "GSUB" and lookupType == 8:
57
+ maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse")
58
+
59
+ return maxCtx
60
+
61
+
62
+ def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""):
63
+ """Calculate usMaxContext based on a contextual feature subtable."""
64
+
65
+ if st.Format == 1:
66
+ for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)):
67
+ if ruleset is None:
68
+ continue
69
+ for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)):
70
+ if rule is None:
71
+ continue
72
+ maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
73
+
74
+ elif st.Format == 2:
75
+ for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)):
76
+ if ruleset is None:
77
+ continue
78
+ for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)):
79
+ if rule is None:
80
+ continue
81
+ maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
82
+
83
+ elif st.Format == 3:
84
+ maxCtx = maxCtxContextualRule(maxCtx, st, chain)
85
+
86
+ return maxCtx
87
+
88
+
89
+ def maxCtxContextualRule(maxCtx, st, chain):
90
+ """Calculate usMaxContext based on a contextual feature rule."""
91
+
92
+ if not chain:
93
+ return max(maxCtx, st.GlyphCount)
94
+ elif chain == "Reverse":
95
+ return max(maxCtx, 1 + st.LookAheadGlyphCount)
96
+ return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from argparse import RawTextHelpFormatter
2
+ from fontTools.otlLib.optimize.gpos import COMPRESSION_LEVEL, compact
3
+ from fontTools.ttLib import TTFont
4
+
5
+
6
+ def main(args=None):
7
+ """Optimize the layout tables of an existing font"""
8
+ from argparse import ArgumentParser
9
+
10
+ from fontTools import configLogger
11
+
12
+ parser = ArgumentParser(
13
+ prog="otlLib.optimize",
14
+ description=main.__doc__,
15
+ formatter_class=RawTextHelpFormatter,
16
+ )
17
+ parser.add_argument("font")
18
+ parser.add_argument(
19
+ "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
20
+ )
21
+ parser.add_argument(
22
+ "--gpos-compression-level",
23
+ help=COMPRESSION_LEVEL.help,
24
+ default=COMPRESSION_LEVEL.default,
25
+ choices=list(range(10)),
26
+ type=int,
27
+ )
28
+ logging_group = parser.add_mutually_exclusive_group(required=False)
29
+ logging_group.add_argument(
30
+ "-v", "--verbose", action="store_true", help="Run more verbosely."
31
+ )
32
+ logging_group.add_argument(
33
+ "-q", "--quiet", action="store_true", help="Turn verbosity off."
34
+ )
35
+ options = parser.parse_args(args)
36
+
37
+ configLogger(
38
+ level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
39
+ )
40
+
41
+ font = TTFont(options.font)
42
+ compact(font, options.gpos_compression_level)
43
+ font.save(options.outfile or options.font)
44
+
45
+
46
+ if __name__ == "__main__":
47
+ import sys
48
+
49
+ if len(sys.argv) > 1:
50
+ sys.exit(main())
51
+ import doctest
52
+
53
+ sys.exit(doctest.testmod().failed)
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__main__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import sys
2
+ from fontTools.otlLib.optimize import main
3
+
4
+
5
+ if __name__ == "__main__":
6
+ sys.exit(main())
infer_4_47_1/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (308 Bytes). View file