Diffstat (limited to '3rdparty/utf8proc/data')
-rw-r--r--              | 3rdparty/utf8proc/data/Makefile           |  51
-rw-r--r--              | 3rdparty/utf8proc/data/charwidths.jl      | 157
-rwxr-xr-x[-rw-r--r--]  | 3rdparty/utf8proc/data/data_generator.rb  | 128
3 files changed, 188 insertions, 148 deletions
diff --git a/3rdparty/utf8proc/data/Makefile b/3rdparty/utf8proc/data/Makefile
index 19d375f6253..1c5830ef00f 100644
--- a/3rdparty/utf8proc/data/Makefile
+++ b/3rdparty/utf8proc/data/Makefile
@@ -1,7 +1,7 @@
 # Unicode data generation rules. Except for the test data files, most
 # users will not use these Makefile rules, which are primarily to re-generate
 # unicode_data.c when we get a new Unicode version or charwidth data; they
-# require ruby, fontforge, and julia to be installed.
+# require ruby and julia to be installed.
 
 # programs
 CURL=curl
@@ -9,58 +9,55 @@ RUBY=ruby
 PERL=perl
 MAKE=make
 JULIA=julia
-FONTFORGE=fontforge
 
 CURLFLAGS = --retry 5 --location
 
 .PHONY: clean
 
 .DELETE_ON_ERROR:
 
-utf8proc_data.c.new: data_generator.rb UnicodeData.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt CharWidths.txt
+utf8proc_data.c.new: data_generator.rb UnicodeData.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt CharWidths.txt emoji-data.txt
 	$(RUBY) data_generator.rb < UnicodeData.txt > $@
 
-# GNU Unifont version for font metric calculations:
-UNIFONT_VERSION=9.0.04
-
-unifont.ttf:
-	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://mirrors.kernel.org/gnu/unifont/unifont-$(UNIFONT_VERSION)/unifont-$(UNIFONT_VERSION).ttf
-
-unifont_upper.ttf:
-	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://mirrors.kernel.org/gnu/unifont/unifont-$(UNIFONT_VERSION)/unifont_upper-$(UNIFONT_VERSION).ttf
-
-%.sfd: %.ttf
-	$(FONTFORGE) -lang=ff -c "Open(\"$<\");Save(\"$@\");Quit(0);"
-
-CharWidths.txt: charwidths.jl unifont.sfd unifont_upper.sfd EastAsianWidth.txt
+CharWidths.txt: charwidths.jl EastAsianWidth.txt
 	$(JULIA) charwidths.jl > $@
 
-# Unicode data version
-UNICODE_VERSION=9.0.0
+# Unicode data version (must also update utf8proc_unicode_version function)
+UNICODE_VERSION=15.1.0
 
 UnicodeData.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/UnicodeData.txt
+	$(CURL) $(CURLFLAGS) -o $@ https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/UnicodeData.txt
 
 EastAsianWidth.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/EastAsianWidth.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/EastAsianWidth.txt
 
 GraphemeBreakProperty.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakProperty.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakProperty.txt
 
 DerivedCoreProperties.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/DerivedCoreProperties.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/DerivedCoreProperties.txt
 
 CompositionExclusions.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CompositionExclusions.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CompositionExclusions.txt
 
 CaseFolding.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CaseFolding.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/CaseFolding.txt
 
 NormalizationTest.txt:
-	$(CURL) $(CURLFLAGS) -o $@ -O $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/NormalizationTest.txt
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/NormalizationTest.txt
 
 GraphemeBreakTest.txt:
-	$(CURL) $(CURLFLAGS) $(URLCACHE)http://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakTest.txt | $(PERL) -pe 's,÷,/,g;s,×,+,g' > $@
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://www.unicode.org/Public/$(UNICODE_VERSION)/ucd/auxiliary/GraphemeBreakTest.txt
+
+emoji-data.txt:
+	$(CURL) $(CURLFLAGS) -o $@ $(URLCACHE)https://unicode.org/Public/$(UNICODE_VERSION)/ucd/emoji/emoji-data.txt
+
+Uppercase.txt: DerivedCoreProperties.txt
+	$(RUBY) -e 'puts File.read("DerivedCoreProperties.txt")[/# Derived Property: Uppercase.*?# Total code points:/m]' > $@
+
+Lowercase.txt: DerivedCoreProperties.txt
+	$(RUBY) -e 'puts File.read("DerivedCoreProperties.txt")[/# Derived Property: Lowercase.*?# Total code points:/m]' > $@
 
 clean:
-	rm -f UnicodeData.txt EastAsianWidth.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt NormalizationTest.txt GraphemeBreakTest.txt CharWidths.txt unifont*.ttf unifont*.sfd
+	rm -f UnicodeData.txt EastAsianWidth.txt GraphemeBreakProperty.txt DerivedCoreProperties.txt CompositionExclusions.txt CaseFolding.txt NormalizationTest.txt GraphemeBreakTest.txt CharWidths.txt emoji-data.txt
+	rm -f Uppercase.txt Lowercase.txt
 	rm -f utf8proc_data.c.new
diff --git a/3rdparty/utf8proc/data/charwidths.jl b/3rdparty/utf8proc/data/charwidths.jl
index bdcbc6c59d0..1346217d810 100644
--- a/3rdparty/utf8proc/data/charwidths.jl
+++ b/3rdparty/utf8proc/data/charwidths.jl
@@ -1,23 +1,15 @@
 # Following work by @jiahao, we compute character widths using a combination of
-# * advance widths from GNU Unifont (advance width 512 = 1 en)
+# * character category
 # * UAX 11: East Asian Width
 # * a few exceptions as needed
 # Adapted from http://nbviewer.ipython.org/gist/jiahao/07e8b08bf6d8671e9734
 #
+# We used to also use data from GNU Unifont, but that has proven unreliable
+# and unlikely to match widths assumed by terminals.
+#
 # Requires Julia (obviously) and FontForge.
 
 #############################################################################
-# Julia 0.3/0.4 compatibility (taken from Compat package)
-if VERSION < v"0.4.0-dev+1387"
-    typealias AbstractString String
-end
-if VERSION < v"0.4.0-dev+1419"
-    const UInt32 = Uint32
-end
-if VERSION < v"0.4.0-dev+3874"
-    Base.parse{T<:Integer}(::Type{T}, s::AbstractString) = parseint(T, s)
-end
-
 CharWidths = Dict{Int,Int}()
 
 #############################################################################
@@ -25,29 +17,55 @@ CharWidths = Dict{Int,Int}()
 # to minimize bootstrapping complexity when a new version of Unicode comes out.
 catcode(c) = ccall((:utf8proc_category,"../libutf8proc"), Cint, (Int32,), c)
 
-# use Base.UTF8proc module to get category codes constants, since
-# we won't change these in utf8proc.
-import Base.UTF8proc
+# utf8proc category constants (must match utf8proc.h)
+const UTF8PROC_CATEGORY_CN = 0
+const UTF8PROC_CATEGORY_LU = 1
+const UTF8PROC_CATEGORY_LL = 2
+const UTF8PROC_CATEGORY_LT = 3
+const UTF8PROC_CATEGORY_LM = 4
+const UTF8PROC_CATEGORY_LO = 5
+const UTF8PROC_CATEGORY_MN = 6
+const UTF8PROC_CATEGORY_MC = 7
+const UTF8PROC_CATEGORY_ME = 8
+const UTF8PROC_CATEGORY_ND = 9
+const UTF8PROC_CATEGORY_NL = 10
+const UTF8PROC_CATEGORY_NO = 11
+const UTF8PROC_CATEGORY_PC = 12
+const UTF8PROC_CATEGORY_PD = 13
+const UTF8PROC_CATEGORY_PS = 14
+const UTF8PROC_CATEGORY_PE = 15
+const UTF8PROC_CATEGORY_PI = 16
+const UTF8PROC_CATEGORY_PF = 17
+const UTF8PROC_CATEGORY_PO = 18
+const UTF8PROC_CATEGORY_SM = 19
+const UTF8PROC_CATEGORY_SC = 20
+const UTF8PROC_CATEGORY_SK = 21
+const UTF8PROC_CATEGORY_SO = 22
+const UTF8PROC_CATEGORY_ZS = 23
+const UTF8PROC_CATEGORY_ZL = 24
+const UTF8PROC_CATEGORY_ZP = 25
+const UTF8PROC_CATEGORY_CC = 26
+const UTF8PROC_CATEGORY_CF = 27
+const UTF8PROC_CATEGORY_CS = 28
+const UTF8PROC_CATEGORY_CO = 29
 
 #############################################################################
 # Use a default width of 1 for all character categories that are
-# letter/symbol/number-like.  This can be overriden by Unifont or UAX 11
+# letter/symbol/number-like, as well as for unassigned/private-use chars.
+# This can be overridden by UAX 11
 # below, but provides a useful nonzero fallback for new codepoints when
 # a new Unicode version has been released but Unifont hasn't been updated yet.
 
 zerowidth = Set{Int}() # categories that may contain zero-width chars
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CN)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_MN)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_MC)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ME)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_SK)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZS)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZL)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_ZP)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CC)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CF)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CS)
-push!(zerowidth, UTF8proc.UTF8PROC_CATEGORY_CO)
+push!(zerowidth, UTF8PROC_CATEGORY_MN)
+push!(zerowidth, UTF8PROC_CATEGORY_MC)
+push!(zerowidth, UTF8PROC_CATEGORY_ME)
+# push!(zerowidth, UTF8PROC_CATEGORY_SK) # see issue #167
+push!(zerowidth, UTF8PROC_CATEGORY_ZL)
+push!(zerowidth, UTF8PROC_CATEGORY_ZP)
+push!(zerowidth, UTF8PROC_CATEGORY_CC)
+push!(zerowidth, UTF8PROC_CATEGORY_CF)
+push!(zerowidth, UTF8PROC_CATEGORY_CS)
 for c in 0x0000:0x110000
     if catcode(c) ∉ zerowidth
         CharWidths[c] = 1
@@ -55,49 +73,13 @@ for c in 0x0000:0x110000
     end
 end
 
 #############################################################################
-# Widths from GNU Unifont
-
-#Read sfdfile for character widths
-function parsesfd(filename::AbstractString, CharWidths::Dict{Int,Int}=Dict{Int,Int}())
-    state=:seekchar
-    lineno = 0
-    codepoint = width = nothing
-    for line in readlines(open(filename))
-        lineno += 1
-        if state==:seekchar       #StartChar: nonmarkingreturn
-            if contains(line, "StartChar: ")
-                codepoint = nothing
-                width = nothing
-                state = :readdata
-            end
-        elseif state==:readdata   #Encoding: 65538 -1 2, Width: 1024
-            contains(line, "Encoding:") && (codepoint = parse(Int, split(line)[3]))
-            contains(line, "Width:") && (width = parse(Int, split(line)[2]))
-            if codepoint!=nothing && width!=nothing && codepoint >= 0
-                w=div(width, 512) # 512 units to the en
-                if w > 0
-                    # only add nonzero widths, since (1) the default is zero
-                    # and (2) this circumvents some apparent bugs in Unifont
-                    # (https://savannah.gnu.org/bugs/index.php?45395)
-                    CharWidths[codepoint] = w
-                end
-                state = :seekchar
-            end
-        end
-    end
-    CharWidths
-end
-CharWidths=parsesfd("unifont.sfd", CharWidths)
-CharWidths=parsesfd("unifont_upper.sfd", CharWidths)
-
-#############################################################################
 # Widths from UAX #11: East Asian Width
-# .. these take precedence over the Unifont width for all codepoints
+# .. these take precedence for all codepoints
 # listed explicitly as wide/full/narrow/half-width
 for line in readlines(open("EastAsianWidth.txt"))
     #Strip comments
-    line[1] == '#' && continue
+    (isempty(line) || line[1] == '#') && continue
     precomment = split(line, '#')[1]
     #Parse code point range and width code
     tokens = split(precomment, ';')
@@ -113,7 +95,7 @@ for line in readlines(open("EastAsianWidth.txt"))
     for c in charstart:charend
         if width=="W" || width=="F" # wide or full
             CharWidths[c]=2
-        elseif width=="Na"|| width=="H" # narrow or half
+        elseif width=="Na"|| width=="H"
             CharWidths[c]=1
         end
     end
@@ -126,56 +108,53 @@ end
 for c in keys(CharWidths)
     cat = catcode(c)
 
-    # make sure format control character (category Cf) have width 0,
-    # except for the Arabic characters 0x06xx (see unicode std 6.2, sec. 8.2)
-    if cat==UTF8proc.UTF8PROC_CATEGORY_CF && c ∉ [0x0601,0x0602,0x0603,0x06dd]
+    # make sure format control character (category Cf) have width 0
+    # (some of these, like U+0601, can have a width in some cases
+    #  but normally act like prepended combining marks.  U+fff9 etc
+    #  are also odd, but have zero width in typical terminal contexts)
+    if cat==UTF8PROC_CATEGORY_CF
        CharWidths[c]=0
     end
 
     # Unifont has nonzero width for a number of non-spacing combining
     # characters, e.g. (in 7.0.06): f84,17b4,17b5,180b,180d,2d7f, and
     # the variation selectors
-    if cat==UTF8proc.UTF8PROC_CATEGORY_MN
+    if cat==UTF8PROC_CATEGORY_MN
        CharWidths[c]=0
     end
 
-    # We also assign width of zero to unassigned and private-use
+    # We also assign width of one to unassigned and private-use
     # codepoints (Unifont includes ConScript Unicode Registry PUA fonts,
-    # but since these are nonstandard it seems questionable to recognize them).
-    if cat==UTF8proc.UTF8PROC_CATEGORY_CO || cat==UTF8proc.UTF8PROC_CATEGORY_CN
-        CharWidths[c]=0
+    # but since these are nonstandard it seems questionable to use Unifont metrics;
+    # if they are printed as the replacement character U+FFFD they will have width 1).
+    if cat==UTF8PROC_CATEGORY_CO || cat==UTF8PROC_CATEGORY_CN
+        CharWidths[c]=1
     end
 
     # for some reason, Unifont has width-2 glyphs for ASCII control chars
-    if cat==UTF8proc.UTF8PROC_CATEGORY_CC
+    if cat==UTF8PROC_CATEGORY_CC
        CharWidths[c]=0
     end
 end
 
+#Soft hyphen is typically printed as a hyphen (-) in terminals.
+CharWidths[0x00ad]=1
+
 #By definition, should have zero width (on the same line)
 #0x002028 '\u2028' category: Zl name: LINE SEPARATOR/
 #0x002029 '\u2029' category: Zp name: PARAGRAPH SEPARATOR/
 CharWidths[0x2028]=0
 CharWidths[0x2029]=0
 
-#By definition, should be narrow = width of 1 en space
-#0x00202f ' ' category: Zs name: NARROW NO-BREAK SPACE/
-CharWidths[0x202f]=1
-
-#By definition, should be wide = width of 1 em space
-#0x002001 ' ' category: Zs name: EM QUAD/
-#0x002003 ' ' category: Zs name: EM SPACE/
-CharWidths[0x2001]=2
-CharWidths[0x2003]=2
-
 #############################################################################
-# Output (to a file or pipe) for processing by data_generator.rb
-# ... don't bother to output zero widths since that will be the default.
+# Output (to a file or pipe) for processing by data_generator.rb,
+# encoded as a sequence of intervals.
 
 firstc = 0x000000
 lastv = 0
-uhex(c) = uppercase(hex(c,4))
+uhex(c) = uppercase(string(c,base=16,pad=4))
 for c in 0x0000:0x110000
+    global firstc, lastv
     v = get(CharWidths, c, 0)
     if v != lastv || c == 0x110000
         v < 4 || error("invalid charwidth $v for $c")
diff --git a/3rdparty/utf8proc/data/data_generator.rb b/3rdparty/utf8proc/data/data_generator.rb
old mode 100644
new mode 100755
index 97c9033a419..91cc03d0b40
--- a/3rdparty/utf8proc/data/data_generator.rb
+++ b/3rdparty/utf8proc/data/data_generator.rb
@@ -6,6 +6,8 @@
 #  production use.
 
+#  Copyright (c) 2018 Steven G. Johnson, Tony Kelman, Keno Fischer,
+#  Benito van der Zander, Michaël Meyer, and other contributors.
 #  Copyright (c) 2009 Public Software Group e. V., Berlin, Germany
 #
 #  Permission is hereby granted, free of charge, to any person obtaining a
@@ -65,7 +67,7 @@
 #  authorization of the copyright holder.
 
 
-$ignorable_list = File.read("DerivedCoreProperties.txt")[/# Derived Property: Default_Ignorable_Code_Point.*?# Total code points:/m]
+$ignorable_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Derived Property: Default_Ignorable_Code_Point.*?# Total code points:/m]
 $ignorable = []
 $ignorable_list.each_line do |entry|
   if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
@@ -75,7 +77,53 @@ $ignorable_list.each_line do |entry|
   end
 end
 
-$grapheme_boundclass_list = File.read("GraphemeBreakProperty.txt")
+$uppercase_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Derived Property: Uppercase.*?# Total code points:/m]
+$uppercase = []
+$uppercase_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+    $1.hex.upto($2.hex) { |e2| $uppercase << e2 }
+  elsif entry =~ /^[0-9A-F]+/
+    $uppercase << $&.hex
+  end
+end
+
+$lowercase_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Derived Property: Lowercase.*?# Total code points:/m]
+$lowercase = []
+$lowercase_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+    $1.hex.upto($2.hex) { |e2| $lowercase << e2 }
+  elsif entry =~ /^[0-9A-F]+/
+    $lowercase << $&.hex
+  end
+end
+
+$icb_linker_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Indic_Conjunct_Break=Linker.*?# Total code points:/m]
+$icb = Hash.new("UTF8PROC_INDIC_CONJUNCT_BREAK_NONE")
+$icb_linker_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+    $1.hex.upto($2.hex) { |e2| $icb[e2] = "UTF8PROC_INDIC_CONJUNCT_BREAK_LINKER" }
+  elsif entry =~ /^[0-9A-F]+/
+    $icb[$&.hex] = "UTF8PROC_INDIC_CONJUNCT_BREAK_LINKER"
+  end
+end
+$icb_consonant_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Indic_Conjunct_Break=Consonant.*?# Total code points:/m]
+$icb_consonant_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+    $1.hex.upto($2.hex) { |e2| $icb[e2] = "UTF8PROC_INDIC_CONJUNCT_BREAK_CONSONANT" }
+  elsif entry =~ /^[0-9A-F]+/
+    $icb[$&.hex] = "UTF8PROC_INDIC_CONJUNCT_BREAK_CONSONANT"
+  end
+end
+$icb_extend_list = File.read("DerivedCoreProperties.txt", :encoding => 'utf-8')[/# Indic_Conjunct_Break=Extend.*?# Total code points:/m]
+$icb_extend_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)/
+    $1.hex.upto($2.hex) { |e2| $icb[e2] = "UTF8PROC_INDIC_CONJUNCT_BREAK_EXTEND" }
+  elsif entry =~ /^[0-9A-F]+/
+    $icb[$&.hex] = "UTF8PROC_INDIC_CONJUNCT_BREAK_EXTEND"
+  end
+end
+
+$grapheme_boundclass_list = File.read("GraphemeBreakProperty.txt", :encoding => 'utf-8')
 $grapheme_boundclass = Hash.new("UTF8PROC_BOUNDCLASS_OTHER")
 $grapheme_boundclass_list.each_line do |entry|
   if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*([A-Za-z_]+)/
@@ -85,7 +133,20 @@ $grapheme_boundclass_list.each_line do |entry|
   end
 end
 
-$charwidth_list = File.read("CharWidths.txt")
+$emoji_data_list = File.read("emoji-data.txt", :encoding => 'utf-8')
+$emoji_data_list.each_line do |entry|
+  if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*Extended_Pictographic\W/
+    $1.hex.upto($2.hex) { |e2| $grapheme_boundclass[e2] = "UTF8PROC_BOUNDCLASS_EXTENDED_PICTOGRAPHIC" }
+  elsif entry =~ /^([0-9A-F]+)\s*;\s*Extended_Pictographic\W/
+    $grapheme_boundclass[$1.hex] = "UTF8PROC_BOUNDCLASS_EXTENDED_PICTOGRAPHIC"
+  elsif entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*Emoji_Modifier\W/
+    $1.hex.upto($2.hex) { |e2| $grapheme_boundclass[e2] = "UTF8PROC_BOUNDCLASS_EXTEND" }
+  elsif entry =~ /^([0-9A-F]+)\s*;\s*Emoji_Modifier\W/
+    $grapheme_boundclass[$1.hex] = "UTF8PROC_BOUNDCLASS_EXTEND"
+  end
+end
+
+$charwidth_list = File.read("CharWidths.txt", :encoding => 'utf-8')
 $charwidth = Hash.new(0)
 $charwidth_list.each_line do |entry|
   if entry =~ /^([0-9A-F]+)\.\.([0-9A-F]+)\s*;\s*([0-9]+)/
@@ -95,16 +156,16 @@ $charwidth_list.each_line do |entry|
   end
 end
 
-$exclusions = File.read("CompositionExclusions.txt")[/# \(1\) Script Specifics.*?# Total code points:/m]
+$exclusions = File.read("CompositionExclusions.txt", :encoding => 'utf-8')[/# \(1\) Script Specifics.*?# Total code points:/m]
 $exclusions = $exclusions.chomp.split("\n").collect { |e| e.hex }
 
-$excl_version = File.read("CompositionExclusions.txt")[/# \(2\) Post Composition Version precomposed characters.*?# Total code points:/m]
+$excl_version = File.read("CompositionExclusions.txt", :encoding => 'utf-8')[/# \(2\) Post Composition Version precomposed characters.*?# Total code points:/m]
 $excl_version = $excl_version.chomp.split("\n").collect { |e| e.hex }
 
-$case_folding_string = File.open("CaseFolding.txt", :encoding => 'utf-8').read
+$case_folding_string = File.read("CaseFolding.txt", :encoding => 'utf-8')
 $case_folding = {}
 $case_folding_string.chomp.split("\n").each do |line|
-  next unless line =~ /([0-9A-F]+); [CFS]; ([0-9A-F ]+);/i
+  next unless line =~ /([0-9A-F]+); [CF]; ([0-9A-F ]+);/i
   $case_folding[$1.hex] = $2.split(" ").collect { |e| e.hex }
 end
 
@@ -137,15 +198,15 @@ def cpary2utf16encoded(array)
 end
 def cpary2c(array)
   return "UINT16_MAX" if array.nil? || array.length == 0
-  lencode = array.length - 1 #no sequence has len 0, so we encode len 1 as 0, len 2 as 1, ...
+  lencode = array.length - 1 #no sequence has len 0, so we encode len 1 as 0, len 2 as 1, ...
   array = cpary2utf16encoded(array)
-  if lencode >= 7 #we have only 3 bits for the length (which is already cutting it close.  might need to change it to 2 bits in future Unicode versions)
-    array = [lencode] + array
-    lencode = 7
-  end
-  idx = pushary(array)
-  raise "Array index out of bound" if idx > 0x1FFF
-  return "#{idx | (lencode << 13)}"
+  if lencode >= 3 #we have only 2 bits for the length
+    array = [lencode] + array
+    lencode = 3
+  end
+  idx = pushary(array)
+  raise "Array index out of bound" if idx > 0x3FFF
+  return "#{idx | (lencode << 14)}"
 end
 def singlecpmap(cp)
   return "UINT16_MAX" if cp == nil
@@ -188,9 +249,12 @@ class UnicodeChar
     @decomp_mapping = ($8=='') ? nil :
                       $8.split.collect { |element| element.hex }
     @bidi_mirrored = ($13=='Y') ? true : false
-    @uppercase_mapping = ($16=='') ? nil : $16.hex
-    @lowercase_mapping = ($17=='') ? nil : $17.hex
-    @titlecase_mapping = ($18=='') ? nil : $18.hex
+    # issue #130: use nonstandard uppercase ß -> ẞ
+    # issue #195: if character is uppercase but has no lowercase mapping,
+    # then make lowercase mapping = itself (vice versa for lowercase)
+    @uppercase_mapping = ($16=='') ? (code==0x00df ? 0x1e9e : ($17=='' && $lowercase.include?(code) ? code : nil)) : $16.hex
+    @lowercase_mapping = ($17=='') ? ($16=='' && $uppercase.include?(code) ? code : nil) : $17.hex
+    @titlecase_mapping = ($18=='') ? (code==0x00df ? 0x1e9e : nil) : $18.hex
   end
   def case_folding
     $case_folding[code]
@@ -211,7 +275,8 @@ class UnicodeChar
       "#{$ignorable.include?(code)}, " <<
       "#{%W[Zl Zp Cc Cf].include?(category) and not [0x200C, 0x200D].include?(category)}, " <<
       "#{$charwidth[code]}, 0, " <<
-      "#{$grapheme_boundclass[code]}},\n"
+      "#{$grapheme_boundclass[code]}, " <<
+      "#{$icb[code]}},\n"
   end
 end
 
@@ -260,17 +325,17 @@ chars.each do |char|
     end
     unless comb2nd_indicies[dm1]
       comb2nd_indicies_sorted_keys << dm1
-      comb2nd_indicies[dm1] = comb2nd_indicies.keys.length 
+      comb2nd_indicies[dm1] = comb2nd_indicies.keys.length
    end
 
     comb_array[comb1st_indicies[dm0]] ||= []
     raise "Duplicate canonical mapping: #{char.code} #{dm0} #{dm1}" if comb_array[comb1st_indicies[dm0]][comb2nd_indicies[dm1]]
     comb_array[comb1st_indicies[dm0]][comb2nd_indicies[dm1]] = char.code
-    
+
     comb2nd_indicies_nonbasic[dm1] = true if char.code > 0xFFFF
   end
   char.c_decomp_mapping = cpary2c(char.decomp_mapping)
   char.c_case_folding = cpary2c(char.case_folding)
-end 
+end
 
 comb_indicies = {}
 cumoffset = 0
@@ -281,7 +346,7 @@ comb1st_indicies.each do |dm0, index|
   last = nil
   offset = 0
   comb2nd_indicies_sorted_keys.each_with_index do |dm1, b|
-    if comb_array[index][b] 
+    if comb_array[index][b]
       first = offset unless first
       last = offset
       last += 1 if comb2nd_indicies_nonbasic[dm1]
@@ -340,7 +405,7 @@ for code in 0...0x110000
   end
 end
 
-$stdout << "const utf8proc_uint16_t utf8proc_sequences[] = {\n "
+$stdout << "static const utf8proc_uint16_t utf8proc_sequences[] = {\n "
 i = 0
 $int_array.each do |entry|
   i += 1
@@ -352,7 +417,7 @@ $int_array.each do |entry|
 end
 $stdout << "};\n\n"
 
-$stdout << "const utf8proc_uint16_t utf8proc_stage1table[] = {\n "
+$stdout << "static const utf8proc_uint16_t utf8proc_stage1table[] = {\n "
 i = 0
 stage1.each do |entry|
   i += 1
@@ -364,7 +429,7 @@ stage1.each do |entry|
 end
 $stdout << "};\n\n"
 
-$stdout << "const utf8proc_uint16_t utf8proc_stage2table[] = {\n "
+$stdout << "static const utf8proc_uint16_t utf8proc_stage2table[] = {\n "
 i = 0
 stage2.flatten.each do |entry|
   i += 1
@@ -376,8 +441,8 @@ stage2.flatten.each do |entry|
 end
 $stdout << "};\n\n"
 
-$stdout << "const utf8proc_property_t utf8proc_properties[] = {\n"
-$stdout << " {0, 0, 0, 0, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, false,false,false,false, 0, 0, UTF8PROC_BOUNDCLASS_OTHER},\n"
+$stdout << "static const utf8proc_property_t utf8proc_properties[] = {\n"
+$stdout << " {0, 0, 0, 0, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, false,false,false,false, 1, 0, UTF8PROC_BOUNDCLASS_OTHER, UTF8PROC_INDIC_CONJUNCT_BREAK_NONE},\n"
 
 properties.each { |line| $stdout << line }
 
@@ -385,13 +450,13 @@
 $stdout << "};\n\n"
 
 
-$stdout << "const utf8proc_uint16_t utf8proc_combinations[] = {\n "
+$stdout << "static const utf8proc_uint16_t utf8proc_combinations[] = {\n "
 i = 0
 comb1st_indicies.keys.each_index do |a|
   offset = 0
   $stdout << comb1st_indicies_firstoffsets[a] << ", " << comb1st_indicies_lastoffsets[a] << ", "
   comb2nd_indicies_sorted_keys.each_with_index do |dm1, b|
-    break if offset > comb1st_indicies_lastoffsets[a] 
+    break if offset > comb1st_indicies_lastoffsets[a]
     if offset >= comb1st_indicies_firstoffsets[a]
       i += 1
       if i == 8
@@ -403,9 +468,8 @@ comb1st_indicies.keys.each_index do |a|
       $stdout << (v & 0xFFFF) << ", "
     end
     offset += 1
-    offset += 1 if comb2nd_indicies_nonbasic[dm1] 
+    offset += 1 if comb2nd_indicies_nonbasic[dm1]
  end
   $stdout << "\n"
 end
 $stdout << "};\n\n"
-
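
A note on the sequence encoding changed above: cpary2c() in data_generator.rb now packs each mapping entry as a 16-bit value with a 2-bit length code in the top bits and a 14-bit index into utf8proc_sequences below it (previously 3 and 13 bits), where length code 3 means the real length code is stored in-line as the first array element and UINT16_MAX still means "no mapping". The C sketch below illustrates how such an entry could be decoded; it is only an illustration derived from the generator code shown here, and the helper name decode_seq_entry is hypothetical, not utf8proc's actual decoder.

    #include <stddef.h>
    #include <stdint.h>

    /* Table emitted into utf8proc_data.c by data_generator.rb. */
    extern const uint16_t utf8proc_sequences[];

    /* Decode one 16-bit entry produced by cpary2c(): point *seq at the
     * UTF-16-encoded code units and return how many there are.  Callers
     * are expected to treat an entry of UINT16_MAX as "no mapping". */
    static size_t decode_seq_entry(uint16_t entry, const uint16_t **seq)
    {
        size_t lencode = entry >> 14;                 /* 2-bit length code */
        const uint16_t *p = utf8proc_sequences + (entry & 0x3FFF);
        if (lencode == 3)      /* escape: real length code stored in-line */
            lencode = *p++;
        *seq = p;
        return lencode + 1;    /* length code n encodes n+1 code units */
    }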