! Copyright (C) 2008, 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: combinators.short-circuit assocs math kernel sequences
io.files hashtables quotations splitting grouping arrays io
math.parser hash2 math.order byte-arrays words namespaces
compiler.units parser io.encodings.ascii values interval-maps
ascii sets combinators locals math.ranges sorting make
strings.parser io.encodings.utf8 memoize ;
IN: unicode.data
VALUE: simple-lower
VALUE: simple-upper
VALUE: simple-title
VALUE: canonical-map
VALUE: combine-map
VALUE: class-map
VALUE: compatibility-map
VALUE: category-map
VALUE: name-map
VALUE: special-casing
VALUE: properties
: canonical-entry ( char -- seq ) canonical-map at ; inline
: combine-chars ( a b -- char/f ) combine-map hash2 ; inline
: compatibility-entry ( char -- seq ) compatibility-map at ; inline
: combining-class ( char -- n ) class-map at ; inline
: non-starter? ( char -- ? ) combining-class { 0 f } member? not ; inline
: name>char ( name -- char ) name-map at ; inline
: char>name ( char -- name ) name-map value-at ; inline
: property? ( char property -- ? ) properties at interval-key? ; inline
: ch>lower ( ch -- lower ) simple-lower at-default ; inline
: ch>upper ( ch -- upper ) simple-upper at-default ; inline
: ch>title ( ch -- title ) simple-title at-default ; inline
: special-case ( ch -- casing-tuple ) special-casing at ; inline
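! Rough sketch of how these accessors behave once the tables below
! are loaded (the values come from UnicodeData.txt):
!     CHAR: a ch>upper .          ! 65   (U+0061 maps to U+0041)
!     HEX: 301 combining-class .  ! 230  (COMBINING ACUTE ACCENT)
!     CHAR: A char>name .         ! "latin-capital-letter-a"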
! For non-existent characters, use Cn
CONSTANT: categories
    { "Lu" "Ll" "Lt" "Lm" "Lo"
      "Mn" "Mc" "Me"
      "Nd" "Nl" "No"
      "Pc" "Pd" "Ps" "Pe" "Pi" "Pf" "Po"
      "Sm" "Sc" "Sk" "So"
      "Zs" "Zl" "Zp"
      "Cc" "Cf" "Cs" "Co"
      "Cn" }

MEMO: categories-map ( -- hashtable )
    categories <enum> [ swap ] H{ } assoc-map-as ;

CONSTANT: num-chars HEX: 2FA1E
: category# ( char -- category )
    ! There are a few characters that should be Cn
    ! that this gives Cf or Mn
    ! Cf = 26; Mn = 5; Cn = 29
    ! Use a compressed array instead?
    dup category-map ?nth [ ] [
        dup HEX: E0001 HEX: E007F between?
        [ drop 26 ] [
            HEX: E0100 HEX: E01EF between? 5 29 ?
        ] if
    ] ?if ;

: category ( char -- category )
    category# categories nth ;
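! For example, with the tables loaded, CHAR: A category yields "Lu";
! code points past num-chars fall through to the hard-coded Cf/Mn/Cn
! cases above (e.g. the tag characters U+E0001-U+E007F are Cf).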
! Loading data from UnicodeData.txt

: split-; ( line -- array )
    ";" split [ [ blank? ] trim ] map ;

: data ( filename -- data )
    ascii file-lines [ split-; ] map ;

: load-data ( -- data )
    "vocab:unicode/data/UnicodeData.txt" data ;
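! Each UnicodeData.txt line is a ;-separated record, roughly
!     0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
! so split-; yields { "0041" "LATIN CAPITAL LETTER A" "Lu" ... } and a
! field index then picks out one column per code point.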
: filter-comments ( lines -- lines )
    [ "#@" split first ] map harvest ;
: (process-data) ( index data -- newdata )
    filter-comments
    [ [ nth ] keep first swap ] with { } map>assoc
    [ [ hex> ] dip ] assoc-map ;

: process-data ( index data -- hash )
    (process-data) [ hex> ] assoc-map [ nip ] assoc-filter >hashtable ;
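! process-data builds a hash from code point to one numeric field.
! For instance, 13 load-data process-data maps each character to its
! simple lowercase mapping (field 13); rows whose field is empty parse
! to f and are dropped by the assoc-filter.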
: (chain-decomposed) ( hash value -- newvalue )
    [
        2dup swap at
        [ (chain-decomposed) ] [ 1array nip ] ?if
    ] with map concat ;

: chain-decomposed ( hash -- newhash )
    dup [ swap (chain-decomposed) ] curry assoc-map ;
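! chain-decomposed flattens decompositions that themselves decompose.
! E.g. U+212B ANGSTROM SIGN decomposes to U+00C5, which decomposes to
! U+0041 U+030A, so the chained entry becomes { HEX: 41 HEX: 30A }.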
: first* ( seq -- ? )
    second { [ empty? ] [ first ] } 1|| ;

: (process-decomposed) ( data -- alist )
    5 swap (process-data)
    [ " " split [ hex> ] map ] assoc-map ;

: exclusions-file ( -- filename )
    "vocab:unicode/data/CompositionExclusions.txt" ;

: exclusions ( -- set )
    exclusions-file utf8 file-lines
    [ "#" split1 drop [ blank? ] trim-tail hex> ] map harvest ;
: remove-exclusions ( alist -- alist )
    exclusions [ dup ] H{ } map>assoc assoc-diff ;
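! CompositionExclusions.txt lists precomposed characters (such as
! U+0958 DEVANAGARI LETTER QA) that must never be produced by
! composition, so they are removed from the pair table below.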
: process-canonical ( data -- hash2 hash )
    (process-decomposed) [ first* ] filter
    [
        [ second length 2 = ] filter remove-exclusions
        ! using 1009 as the size, the maximum load is 4
        [ first2 first2 rot 3array ] map 1009 alist>hash2
    ] [ >hashtable chain-decomposed ] bi ;
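! process-canonical derives two tables from field 5: a hash2 keyed on
! pairs of code points, used by combine-chars for composition (minus
! the exclusions), and a hash from code point to its fully chained
! canonical decomposition. For example, the pair U+0041 U+0300
! composes to U+00C0, and U+00C0 decomposes back to that pair.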
: process-compatibility ( data -- hash )
    (process-decomposed)
    [ dup first* [ first2 rest 2array ] unless ] map
    [ second empty? not ] filter
    >hashtable chain-decomposed ;
: process-combining ( data -- hash )
    3 swap (process-data)
    [ string>number ] assoc-map
    [ nip zero? not ] assoc-filter
    >hashtable ;
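! class-map only keeps non-zero combining classes, e.g. U+0301
! COMBINING ACUTE ACCENT has class 230; starters (class 0) are simply
! absent, which is what non-starter? relies on above.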
! the maximum unicode char in the first 3 planes

: ?set-nth ( val index seq -- )
    2dup bounds-check? [ set-nth ] [ 3drop ] if ;
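! ?set-nth silently ignores indices past the end of the table, since
! UnicodeData.txt also assigns code points above num-chars (e.g. the
! plane-14 tag characters that category# handles specially).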
:: fill-ranges ( table -- table )
    name-map >alist sort-values keys
    [ { [ "first>" tail? ] [ "last>" tail? ] } 1|| ] filter
    2 group [
        [ name>char ] bi@ [ [a,b] ] [ table ?nth ] bi
        [ swap table ?set-nth ] curry each
    ] assoc-each table ;
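! UnicodeData.txt abbreviates large blocks with a single pair of
! "..., First>" / "..., Last>" entries (e.g. the CJK ideographs
! starting at U+4E00); fill-ranges copies the category of the First
! entry across the whole range.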
:: process-category ( data -- category-listing )
    num-chars <byte-array> :> table
    2 data (process-data) [| char cat |
        cat categories-map at char table ?set-nth
    ] assoc-each table fill-ranges ;
: process-names ( data -- names-hash )
    1 swap (process-data) [
        >lower { { CHAR: \s CHAR: - } } substitute swap
    ] H{ } assoc-map-as ;
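! Names are normalized to lower case with dashes for spaces, so
! "LATIN SMALL LETTER A" is stored as "latin-small-letter-a" -> HEX: 61,
! and char>name answers the normalized spelling.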
: multihex ( hexstring -- string )
    " " split [ hex> ] map sift ;
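! multihex parses a space-separated list of hex code points, e.g.
!     "0041 030A" multihex .  ! { 65 778 }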
TUPLE: code-point lower title upper ;

C: <code-point> code-point

: set-code-point ( seq -- )
    4 head [ multihex ] map first4
    <code-point> swap first set ;
: properties-lines ( -- lines )
    "vocab:unicode/data/PropList.txt"
    ascii file-lines ;

: parse-properties ( -- {{[a,b],prop}} )
    properties-lines filter-comments [
        split-; first2
        [ ".." split1 [ dup ] unless* [ hex> ] bi@ 2array ] dip
    ] { } map>assoc ;
: properties>intervals ( properties -- assoc[str,interval] )
    dup values prune [ f ] H{ } map>assoc
    [ [ push-at ] curry assoc-each ] keep
    [ <interval-set> ] assoc-map ;

: load-properties ( -- assoc )
    parse-properties properties>intervals ;
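! Each property ends up as an interval set over code points, so
! property? is a range lookup; e.g. PropList.txt assigns White_Space
! to U+0009, so HEX: 9 "White_Space" property? yields t once loaded.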
! Special casing data

: load-special-casing ( -- special-casing )
    "vocab:unicode/data/SpecialCasing.txt" data
    [ length 5 = ] filter
    [ [ set-code-point ] each ] H{ } make-assoc ;
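! SpecialCasing.txt rows are code;lower;title;upper;..., e.g. U+00DF
! LATIN SMALL LETTER SHARP S has the full uppercase "SS" (0053 0053).
! Only unconditional rows (exactly five fields) are kept here.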
load-data {
    [ process-names to: name-map ]
    [ 13 swap process-data to: simple-lower ]
    [ 12 swap process-data to: simple-upper ]
    [ 14 swap process-data simple-upper assoc-union to: simple-title ]
    [ process-combining to: class-map ]
    [ process-canonical to: canonical-map to: combine-map ]
    [ process-compatibility to: compatibility-map ]
    [ process-category to: category-map ]
} cleave
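! UnicodeData.txt fields used above: 1 = name, 2 = general category,
! 3 = canonical combining class, 5 = decomposition, and 12/13/14 =
! simple uppercase/lowercase/titlecase mappings.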
: postprocess-class ( -- )
    combine-map [ [ second ] map ] map concat
    [ combining-class not ] filter
    [ 0 swap class-map set-at ] each ;
postprocess-class

load-special-casing to: special-casing

load-properties to: properties
[ name>char [ "Invalid character" throw ] unless* ]
name>char-hook set-global
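! Installing this hook lets string literals name characters through
! name-map (presumably via the \u{...} escape handled by
! strings.parser); unknown names raise the error above. Note that the
! names use the dashed, lower-case spelling built by process-names.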
: parse-key-value ( filename -- assoc )
    ! assoc is code point/range => name
    ascii file-lines filter-comments [ split-; ] map ;
SYMBOL: interned

: range, ( value key -- )
    swap interned get
    [ = ] with find nip 2array , ;
: expand-ranges ( assoc -- interval-map )
    [
        [
            swap CHAR: . over member? [
                ".." split1 [ hex> ] bi@ 2array
            ] [ hex> ] if range,
        ] assoc-each
    ] { } make <interval-map> ;
: process-key-value ( ranges -- table )
    dup values prune interned
    [ expand-ranges ] with-variable ;

: load-key-value ( filename -- table )
    parse-key-value process-key-value ;
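! load-key-value handles the other "range ; value" data files (files
! such as Scripts.txt), giving an interval map from code point to the
! interned value string.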