! Copyright (C) 2008, 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: combinators.short-circuit assocs math kernel sequences
io.files hashtables quotations splitting grouping arrays io
math.parser math.order byte-arrays namespaces math.bitwise
compiler.units parser io.encodings.ascii values interval-maps
ascii sets combinators locals math.ranges sorting make
strings.parser io.encodings.utf8 memoize simple-flat-file ;
FROM: namespaces => set ;
IN: unicode.data

! NOTE(review): this file arrived with every `<...>` span deleted
! (HTML-tag stripping) and all newlines collapsed. The spans marked
! "(restored)" below are reconstructed from internal evidence
! (surviving call sites, set-value targets, `C:` syntax) -- confirm
! against upstream basis/unicode/data/data.factor before shipping.

! Tables built at parse time from the Unicode Character Database
! files; VALUE: words so the load-time code below can set-value them.
! (restored -- the `\ ... set-value` forms at the bottom require them)
<PRIVATE

VALUE: simple-lower
VALUE: simple-upper
VALUE: simple-title
VALUE: canonical-map
VALUE: combine-map
VALUE: class-map
VALUE: compatibility-map
VALUE: category-map
VALUE: special-casing
VALUE: properties

! Pack two code points into one integer, 21 bits each, and unpack.
! Used to key combine-map on a (starter, combining) pair.
! (`: >` prefix of >2ch restored -- combine-chars calls >2ch)
: >2ch ( a b -- c ) [ 21 shift ] dip + ;
: 2ch> ( c -- a b ) [ -21 shift ] [ 21 on-bits mask ] bi ;

PRIVATE>

VALUE: name-map

: canonical-entry ( char -- seq ) canonical-map at ; inline
: combine-chars ( a b -- char/f ) >2ch combine-map at ; inline
: compatibility-entry ( char -- seq ) compatibility-map at ; inline
: combining-class ( char -- n ) class-map at ; inline
: non-starter? ( char -- ? ) combining-class { 0 f } member? not ; inline
: name>char ( name -- char ) name-map at ; inline
: char>name ( char -- name ) name-map value-at ; inline
: property? ( char property -- ? ) properties at interval-key? ; inline
: ch>lower ( ch -- lower ) simple-lower ?at drop ; inline
: ch>upper ( ch -- upper ) simple-upper ?at drop ; inline
: ch>title ( ch -- title ) simple-title ?at drop ; inline
: special-case ( ch -- casing-tuple ) special-casing at ; inline

! For non-existent characters, use Cn
CONSTANT: categories
    { "Cn"
      "Lu" "Ll" "Lt" "Lm" "Lo"
      "Mn" "Mc" "Me"
      "Nd" "Nl" "No"
      "Pc" "Pd" "Ps" "Pe" "Pi" "Pf" "Po"
      "Sm" "Sc" "Sk" "So"
      "Zs" "Zl" "Zp"
      "Cc" "Cf" "Cs" "Co" }

<PRIVATE

! Category name -> index into the categories table, the inverse of
! `categories nth` used by `category`.
! (header and `categories <enum>` restored -- process-category calls it)
MEMO: categories-map ( -- hashtable )
    categories <enum> [ swap ] H{ } assoc-map-as ;

CONSTANT: num-chars HEX: 2FA1E

PRIVATE>

: category# ( char -- n )
    ! There are a few characters that should be Cn
    ! that this gives Cf or Mn
    ! Cf = 26; Mn = 5; Cn = 29
    ! Use a compressed array instead?
    dup category-map ?nth [ ] [
        dup HEX: E0001 HEX: E007F between?
        [ drop 26 ] [
            HEX: E0100 HEX: E01EF between? 5 29 ?
        ] if
    ] ?if ;

: category ( char -- category ) category# categories nth ;

<PRIVATE

! (restored -- called bare at the bottom of the file)
: load-data ( -- data )
    "vocab:unicode/data/UnicodeData.txt" data ;

! Extract ( code-point-hex, nth-field ) pairs from the parsed rows,
! converting the code point from hex.
! (header and first line restored -- callers pass `index data`,
! and the strip ended mid-token in `map>assoc`)
: (process-data) ( index data -- newdata )
    [ [ nth ] keep first swap ] with { } map>assoc
    [ [ hex> ] dip ] assoc-map ;

: process-data ( index data -- hash )
    (process-data) [ hex> ] assoc-map [ nip ] assoc-filter
    >hashtable ;

! Recursively expand a decomposition until every element is terminal.
: (chain-decomposed) ( hash value -- newvalue )
    [
        2dup swap at
        [ (chain-decomposed) ] [ 1array nip ] ?if
    ] with map concat ;

: chain-decomposed ( hash -- newhash )
    dup [ swap (chain-decomposed) ] curry assoc-map ;

! ? is t when the decomposition field is canonical (no <tag> prefix).
: first* ( seq -- ? )
    second { [ empty? ] [ first ] } 1|| ;

: (process-decomposed) ( data -- alist )
    5 swap (process-data)
    [ " " split [ hex> ] map ] assoc-map ;

: exclusions-file ( -- filename )
    "vocab:unicode/data/CompositionExclusions.txt" ;

! Code points listed in CompositionExclusions.txt; comment-only and
! blank lines hash to 0 and are dropped.
: exclusions ( -- set )
    exclusions-file utf8 file-lines
    [ "#" split1 drop [ blank? ] trim-tail hex> ] map
    [ 0 = not ] filter ;

: remove-exclusions ( alist -- alist )
    exclusions [ dup ] H{ } map>assoc assoc-diff ;

! Yields the composition map (packed pair -> composite) and the
! fully chained canonical decomposition map, in that order.
: process-canonical ( data -- hash hash )
    (process-decomposed) [ first* ] filter
    [
        [ second length 2 = ] filter remove-exclusions
        [ first2 >2ch swap ] H{ } assoc-map-as
    ] [ >hashtable chain-decomposed ] bi ;

: process-compatibility ( data -- hash )
    (process-decomposed)
    [ dup first* [ first2 rest 2array ] unless ] map
    [ second empty? not ] filter
    >hashtable chain-decomposed ;

! Combining classes; zero is the default so it is not stored.
: process-combining ( data -- hash )
    3 swap (process-data)
    [ string>number ] assoc-map
    [ nip zero? not ] assoc-filter
    >hashtable ;

! the maximum unicode char in the first 3 planes
: ?set-nth ( val index seq -- )
    2dup bounds-check? [ set-nth ] [ 3drop ] if ;

! UnicodeData.txt encodes big ranges as "..., First>"/"..., Last>"
! pairs of rows; propagate the First row's category across the range.
:: fill-ranges ( table -- table )
    name-map sort-values keys
    [ { [ "first>" tail? ] [ "last>" tail? ] } 1|| ] filter
    2 group [
        [ name>char ] bi@ [ [a,b] ] [ table ?nth ] bi
        [ swap table ?set-nth ] curry each
    ] assoc-each table ;

! (`<byte-array>` restored -- table must be an indexable sequence of
! num-chars category indices for ?set-nth/?nth)
:: process-category ( data -- category-listing )
    num-chars <byte-array> :> table
    2 data (process-data) [| char cat |
        cat categories-map at char table ?set-nth
    ] assoc-each table fill-ranges ;

! Names are normalized to lower case with spaces turned into dashes,
! and the assoc is inverted to name -> code point.
: process-names ( data -- names-hash )
    1 swap (process-data) [
        >lower { { CHAR: \s CHAR: - } } substitute swap
    ] H{ } assoc-map-as ;

: multihex ( hexstring -- string ) " " split [ hex> ] map sift ;

PRIVATE>

TUPLE: code-point lower title upper ;

! (constructor name restored -- C: requires one, and set-code-point's
! surviving tail `swap first set` needs the constructed tuple)
C: <code-point> code-point

! NOTE(review): this <PRIVATE appears never to be closed before end of
! file in the recovered text -- confirm against upstream.
<PRIVATE

! Store one SpecialCasing.txt row: fields 1-3 are lower/title/upper,
! keyed in the current namespace by the original code point (field 0).
! (head restored from the call site in load-special-casing)
: set-code-point ( seq -- )
    4 head [ multihex ] map first4
    <code-point> swap first set ;

! Extra properties
: parse-properties ( -- {{[a,b],prop}} )
    "vocab:unicode/data/PropList.txt" data [
        [ ".." split1 [ dup ] unless* [ hex> ] bi@ 2array ] dip
    ] assoc-map ;

! Group the [a,b] ranges by property name and build an interval set
! per property, for property? / interval-key? lookups.
! (`<interval-set>` restored -- the quotation was left empty)
: properties>intervals ( properties -- assoc[str,interval] )
    dup values members [ f ] H{ } map>assoc
    [ [ push-at ] curry assoc-each ] keep
    [ <interval-set> ] assoc-map ;

: load-properties ( -- assoc )
    parse-properties properties>intervals ;

! Special casing data
: load-special-casing ( -- special-casing )
    "vocab:unicode/data/SpecialCasing.txt" data
    [ length 5 = ] filter
    [ [ set-code-point ] each ] H{ } make-assoc ;

! Parse-time initialization: read UnicodeData.txt once and populate
! every VALUE: table from it.
load-data {
    [ process-names \ name-map set-value ]
    [ 13 swap process-data \ simple-lower set-value ]
    [ 12 swap process-data \ simple-upper set-value ]
    [ 14 swap process-data
      simple-upper assoc-union \ simple-title set-value ]
    [ process-combining \ class-map set-value ]
    [ process-canonical \ canonical-map set-value
      \ combine-map set-value ]
    [ process-compatibility \ compatibility-map set-value ]
    [ process-category \ category-map set-value ]
} cleave

! Any character that can combine must have an entry in class-map;
! give class 0 to those UnicodeData did not assign one.
: postprocess-class ( -- )
    combine-map keys [ 2ch> nip ] map
    [ combining-class not ] filter
    [ 0 swap class-map set-at ] each ;

postprocess-class

load-special-casing \ special-casing set-value

load-properties \ properties set-value

! Make CHAR: name lookups in the parser resolve through name-map.
[ name>char [ "Invalid character" throw ] unless* ]
name>char-hook set-global