1 ! Copyright (C) 2022 CapitalEx
2 ! See https://factorcode.org/license.txt for BSD license.
3 USING: accessors arrays assocs combinators
4 combinators.short-circuit compiler.units formatting hash-sets
5 hashtables io io.encodings.utf8 io.files io.styles kernel
6 namespaces sequences sequences.parser sets sorting strings
7 unicode vectors vocabs vocabs.loader vocabs.prettyprint
8 vocabs.prettyprint.private ;
9 FROM: namespaces => set ;
! Snapshot of the vocab dictionary taken before linting; used by
! restore-dictionary to forget vocabs that were loaded during the scan.
SYMBOL: old-dictionary
16 ! Words for working with the dictionary.
17 : save-dictionary ( -- )
: restore-dictionary ( -- )
    ! Forget every vocab present in the current dictionary but absent
    ! from the saved snapshot, undoing loads performed while linting.
    dictionary get old-dictionary get [ keys >hash-set ] bi@
    diff members [ [ forget-vocab ] each ] with-compilation-unit ;
26 : vocab-loaded? ( name -- ? )
31 : tokenize ( string -- sequence-parser )
: skip-after ( sequence-parser seq -- sequence-parser )
    ! Advance the parser just past the next occurrence of seq,
    ! discarding everything read along the way.
    dupd take-until-sequence* drop ;
: skip-after* ( sequence-parser object -- sequence-parser )
    ! Advance the parser just past the next occurrence of object,
    ! discarding everything read along the way.
    dupd take-until-object drop ;
40 : next-line ( sequence-parser -- sequence-parser )
43 : quotation-mark? ( token -- ? )
46 : comment? ( token -- ? )
49 : string-literal? ( token -- ? )
53 ! Words for parsing tokens
! Discard a rejected (comment) token, skip the rest of its line, and
! read the next token in its place.
: reject-token ( sequence-parser token -- string )
    drop next-line next-token ;
59 : accept-token ( sequence-parser token -- string )
! Skip leading whitespace, then read characters up to the next blank,
! yielding one whitespace-delimited token.
: get-token ( sequence-parser -- token )
    skip-whitespace [ current blank? ] take-until ;
65 : next-token ( sequence-parser -- string )
66 dup get-token dup comment?
70 : skip-token ( sequence-parser -- sequence-parser )
74 ! Words for removing syntax that should be ignored
: ends-with-quote? ( token -- ? )
    ! A token closes a string literal when its final character is an
    ! unescaped double quote: last char is `"` and the character
    ! before it is not a backslash.
    last2 CHAR: " = swap CHAR: \ = not and ;
: end-string? ( token -- ? )
    ! Multi-character tokens terminate a string when they end with an
    ! unescaped quote; a single-character token terminates only when
    ! it is itself a quotation mark.
    dup length 1 = not [ ends-with-quote? ] [ quotation-mark? ] if ;
! Consume tokens until one that ends the current string literal is
! found, recursing on each non-terminating token.
: skip-string ( sequence-parser string -- sequence-parser )
    end-string? not [ dup next-token skip-string ] when ;
! If the token opens a string literal, consume the whole literal and
! yield f so it is not collected; otherwise pass the token through.
! 1&& short-circuits, so string-literal? never sees an empty token.
: ?handle-string ( sequence-parser string -- sequence-parser string/f )
    dup { [ empty? not ] [ string-literal? ] } 1&& [ skip-string f ] when ;
87 : next-word/f ( sequence-parser -- sequence-parser string/f )
89 ! skip over empty tokens
93 { "FROM:" [ ";" skip-after f ] }
94 { "SYMBOLS:" [ ";" skip-after f ] }
95 { "R/" [ "/" skip-after f ] }
96 { "(" [ ")" skip-after f ] }
97 { "IN:" [ skip-token f ] }
98 { "SYMBOL:" [ skip-token f ] }
99 { ":" [ skip-token f ] }
100 { "POSTPONE:" [ skip-token f ] }
101 { "\\" [ skip-token f ] }
102 { "CHAR:" [ skip-token f ] }
105 { "!" [ next-line f ] }
106 { "((" [ "))" skip-after f ] }
107 { "/*" [ "*/" skip-after f ] }
108 { "![[" [ "]]" skip-after f ] }
109 { "![=[" [ "]=]" skip-after f ] }
110 { "![==[" [ "]==]" skip-after f ] }
111 { "![===[" [ "]===]" skip-after f ] }
112 { "![====[" [ "]====]" skip-after f ] }
113 { "![=====[" [ "]=====]" skip-after f ] }
114 { "![======[" [ "]======]" skip-after f ] }
116 ! strings (special case needed for `"`)
117 { "STRING:" [ ";" skip-after f ] }
118 { "[[" [ "]]" skip-after f ] }
119 { "[=[" [ "]=]" skip-after f ] }
120 { "[==[" [ "]==]" skip-after f ] }
121 { "[===[" [ "]===]" skip-after f ] }
122 { "[====[" [ "]====]" skip-after f ] }
123 { "[=====[" [ "]=====]" skip-after f ] }
124 { "[======[" [ "]======]" skip-after f ] }
127 { "EBNF[[" [ "]]" skip-after f ] }
128 { "EBNF[=[" [ "]=]" skip-after f ] }
129 { "EBNF[==[" [ "]==]" skip-after f ] }
130 { "EBNF[===[" [ "]===]" skip-after f ] }
131 { "EBNF[====[" [ "]====]" skip-after f ] }
132 { "EBNF[=====[" [ "]=====]" skip-after f ] }
133 { "EBNF[======[" [ "]======]" skip-after f ] }
136 { "!AUTHOR" [ next-line f ] }
137 { "!BROKEN" [ next-line f ] }
138 { "!BUG" [ next-line f ] }
139 { "!FIXME" [ next-line f ] }
140 { "!LICENSE" [ next-line f ] }
141 { "!LOL" [ next-line f ] }
142 { "!NOTE" [ next-line f ] }
143 { "!REVIEW" [ next-line f ] }
144 { "!TODO" [ next-line f ] }
145 { "!XXX" [ next-line f ] }
149 } case ?handle-string ;
! When the parsed value is not f, push it onto the vector (under the
! parser); the vector and parser are left on the stack either way.
: ?push ( vector sequence-parser string/? -- vector sequence-parser )
    [ [ swap [ push ] keep ] curry dip ] when* ;
! Tail-call quot with ( vector sequence-parser ) while the parser has
! input left; once exhausted, drop the parser and return the
! accumulated vector.
: ?keep-parsing-with ( vector sequence-parser quot -- vector )
    [ dup sequence-parse-end? not ] dip
    [ call( x x -- x ) ] curry [ drop ] if ;
! One step of the token-stripping loop: read the next meaningful token
! (f for ignored syntax), collect it, and recurse until the parser is
! exhausted. harvest prunes empty strings from the result.
! Fixed typo in the stack effect: "sequence-praser" -> "sequence-parser"
! (names in effects are documentation only; arity is unchanged).
: (strip-code) ( vector sequence-parser -- vector )
    skip-whitespace next-word/f ?push
    [ (strip-code) ] ?keep-parsing-with harvest ;
: strip-code ( string -- string )
    ! Reduce source text to a vector of meaningful tokens, with
    ! comments, string literals and skipped declarations removed.
    [ V{ } clone ] dip tokenize (strip-code) ;
166 ! Words for finding the words used in a program
167 ! and stripping out import statements
168 : skip-imports ( sequence-parser -- sequence-parser string/? )
170 { "USING:" [ ";" skip-after* f ] }
171 { "USE:" [ advance f ] }
175 : take-imports ( sequence-parser -- vector )
177 { "USING:" [ ";" take-until-object ] }
178 { "USE:" [ 1 take-n ] }
! Collect every token that is not part of an import statement,
! recursing until the parser runs out of input.
: (find-used-words) ( vector sequence-parser -- vector )
    skip-imports ?push [ (find-used-words) ] ?keep-parsing-with ;
: find-used-words ( vector -- set )
    ! Wrap the token vector in a parser and gather all non-import
    ! tokens into a fast-lookup set.
    V{ } clone swap <sequence-parser> (find-used-words) fast-set ;
! Merge the vocab names named by the next USING:/USE: form into the
! accumulator, recursing until the parser is exhausted.
: (find-imports) ( vector sequence-parser -- vector )
    dup take-imports rot prepend swap [ (find-imports) ] ?keep-parsing-with ;
: find-imports ( vector -- seq )
    ! Parse out every imported vocab name, remembering the result in
    ! the cache variable for later reporting.
    V{ } clone swap <sequence-parser> (find-imports) dup cache set ;
: (get-words) ( name -- vocab )
    ! Pair a vocab name with the names of the words it defines,
    ! loading the vocab if it is not already loaded.
    [ load-vocab words>> keys ] keep swap 2array ;
197 : no-vocab-found ( name -- empty )
! Keep only the vocab names none of whose words appear in the
! used-word set: drop assoc entries whose word list has any member in
! the hash-set, then return the surviving keys.
: reject-unused-vocabs ( assoc hash-set -- seq )
    '[ [ _ in? ] any? ] reject-values keys ;
! Pretty-print a replacement USING: form containing only the needed
! imports (the cached import list minus the unused ones in seq).
:: print-new-header ( seq -- )
    "Use the following header to remove unused imports: " print
    manifest-style [ cache get seq diff pprint-using ] with-nesting ;
207 :: print-unused-vocabs ( name seq -- )
208 name "The following vocabs are unused in %s: \n" printf
209 seq [ " - " prepend print ] each
: print-no-unused-vocabs ( name _ -- )
    ! Report that the vocab is clean; the second value (the empty
    ! result sequence) is discarded.
    [ "No unused vocabs found in %s.\n" printf ] dip drop ;
218 ! Private details for fetching words and imports
! Look up a vocab's words, falling back to no-vocab-found when the
! vocab does not exist.
: get-words ( name -- assoc )
    dup vocab-exists? [ (get-words) ] [ no-vocab-found ] if ;
222 : get-imported-words ( string -- hashtable )
224 find-imports [ get-words ] map >hashtable
! Tokenize the source text, then diff the imported vocabs against the
! words actually used, yielding a sorted list of unused vocab names.
: find-unused-in-string ( string -- seq )
    strip-code [ get-imported-words ] [ find-used-words ] bi
    reject-unused-vocabs sort ;
! Read a source file as UTF-8 and scan its contents for unused imports.
: find-unused-in-file ( path -- seq )
    utf8 file-contents find-unused-in-string ;
: find-unused ( name -- seq )
    ! Resolve the vocab's source path and scan that file; yields f
    ! when the vocab has no source path.
    vocab-source-path [ find-unused-in-file ] [ f ] if* ;
! Entry point: report the unused vocabs of a named vocab, or a clean
! bill of health when the result is empty.
! NOTE(review): find-unused yields f when the vocab has no source
! path; empty? on f looks like it would error here -- confirm that
! callers only pass loadable vocab names.
: find-unused. ( name -- )
    dup find-unused dup empty?
    [ print-no-unused-vocabs ]
    [ print-unused-vocabs ] if ;