: parse-ldconfig-lines ( string -- triple )
[
- "=>" split1 [ [ blank? ] trim ] bi@
+ "=>" split1 [ [ unicode:blank? ] trim ] bi@
[
" " split1 [ "()" in? ] trim "," split
- [ [ blank? ] trim ] map
+ [ [ unicode:blank? ] trim ] map
[ ": Linux" swap subseq? ] reject
] dip 3array
] map ;
image-path parent-directory [
{ "git" "rev-parse" "--abbrev-ref" "HEAD" }
utf8 <process-reader> stream-contents
- [ blank? ] trim-tail
+ [ unicode:blank? ] trim-tail
] with-directory ;
: git-branch-destination ( -- dest )
: parse-color ( line -- name color )
first4
[ [ string>number 255 /f ] tri@ 1.0 <rgba> ] dip
- [ blank? ] trim-head H{ { CHAR: \s CHAR: - } } substitute swap ;
+ [ ascii:blank? ] trim-head H{ { CHAR: \s CHAR: - } } substitute swap ;
: parse-colors ( lines -- assoc )
[ "!" head? ] reject
: ?trim ( string -- string' )
dup length [ drop "" ] [
- over first-unsafe blank?
- [ drop t ] [ 1 - over nth-unsafe blank? ] if
- [ [ blank? ] trim ] when
+ over first-unsafe unicode:blank?
+ [ drop t ] [ 1 - over nth-unsafe unicode:blank? ] if
+ [ [ unicode:blank? ] trim ] when
] if-zero ; inline
: continue-field ( delimiter stream field-seps seq -- sep/f field )
dup zero? [
drop f
] [
- PQresultErrorMessage [ blank? ] trim
+ PQresultErrorMessage [ ascii:blank? ] trim
] if ;
: postgres-result-error ( res -- )
: (postgresql-error-message) ( handle -- str )
PQerrorMessage
- "\n" split [ [ blank? ] trim ] map join-lines ;
+ "\n" split [ [ ascii:blank? ] trim ] map join-lines ;
: postgresql-error-message ( -- str )
db-connection get handle>> (postgresql-error-message) ;
<PRIVATE
: blank-at? ( n seq -- n seq ? )
- 2dup ?nth blank? ;
+ 2dup ?nth unicode:blank? ;
: break-detector ( ? -- quot )
- '[ blank? _ xor ] ; inline
+ '[ unicode:blank? _ xor ] ; inline
: prev-word ( col str ? -- col )
break-detector find-last-from drop ?1+ ;
: parse-hosts ( path -- hosts )
utf8 file-lines
- [ [ blank? ] trim ] map harvest
+ [ [ unicode:blank? ] trim ] map harvest
[ "#" head? ] reject
[
- [ blank? ] split1-when
- [ blank? ] split-when harvest
+ [ unicode:blank? ] split1-when
+ [ unicode:blank? ] split-when harvest
] H{ } map>assoc ;
MEMO: system-hosts ( -- hosts ) hosts-path parse-hosts ;
INSTANCE: apropos-search topic
: apropos ( str -- )
- [ blank? ] trim <apropos-search> print-topic ;
+ [ unicode:blank? ] trim <apropos-search> print-topic ;
] produce nip ;
: code-lines ( str -- seq )
- split-lines [ [ blank? ] trim ] map harvest ;
+ split-lines [ [ ascii:blank? ] trim ] map harvest ;
: make-example ( str -- seq )
code-lines dup { [ array? ] [ length 1 > ] } 1&& [
{ "<ul><li>1</li><li>2</li><li>3</li></ul>" } [
[
"test7" test-template call-template
- ] run-template [ blank? ] reject
+ ] run-template [ ascii:blank? ] reject
] unit-test
TUPLE: person first-name last-name ;
{ "<table><tr><td>RBaxter</td><td>Unknown</td></tr><tr><td>Doug</td><td>Coleman</td></tr></table>" } [
[
"test8" test-template call-template
- ] run-template [ blank? ] reject
+ ] run-template [ ascii:blank? ] reject
] unit-test
{ } [
{ "<table><tr><td>RBaxter</td><td>Unknown</td></tr><tr><td>Doug</td><td>Coleman</td></tr></table>" } [
[
"test8" test-template call-template
- ] run-template [ blank? ] reject
+ ] run-template [ ascii:blank? ] reject
] unit-test
{ } [ 1 "id" set-value ] unit-test
{ "<table><tr><td>RBaxter</td><td>Unknown</td></tr></table>" } [
[
"test11" test-template call-template
- ] run-template [ blank? ] reject
+ ] run-template [ ascii:blank? ] reject
] unit-test
{ } [
USING: accessors http http.client http.client.private
-io.streams.string kernel namespaces sequences tools.test urls ;
+io.streams.string kernel namespaces sequences splitting
+tools.test urls ;
IN: http.client.tests
{ "foo.txt" } [ "http://www.paulgraham.com/foo.txt" download-name ] unit-test
! Copyright (C) 2005, 2010 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors ascii assocs calendar combinators.short-circuit
-destructors fry hashtables http http.client.post-data
-http.parsers io io.crlf io.encodings io.encodings.ascii
-io.encodings.binary io.encodings.iana io.encodings.string
-io.files io.pathnames io.sockets io.sockets.secure io.timeouts
-kernel locals math math.order math.parser mime.types namespaces
-present sequences splitting urls vocabs.loader combinators
-environment ;
+USING: accessors ascii assocs calendar combinators
+combinators.short-circuit destructors environment hashtables
+http http.client.post-data http.parsers io io.crlf io.encodings
+io.encodings.ascii io.encodings.binary io.encodings.iana
+io.encodings.string io.files io.pathnames io.sockets
+io.sockets.secure io.timeouts kernel math math.order math.parser
+mime.types namespaces present sequences splitting urls
+vocabs.loader ;
IN: http.client
ERROR: too-many-redirects ;
USING: accessors assocs continuations http http.server
http.server.requests io.encodings.utf8 io.encodings.binary
-io.streams.string kernel math peg sequences tools.test urls ;
+io.streams.string kernel math peg sequences tools.test urls
+splitting ;
{ t } [ [ \ + first ] [ <500> ] recover response? ] unit-test
console-vm-path "-run=listener" 2array >>command
+closed+ >>stdin
+stdout+ >>stderr
- utf8 [ lines last ] with-process-reader
+ utf8 [ read-lines last ] with-process-reader
] unit-test
: launcher-test-path ( -- str )
[ "IN: scratchpad " ] [
console-vm-path "-run=listener" 2array
- ascii [ "USE: system 0 exit" print flush lines last ] with-process-stream
+ ascii [ "USE: system 0 exit" print flush read-lines last ] with-process-stream
] unit-test
[ ] [
+++ /dev/null
-USING: tools.test io.pipes io.pipes.unix io.encodings.utf8
-io.encodings io namespaces sequences ;
-
-[ { 0 0 } ] [ { "ls" "grep ." } run-pipeline ] unit-test
-
-[ { 0 f 0 } ] [
- {
- "ls"
- [
- input-stream [ utf8 <decoder> ] change
- output-stream [ utf8 <encoder> ] change
- input-stream get lines reverse [ print ] each f
- ]
- "grep ."
- } run-pipeline
-] unit-test
--- /dev/null
+USING: tools.test io.pipes io.pipes.unix io.encodings.utf8
+io.encodings io namespaces sequences splitting ;
+
+[ { 0 0 } ] [ { "ls" "grep ." } run-pipeline ] unit-test
+
+[ { 0 f 0 } ] [
+ {
+ "ls"
+ [
+ input-stream [ utf8 <decoder> ] change
+ output-stream [ utf8 <encoder> ] change
+ input-stream get stream-contents split-lines reverse [ print ] each f
+ ]
+ "grep ."
+ } run-pipeline
+] unit-test
USING: accessors arrays assocs classes classes.algebra classes.mixin
classes.mixin.private classes.union.private compiler.units definitions
-eval hashtables kernel math parser sequences source-files strings
-tools.test vectors words ;
+eval hashtables kernel math parser sequences source-files splitting
+strings tools.test vectors words ;
IN: classes.mixin.tests
! Test mixins
USING: accessors classes.tuple.parser lexer words classes
sequences math kernel slots tools.test parser compiler.units
-arrays classes.tuple eval multiline ;
+arrays classes.tuple eval multiline splitting ;
IN: classes.tuple.parser.tests
TUPLE: test-1 ;
eval generic grouping io.pathnames io.streams.string kernel
lexer math multiline namespaces parser sequences sets
source-files source-files.errors strings tools.crossref
-tools.test vocabs vocabs.parser words words.symbol ;
+tools.test vocabs vocabs.parser words words.symbol splitting ;
IN: parser.tests
{ 1 [ 2 [ 3 ] 4 ] 5 }
{ $values { "seq" sequence } { "seq'" { $sequence string } } }
{ $description "Splits a string along line breaks." }
{ $examples
- { $example "USING: prettyprint splitting ;" "\"Hello\\r\\nworld\\n\" lines ." "{ \"Hello\" \"world\" }" }
+ { $example "USING: prettyprint splitting ;" "\"Hello\\r\\nworld\\n\" split-lines ." "{ \"Hello\" \"world\" }" }
} ;
HELP: replace
! See http://factorcode.org/license.txt for BSD license
USING: brainfuck io.streams.string kernel literals math
-math.parser math.ranges sequences tools.test ;
+math.parser math.ranges sequences splitting tools.test ;
[ "+" run-brainfuck ] must-infer
[ "+" get-brainfuck ] must-infer
! Copyright (C) 2013 Doug Coleman.
! See http://factorcode.org/license.txt for BSD license.
USING: assocs combinators.extras io.files kernel math sequences
splitting tools.test ;
{ "a b" }
[ "a" "b" [ " " glue ] once ] unit-test
{ f } [ H{ { 1 H{ { 3 4 } } } } { [ 1 of ] [ 2 of ] } chain ] unit-test
{ f } [ H{ { 2 H{ { 3 4 } } } } { [ 1 of ] [ 2 of ] } chain ] unit-test
{ 5 } [
- "hello factor!" { [ words ] [ first ] [ length ] } chain
+ "hello factor!" { [ split-words ] [ first ] [ length ] } chain
] unit-test
n string tag
2over nth-check-eof {
{ [ dup openstreq member? ] [ ch read-double-matched ] } ! (=( or ((
- { [ dup blank? ] [
+ { [ dup unicode:blank? ] [
drop dup '[ _ matching-delimiter-string closestr1 2array members lex-until ] dip
swap unclip-last 3array ] } ! ( foo )
[ drop [ slice-til-whitespace drop ] dip span-slices ] ! (foo)
[ "<" head? ]
[ length 2 >= ]
[ rest strict-upper? not ]
- [ [ blank? ] any? not ]
+ [ [ unicode:blank? ] any? not ]
[ "/>" tail? ]
} 1&& ;
[ length 2 >= ]
[ second CHAR: / = not ]
[ rest strict-upper? not ]
- [ [ blank? ] any? not ]
+ [ [ unicode:blank? ] any? not ]
[ ">" tail? ]
} 1&& ;
[ length 2 >= ]
[ second CHAR: / = not ]
[ rest strict-upper? not ]
- [ [ blank? ] any? not ]
+ [ [ unicode:blank? ] any? not ]
[ ">" tail? not ]
} 1&& ;
[ "</" head? ]
[ length 2 >= ]
[ rest strict-upper? not ]
- [ [ blank? ] any? not ]
+ [ [ unicode:blank? ] any? not ]
[ ">" tail? ]
} 1&& ;
: check-compound-loop ( n/f string -- n/f string ? )
[ ] [ peek1-from ] [ previous-from ] 2tri
- [ blank? ] bi@ or not ! no blanks between tokens
+ [ unicode:blank? ] bi@ or not ! no blanks between tokens
pick and ; ! and a valid index
: lex-factor ( n/f string/f -- n'/f string literal/f )
USING: pcre.ffi sequences splitting tools.test ;
IN: pcre.ffi.tests
-{ 2 } [ pcre_version words length ] unit-test
+{ 2 } [ pcre_version split-words length ] unit-test
: normalize-robots.txt ( string -- sitemaps seq )
split-lines
- [ [ blank? ] trim ] map
+ [ [ unicode:blank? ] trim ] map
[ "#" head? ] reject harvest
- [ ":" split1 [ [ blank? ] trim ] bi@ [ >lower ] dip ] { } map>assoc
+ [ ":" split1 [ [ unicode:blank? ] trim ] bi@ [ >lower ] dip ] { } map>assoc
[ first "sitemap" = ] partition [ values ] dip
[
{
{ { 2 4 6 } } [ { 1 2 3 4 5 6 } odd-indices ] unit-test
{ "a b c d e" }
-[ "a b \t \n \r c d \n e " [ blank? ] " " compact ] unit-test
+[ "a b \t \n \r c d \n e " [ ascii:blank? ] " " compact ] unit-test
{ " a b c d e " }
-[ " a b c d e " [ blank? ] " " collapse ] unit-test
+[ " a b c d e " [ ascii:blank? ] " " collapse ] unit-test
{ { "hello," " " "world!" " " " " } }
-[ "hello, world! " [ blank? ] slice-when [ >string ] map ] unit-test
+[ "hello, world! " [ ascii:blank? ] slice-when [ >string ] map ] unit-test
{ t }
[ "abc" sequence>slice slice? ] unit-test
{ "ADEBFC" } [ { "ABC" "D" "EF" } round-robin >string ] unit-test
{ { } } [ "ABC" [ ] { } trim-as ] unit-test
-{ "ABC" } [ { 32 65 66 67 32 } [ blank? ] "" trim-as ] unit-test
+{ "ABC" } [ { 32 65 66 67 32 } [ ascii:blank? ] "" trim-as ] unit-test
-{ t } [ "ABC" dup [ blank? ] ?trim [ identity-hashcode ] same? ] unit-test
-{ "ABC" } [ " ABC " [ blank? ] ?trim ] unit-test
+{ t } [ "ABC" dup [ ascii:blank? ] ?trim [ identity-hashcode ] same? ] unit-test
+{ "ABC" } [ " ABC " [ ascii:blank? ] ?trim ] unit-test
-{ t } [ "ABC" dup [ blank? ] ?trim-head [ identity-hashcode ] same? ] unit-test
-{ t } [ "ABC" dup [ blank? ] ?trim-tail [ identity-hashcode ] same? ] unit-test
-{ "ABC " } [ " ABC " [ blank? ] ?trim-head ] unit-test
-{ " ABC" } [ " ABC " [ blank? ] ?trim-tail ] unit-test
+{ t } [ "ABC" dup [ ascii:blank? ] ?trim-head [ identity-hashcode ] same? ] unit-test
+{ t } [ "ABC" dup [ ascii:blank? ] ?trim-tail [ identity-hashcode ] same? ] unit-test
+{ "ABC " } [ " ABC " [ ascii:blank? ] ?trim-head ] unit-test
+{ " ABC" } [ " ABC " [ ascii:blank? ] ?trim-tail ] unit-test
{ "" } [ "" "" "" unsurround ] unit-test
{ "" } [ " " " " " " unsurround ] unit-test
{ "cdef" } [ 2 f "abcdef" subseq* ] unit-test
{ "cd" } [ -4 -2 "abcdef" subseq* ] unit-test
-{ "foo" "" } [ "foo" [ blank? ] cut-when ] unit-test
-{ "foo" " " } [ "foo " [ blank? ] cut-when ] unit-test
-{ "" " foo" } [ " foo" [ blank? ] cut-when ] unit-test
-{ "foo" " bar" } [ "foo bar" [ blank? ] cut-when ] unit-test
+{ "foo" "" } [ "foo" [ ascii:blank? ] cut-when ] unit-test
+{ "foo" " " } [ "foo " [ ascii:blank? ] cut-when ] unit-test
+{ "" " foo" } [ " foo" [ ascii:blank? ] cut-when ] unit-test
+{ "foo" " bar" } [ "foo bar" [ ascii:blank? ] cut-when ] unit-test
{ { 4 0 3 1 2 } } [ { 0 4 1 3 2 } 5 <iota> [ nth* ] curry map ] unit-test
{ { "hello" " " " " " " "world" } } [
"hello world"
- [ [ blank? ] find drop ] split-find
+ [ [ ascii:blank? ] find drop ] split-find
[ >string ] map
] unit-test
<PRIVATE
: trimmed ( seq -- seq )
- [ [ blank? ] trim ] map harvest ;
+ [ [ unicode:blank? ] trim ] map harvest ;
: split-paragraphs ( str -- seq )
R/ \r?\n\r?\n/ re-split trimmed ;
{
[ split-paragraphs length ]
[ split-sentences length ]
- [ [ blank? not ] count ]
+ [ [ unicode:blank? not ] count ]
[ split-words ]
} cleave {
[ length ]
dup find-` [
dup 1 - pick ?nth CHAR: : =
[ drop name/values ] [ cut swap (parse-value) ] if
- [ rest [ blank? ] trim-head ] dip
+ [ rest [ unicode:blank? ] trim-head ] dip
] [ f swap ] if* ;
: (name=value) ( string -- remain term )
parse-name [ parse-value ] dip associate ;
: name=value ( string -- remain term )
- [ blank? ] trim
+ [ unicode:blank? ] trim
":`" over subseq? [ (name=value) ] [ f swap ] if ;
: name/values ( string -- remain terms )
<page-action>
{ help-webapp "search" } >>template
[
- "search" param [ blank? ] trim [
+ "search" param [ unicode:blank? ] trim [
help-dir [
[ article-apropos links "articles" set-value ]
[ word-apropos links "words" set-value ]