! Parse ldconfig-style output: for each line, drop everything before
! ":-", split the remainder on "=>", and trim blanks from both halves,
! collecting each pair into a 2-element array.
! NOTE(review): declared effect says "triple" but each element is a
! 2array — confirm the stack-effect comment against callers.
: parse-ldconfig-lines ( string -- triple )
[ ":-" split1 [ drop ] dip
- "=>" split1 [ [ blank? ] trim ] bi@
+ "=>" split1 [ [ unicode:blank? ] trim ] bi@
2array
] map ;
"bootstrap." prepend require ;
! Load the configured components: split the "include" and "exclude"
! globals into word lists, drop empty entries, subtract the excluded
! set, and load each remaining component.
: load-components ( -- )
- "include" "exclude" [ get-global words harvest ] bi@ diff
+ "include" "exclude" [ get-global split-words harvest ] bi@ diff
[ load-component ] each ;
: print-time ( us -- )
[ number>string ] [ month-name ] bi* swap " " glue 20 center. ;
: days-header. ( -- )
- day-abbreviations2 unwords print ;
+ day-abbreviations2 join-words print ;
: days. ( year month -- )
[ 1 (day-of-week) dup [ " " write ] times ]
[
[ month-name 20 center. ]
[ days-header. days. nl nl ] bi
- ] with-string-writer lines
+ ] with-string-writer split-lines
] with map 3 <groups>
[ first3 [ "%-20s %-20s %-20s\n" printf ] 3each ] each ;
[ first [ /mod ] [ dup ] if* ] [ second ] bi swap
dup 0 > [ number>string prepend , ] [ 2drop ] if
] each drop
- ] { } make [ "0s" ] [ reverse unwords ] if-empty ;
+ ] { } make [ "0s" ] [ reverse join-words ] if-empty ;
M: real elapsed-time
>integer elapsed-time ;
M: insn insn. ( insn -- )
tuple>array unclip-last insn-number. [
dup string? [ ] [ unparse ] if
- ] map unwords write nl ;
+ ] map join-words write nl ;
: block-header. ( bb -- )
[ number>> ] [ kill-block?>> "(k)" "" ? ] bi
<<
STRING: parse-insn-slot-specs-code
USING: compiler.cfg.instructions.syntax prettyprint splitting ;
-"use: src/int-rep temp: temp/int-rep" words parse-insn-slot-specs .
+"use: src/int-rep temp: temp/int-rep" split-words parse-insn-slot-specs .
;
STRING: parse-insn-slot-specs-result
: (postgresql-error-message) ( handle -- str )
PQerrorMessage
- "\n" split [ [ blank? ] trim ] map unlines ;
+ "\n" split [ [ blank? ] trim ] map join-lines ;
: postgresql-error-message ( -- str )
db-connection get handle>> (postgresql-error-message) ;
IN: db.sqlite.tests
! Collapse whitespace: split on space/newline, drop empty fragments,
! and rejoin with single spaces.
: normalize ( str -- str' )
- " \n" split harvest unwords ;
+ " \n" split harvest join-words ;
! delete-trigger-restrict
${
: with-undo ( ..a document quot: ( ..a document -- ..b ) -- ..b )
[ t >>inside-undo? ] dip keep f >>inside-undo? drop ; inline
! Split into lines, but append an extra empty line when the string is
! empty or ends in a line terminator, so document edits round-trip.
! Renamed to ?split-lines to avoid clashing with the library word.
-: split-lines ( str -- seq )
- [ lines ] keep ?last
+: ?split-lines ( str -- seq )
+ [ split-lines ] keep ?last
[ "\r\n" member? ] [ t ] if*
[ "" suffix ] when ;
:: doc-range ( from to document -- string )
from to [ [ from to ] dip document (doc-range) ] map-lines
- unlines ;
+ join-lines ;
: add-undo ( edit document -- )
dup inside-undo?>> [ 2drop ] [
:: set-doc-range ( string from to document -- )
from to = string empty? and [
- string split-lines :> new-lines
+ string ?split-lines :> new-lines
new-lines from text+loc :> new-to
from to document doc-range :> old-string
old-string string from to new-to <edit> document add-undo
:: set-doc-range* ( string from to document -- )
from to = string empty? and [
- string split-lines :> new-lines
+ string ?split-lines :> new-lines
new-lines from text+loc :> new-to
new-lines from to document [ (set-doc-range) ] models:change-model
new-to document update-locs
[ sha1-escape-string ] { } map-as ;
M: string sha1-escape-strings ( str -- strs )
- lines sha1-escape-strings ;
\ No newline at end of file
+ split-lines sha1-escape-strings ;
\ No newline at end of file
IN: eval
: parse-string ( str -- quot )
- [ lines parse-lines ] with-compilation-unit ;
+ [ split-lines parse-lines ] with-compilation-unit ;
: (eval) ( str effect -- )
[ parse-string ] dip call-effect ; inline
[ cut-slice ] [ f ] if* swap ;
:: (take-until) ( state delimiter accum -- string/f state' )
- state empty? [ accum unlines f ] [
+ state empty? [ accum join-lines f ] [
state unclip-slice :> ( rest first )
first delimiter split1 :> ( before after )
before accum push
} case ;
: parse-farkup ( string -- farkup )
- lines [ dup empty? not ] [ parse-item ] produce nip sift ;
+ split-lines [ dup empty? not ] [ parse-item ] produce nip sift ;
CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');"
} cond ;
: render-code ( string mode -- xml )
- [ lines ] dip htmlize-lines
+ [ split-lines ] dip htmlize-lines
[XML <pre><-></pre> XML] ;
GENERIC: (write-farkup) ( farkup -- xml )
[ >time ]
[ year>> number>string ]
} cleave
- ] output>array unwords ; inline
+ ] output>array join-words ; inline
: week-of-year ( timestamp day -- n )
[ dup clone 1 >>month 1 >>day day-of-week dup ] dip > [ 7 swap - ] when
: parse-list ( ftp-response -- ftp-response )
dup strings>>
- [ words harvest ] map
+ [ split-words harvest ] map
dup length {
{ 11 [ parse-list-11 ] }
{ 9 [ parse-list-11 ] }
start-directory [
utf8 encode-output [
"." directory.
- ] with-string-writer lines
+ ] with-string-writer split-lines
harvest [ ftp-send ] each
] with-output-stream finish-directory ;
: validation-failed ( -- * )
post-request? revalidate-url and [
begin-conversation
- nested-forms-key param words harvest nested-forms cset
+ nested-forms-key param split-words harvest nested-forms cset
form get form cset
<continue-conversation>
] [ <400> ] if*
compile-a-url [ [XML <base href=<->/> XML] ] [xml-code] ;
: hidden-nested-fields ( -- xml )
- nested-forms get unwords f like nested-forms-key
+ nested-forms get join-words f like nested-forms-key
hidden-form-field ;
: render-hidden ( for -- xml )
M: gir-not-found summary
[ name>> "“" "” file not found on paths:\n" surround ]
- [ paths>> unlines ] bi
+ [ paths>> join-lines ] bi
"\n\nUse the existing path or declare GIR_DIRS environment variable"
3append ;
: css-classes ( classes -- stylesheet )
[
[ css-style " { " "}" surround ] [ "." prepend ] bi* prepend
- ] { } assoc>map unlines ;
+ ] { } assoc>map join-lines ;
:: css-styles-to-classes ( body -- stylesheet body )
H{ } clone :> classes
] ($block) ; inline
: $code ( element -- )
- unlines dup <input> [ write ] ($code) ;
+ join-lines dup <input> [ write ] ($code) ;
: $syntax ( element -- ) "Syntax" $heading $code ;
"Examples" $heading print-element ;
: $example ( element -- )
- unclip-last [ unlines ] dip over <input> [
+ unclip-last [ join-lines ] dip over <input> [
[ print ] [ output-style get format ] bi*
] ($code) ;
{ \ $vocab-link [ second ] }
{ \ $emphasis [ second ] }
{ \ $subsection [ second article-name ] }
- { \ $subsections [ rest [ article-name ] map unwords ] }
- { \ $description [ rest [ element-value ] map unwords ] }
- { \ $notes [ rest [ element-value ] map unwords ] }
- { \ $snippet [ rest [ element-value ] map unwords ] }
+ { \ $subsections [ rest [ article-name ] map join-words ] }
+ { \ $description [ rest [ element-value ] map join-words ] }
+ { \ $notes [ rest [ element-value ] map join-words ] }
+ { \ $snippet [ rest [ element-value ] map join-words ] }
[ 2drop f ]
} case
] [ dup string? [ drop f ] unless ] if ;
MEMO: article-words ( name -- words )
- article-content [ element-value ] map unwords search-words
+ article-content [ element-value ] map join-words search-words
[ [ digit? ] all? ] reject
[ [ { [ letter? ] [ digit? ] } 1|| not ] trim ] map! harvest ;
] produce nip ;
! Split str into lines, trim blanks from each line, and discard the
! lines that end up empty.
: code-lines ( str -- seq )
- lines [ [ blank? ] trim ] map harvest ;
+ split-lines [ [ blank? ] trim ] map harvest ;
: make-example ( str -- seq )
code-lines dup { [ array? ] [ length 1 > ] } 1&& [
[
[ <$pretty-link> ]
[ superclass-of <$pretty-link> ]
- [ "slots" word-prop [ name>> ] map unwords <$snippet> ]
+ [ "slots" word-prop [ name>> ] map join-words <$snippet> ]
tri 3array
] map
{ { $strong "Class" } { $strong "Superclass" } { $strong "Slots" } } prefix
: <code> ( -- code )
code new ;
! Split into lines, yielding { } for f/empty input instead of
! splitting. Renamed from ?lines to follow the split-lines naming.
-: ?lines ( str/f -- seq )
- [ { } ] [ lines ] if-empty ;
+: ?split-lines ( str/f -- seq )
+ [ { } ] [ split-lines ] if-empty ;
! Render a code component: split its value into lines and highlight
! them with the component's mode.
M: code render*
- [ ?lines ] [ drop ] [ mode>> value ] tri* htmlize-lines ;
+ [ ?split-lines ] [ drop ] [ mode>> value ] tri* htmlize-lines ;
! Farkup component
TUPLE: farkup no-follow disable-images parsed ;
[
parser-quiet? on
"html.templates.fhtml" use-vocab
- lines parse-template-lines
+ split-lines parse-template-lines
] with-file-vocabs
] with-compilation-unit ;
"content-type: text/html; charset=UTF-8"
"date: Wed, 12 Oct 2011 18:57:49 GMT"
"server: Factor http.server"
- } [ unlines ] [ "\r\n" join ] bi
+ } [ join-lines ] [ "\r\n" join ] bi
[ [ read-response ] with-string-reader ] same?
] unit-test
[ read-request ] with-string-reader
[ write-request ] with-string-writer
! normalize crlf
- lines "\n" join
+ split-lines "\n" join
] unit-test
STRING: read-request-test-2
[ read-response ] with-string-reader
[ write-response ] with-string-writer
! normalize crlf
- lines "\n" join
+ split-lines "\n" join
] unit-test
{ t } [
swap >>content-type ;
: parse-content-type-attributes ( string -- attributes )
- words harvest [
+ split-words harvest [
"=" split1
"\"" ?head drop "\"" ?tail drop
] { } map>assoc ;
"connection: close"
"host: 127.0.0.1:55532"
"user-agent: Factor http.client"
- } [ unlines ] [ "\r\n" join ] bi
+ } [ join-lines ] [ "\r\n" join ] bi
[ string>request ] same?
] unit-test
"connection: close"
"host: 127.0.0.1:55532"
"user-agent: Factor http.client"
- } [ unlines ] [ "\r\n" join ] bi
+ } [ join-lines ] [ "\r\n" join ] bi
[ [ read-request ] with-string-reader ] same?
] unit-test
TUPLE: range ufirst ulast bfirst blast ;
: b>byte-array ( string -- byte-array )
- words [ hex> ] B{ } map-as ;
+ split-words [ hex> ] B{ } map-as ;
: add-range ( contained ranges -- )
[
<PRIVATE
: parse-iana ( file -- synonym-set )
utf8 file-lines { "" } split [
- [ words ] map
+ [ split-words ] map
[ first { "Name:" "Alias:" } member? ] filter
values { "None" } diff
] map harvest ;
] when ;
! Build a command line: escape each argument and join them with
! single spaces.
: join-arguments ( args -- cmd-line )
- [ escape-argument ] map unwords ;
+ [ escape-argument ] map join-words ;
: lookup-priority ( process -- n )
priority>> {
[
[ first name>> write bl ]
[ second write ": " write ]
- [ third unlines write ]
+ [ third join-lines write ]
tri
] histogram.
nl nl
check-log-message
log-service get
2dup [ log? ] [ ] bi* and [
- [ [ lines ] [ name>> ] [ name>> ] tri* ] dip
+ [ [ split-lines ] [ name>> ] [ name>> ] tri* ] dip
4array "log-message" send-to-log-server
] [
4drop
[ date>> log-timestamp. bl ]
[ level>> pprint bl ]
[ word-name>> write nl ]
- [ message>> unlines print ]
+ [ message>> join-lines print ]
} cleave ;
: log-entries. ( errors -- )
: unix-factor ( string -- )
dup string>number [
[ ": " append write ]
- [ factors [ number>string ] map unwords print ] bi*
+ [ factors [ number>string ] map join-words print ] bi*
] [
"factor: `" "' is not a valid positive integer" surround print
] if* flush ;
>byte-array write ;
! Parse raw header text: one header per line, blank lines dropped,
! each remaining line parsed into a key/value pair.
: parse-headers ( string -- hashtable )
- lines harvest [ parse-header-line ] map >hashtable ;
+ split-lines harvest [ parse-header-line ] map >hashtable ;
: fill-bytes ( multipart -- multipart )
buffer-size read
over string? [ member? ] [ [ member? ] curry any? ] if ;
! Query the GL driver's space-separated extension string and split it
! into individual extension names.
: gl-extensions ( -- seq )
- GL_EXTENSIONS glGetString words ;
+ GL_EXTENSIONS glGetString split-words ;
! True when every requested extension is present in the driver's
! extension list.
: has-gl-extensions? ( extensions -- ? )
gl-extensions [ (has-extension?) ] curry all? ;
: (make-gl-extensions-error) ( required-extensions -- )
H{
{ "dup" dup } { "nip" nip } { "over" over } ! kernel
{ "nth" nth } ! sequences
- } [ lines parse-lines ] with-words ;
+ } [ split-lines parse-lines ] with-words ;
M: ebnf-action (transform)
ebnf-transform check-action-effect action ;
M: callable present
[ "[ ]" ] [
[ drop "[ " ]
- [ [ present ] map unwords ]
+ [ [ present ] map join-words ]
[ drop " ]" ] tri 3append
] if-empty ;
55 [ "hello" ] replicate concat ;
{ f } [ message >quoted "=\r\n" swap subseq? ] unit-test
-{ 1 } [ message >quoted lines length ] unit-test
+{ 1 } [ message >quoted split-lines length ] unit-test
{ t } [ message >quoted-lines "=\r\n" swap subseq? ] unit-test
-{ 4 } [ message >quoted-lines lines length ] unit-test
-{ "===o" } [ message >quoted-lines lines [ last ] "" map-as ] unit-test
+{ 4 } [ message >quoted-lines split-lines length ] unit-test
+{ "===o" } [ message >quoted-lines split-lines [ last ] "" map-as ] unit-test
[ parse-resolv.conf-line ] each ;
! Parse resolv.conf contents supplied as a single string.
: string>resolv.conf ( string -- resolv.conf )
- lines lines>resolv.conf ;
+ split-lines lines>resolv.conf ;
! Parse resolv.conf by path, reading the file as UTF-8.
: path>resolv.conf ( path -- resolv.conf )
utf8 file-lines lines>resolv.conf ;
PRIVATE>
: format-table ( table -- seq )
- [ [ lines ] map format-row flip ] map concat flip
+ [ [ split-lines ] map format-row flip ] map concat flip
[ { } ] [
[ but-last-slice [ format-column ] map! drop ] keep
- flip [ unwords ] map!
+ flip [ join-words ] map!
] if-empty ;
[
"-staging" , "-no-user-init" , "-pic=0" ,
[ staging-image-name "-output-image=" prepend , ]
- [ unwords "-include=" prepend , ] bi
+ [ join-words "-include=" prepend , ] bi
] [
input-image-name "-i=" prepend ,
"-resource-path=" "" resource-path append ,
parse-fresh [ first assoc-union ] unless-empty ;
: set-deploy-config ( assoc vocab -- )
- [ [ unparse-use ] without-limits lines ] dip
+ [ [ unparse-use ] without-limits split-lines ] dip
"deploy.factor" set-vocab-file-lines ;
: set-deploy-flag ( value key vocab -- )
! See http://factorcode.org/license.txt for BSD license.
USING: accessors arrays calendar calendar.english combinators fry io
io.directories io.files.info kernel math math.parser prettyprint sequences
-system vocabs sorting.slots calendar.format ;
+system vocabs sorting.slots calendar.format splitting ;
IN: tools.files
<PRIVATE
dup year>> dup now year>> =
[ drop listing-time ] [ nip number>string ] if
5 CHAR: \s pad-head
- ] tri 3array unwords ;
+ ] tri 3array join-words ;
: read>string ( ? -- string ) "r" "-" ? ; inline
[ kinfo_proc memory>struct ] map ;
: ps-arg ( kp_proc -- arg )
- [ p_pid>> args rest unwords ] [
+ [ p_pid>> args rest join-words ] [
drop p_comm>> 0 over index [ head ] when* >string
] recover ;
SYMBOL: nested-examples
: example-using ( using -- )
- unwords "example-using" [
+ join-words "example-using" [
nested-examples get 4 0 ? CHAR: \s <string> "example-indent" [
"${example-indent}\"Example:\"
${example-indent}{ $example \"USING: ${example-using} ;\"
"\n" split
[ rest-slice [ [ blank? ] trim-head-slice ] map! drop ]
[ but-last-slice [ [ blank? ] trim-tail-slice ] map! drop ]
- [ unwords ]
+ [ join-words ]
tri ;
: last-line? ( document line -- ? )
SLOT: string
M: label string>>
- text>> dup string? [ unlines ] unless ; inline
+ text>> dup string? [ join-lines ] unless ; inline
<PRIVATE
PRIVATE>
: ?string-lines ( string -- string/array )
- CHAR: \n over member-eq? [ lines ] when ;
+ CHAR: \n over member-eq? [ split-lines ] when ;
M: label string<<
[
'[
dup length 3639 >
[ 3639 over last-grapheme-from cut-slice ] [ f ] if
- swap "" like split-lines @ dup
+ swap "" like ?split-lines @ dup
] loop drop ; inline
M: pane-stream stream-write
{ presented image-style } pick '[ _ key? ] any? [
pane-text
] [
- [ words ] 2dip
+ [ split-words ] 2dip
[ pane-bl ] [ pane-text ] bi-curry bi-curry
interleave
] if ;
M: f draw-cell 2drop ;
! Collapse embedded CR/LF into single spaces so the string renders as
! one cell line; strings without line breaks pass through unchanged.
: single-line ( str -- str' )
- dup [ "\r\n" member? ] any? [ lines unwords ] when ;
+ dup [ "\r\n" member? ] any? [ split-lines join-words ] when ;
M: string cell-dim single-line text-dim first2 ceiling 0 ;
M: string draw-cell single-line draw-text ;
M:: interactor stream-read-unsafe ( n buf interactor -- count )
n [ 0 ] [
drop
- interactor interactor-read dup [ unlines ] when
+ interactor interactor-read dup [ join-lines ] when
n short [ head-slice 0 buf copy ] keep
] if-zero ;
M: interactor stream-read-until
swap '[
_ interactor-read [
- unlines CHAR: \n suffix
+ join-lines CHAR: \n suffix
[ _ member? ] dupd find
[ [ head ] when* ] dip dup not
] [ f f f ] if*
: collation-test-lines ( -- lines )
"vocab:unicode/UCA/CollationTest_SHIFTED.txt.zip"
- binary file-contents uncompress utf8 decode lines
+ binary file-contents uncompress utf8 decode split-lines
[ "#" head? ] reject harvest ;
: parse-collation-test-shifted ( -- lines )
[ CHAR: \t = ] trim-tail [ [ CHAR: \s = ] trim ] bi@
{
{ "address sizes" [
- "," split [ [ CHAR: \s = ] trim words first string>number ] map
+ "," split [ [ CHAR: \s = ] trim split-words first string>number ] map
>>address-sizes
] }
{ "apicid" [ string>number >>apicid ] }
{ "bogomips" [ string>number >>bogomips ] }
{ "cache size" [
- words first [ CHAR: \s = ] trim
+ split-words first [ CHAR: \s = ] trim
string>number 1024 * >>cache-size
] }
{ "cache_alignment" [ string>number >>cache-alignment ] }
{ "cpuid level" [ string>number >>cpuid-level ] }
{ "f00f_bug" [ "yes" = >>f00f-bug? ] }
{ "fdiv_bug" [ "yes" = >>fdiv-bug? ] }
- { "flags" [ words harvest >>flags ] }
+ { "flags" [ split-words harvest >>flags ] }
{ "fpu" [ "yes" = >>fpu? ] }
{ "fpu_exception" [ "yes" = >>fpu-exception? ] }
{ "hlt_bug" [ "yes" = >>hlt-bug? ] }
: parse-proc-loadavg ( -- obj )
"/proc/loadavg" utf8 file-lines first
- words [
+ split-words [
{
[ string>number ]
[ string>number ]
! Different kernels have fewer fields. Make sure we have enough.
: parse-proc-meminfo ( -- meminfo )
"/proc/meminfo" utf8 file-lines
- [ words harvest second string>number 1024 * ] map
+ [ split-words harvest second string>number 1024 * ] map
proc-meminfo "slots" word-prop length f pad-tail
[ proc-meminfo boa ] input<sequence ;
[ second [ line>cpu ] map ]
[
third
- [ " " split1 nip words [ string>number ] map ] map
+ [ " " split1 nip split-words [ string>number ] map ] map
[
{
[ ]
: parse-proc-uptime ( -- uptime )
"/proc/uptime" utf8 file-lines first
- words first2 [ string>number seconds ] bi@
+ split-words first2 [ string>number seconds ] bi@
proc-uptime boa ;
! /proc/pid/*
: parse-proc-pid-stat ( pid -- stat )
"stat" proc-pid-path
proc-first-line
- words harvest
+ split-words harvest
pid-stat "slots" word-prop length "0" pad-tail
[ dup string>number [ nip ] when* ] map
[ pid-stat boa ] input<sequence ;
: parse-platform-section ( string suffix -- )
[
- [ [ lines parse-lines ] curry with-nested-compilation-unit ]
+ [ [ split-lines parse-lines ] curry with-nested-compilation-unit ]
curry
] dip with-vocabulary drop ; inline
<PRIVATE
! Split one line into wrap elements: each space/tab-separated run
! becomes an element carrying its length; leading whitespace, if any,
! is prefixed as a zero-width element so indentation survives wrapping.
! Renamed from split-line to avoid clashing with the library naming.
! NOTE(review): element weights (length 1 / 0 length) taken as-is —
! confirm against the wrap vocabulary's <element> contract.
-: split-line ( string -- elements )
+: wrap-split-line ( string -- elements )
dup [ " \t" member? not ] find drop 0 or
[ f swap ] [ cut ] if-zero
" \t" split harvest [ dup length 1 <element> ] map!
swap [ 0 over length <element> prefix ] when* ;
! Split the string into lines and convert each line into its wrap
! elements. Renamed to avoid clashing with the library split-lines.
-: split-lines ( string -- elements-lines )
- lines [ split-line ] map! ;
+: wrap-split-lines ( string -- elements-lines )
+ split-lines [ wrap-split-line ] map! ;
! Join each wrapped line's elements back into one string with spaces.
! The private join-lines helper is removed in favor of the library
! word of the same name.
: join-elements ( wrapped-lines -- lines )
- [ unwords ] map! ;
-
-: join-lines ( strings -- string )
- unlines ;
+ [ join-words ] map! ;
PRIVATE>
! Wrap the string to the given width, producing a sequence of lines.
: wrap-lines ( string width -- newlines )
- [ split-lines ] dip '[ _ wrap join-elements ] map! concat ;
+ [ wrap-split-lines ] dip '[ _ wrap join-elements ] map! concat ;
! Wrap the string to the given width and rejoin with newlines.
: wrap-string ( string width -- newstring )
wrap-lines join-lines ;
{ } [
"<style type=\"text/css\" media=\"screen\" >
* {margin:0; padding:0; border:0;}"
- lines "html" htmlize-lines drop
+ split-lines "html" htmlize-lines drop
] unit-test
{ } [
vocab-source-path highlight. ;
M: word highlight.
- [ see ] with-string-writer lines
+ [ see ] with-string-writer split-lines
"factor" highlight-lines ;
] if-empty ;
M: anonymous-intersection class-name
- participants>> [ class-name ] map unwords ;
+ participants>> [ class-name ] map join-words ;
PRIVATE>
"INSTANCE: sequence mixin-forget-test"
"GENERIC: mixin-forget-test-g ( x -- y )"
"M: mixin-forget-test mixin-forget-test-g ;"
- } unlines <string-reader> "mixin-forget-test"
+ } join-lines <string-reader> "mixin-forget-test"
parse-stream drop
] unit-test
"INSTANCE: hashtable mixin-forget-test"
"GENERIC: mixin-forget-test-g ( x -- y )"
"M: mixin-forget-test mixin-forget-test-g ;"
- } unlines <string-reader> "mixin-forget-test"
+ } join-lines <string-reader> "mixin-forget-test"
parse-stream drop
] unit-test
" f"
" 3"
"}"
- } unlines eval( -- tuple )
+ } join-lines eval( -- tuple )
] unit-test
{ T{ parsing-corner-case f 3 } } [
"T{ parsing-corner-case"
" { x 3 }"
"}"
- } unlines eval( -- tuple )
+ } join-lines eval( -- tuple )
] unit-test
{ T{ parsing-corner-case f 3 } } [
"T{ parsing-corner-case {"
" x 3 }"
"}"
- } unlines eval( -- tuple )
+ } join-lines eval( -- tuple )
] unit-test
{
"USE: classes.tuple.parser.tests T{ parsing-corner-case"
" { x 3 }"
- } unlines eval( -- tuple )
+ } join-lines eval( -- tuple )
] [ error>> unexpected-eof? ] must-fail-with
[
{
"USE: classes.tuple.parser.tests T{ parsing-corner-case {"
" x 3 }"
- } unlines eval( -- tuple )
+ } join-lines eval( -- tuple )
] [ error>> unexpected-eof? ] must-fail-with
TUPLE: bad-inheritance-tuple ;
C: <computer> computer
{ "TUPLE: computer cpu ram ;" } [
- [ \ computer see ] with-string-writer lines second
+ [ \ computer see ] with-string-writer split-lines second
] unit-test
TUPLE: laptop < computer battery ;
test-laptop-slot-values
{ "TUPLE: laptop < computer battery ;" } [
- [ \ laptop see ] with-string-writer lines second
+ [ \ laptop see ] with-string-writer split-lines second
] unit-test
{ { tuple computer laptop } } [ laptop superclasses-of ] unit-test
{ f } [ \ + server? ] unit-test
{ "TUPLE: server < computer rackmount ;" } [
- [ \ server see ] with-string-writer lines second
+ [ \ server see ] with-string-writer split-lines second
] unit-test
[
members>> [ instance? ] with any? ;
M: anonymous-union class-name
- members>> [ class-name ] map unwords ;
+ members>> [ class-name ] map join-words ;
M: union-class normalize-class
class-members <anonymous-union> normalize-class ;
"GENERIC: change-combination ( obj a -- b )"
"M: integer change-combination 2drop 1 ;"
"M: array change-combination 2drop 2 ;"
- } unlines <string-reader> "change-combination-test" parse-stream drop
+ } join-lines <string-reader> "change-combination-test" parse-stream drop
] unit-test
{ } [
"GENERIC#: change-combination 1 ( obj a -- b )"
"M: integer change-combination 2drop 1 ;"
"M: array change-combination 2drop 2 ;"
- } unlines <string-reader> "change-combination-test" parse-stream drop
+ } join-lines <string-reader> "change-combination-test" parse-stream drop
] unit-test
{ 2 } [
{ 4 V{ 1 2 3 } } [ 4 V{ 1 4 2 5 3 6 } [ dupd > ] filter! ] unit-test
{ "hello world how are you" }
-[ { "hello" "world" "how" "are" "you" } unwords ] unit-test
+[ { "hello" "world" "how" "are" "you" } join-words ] unit-test
{ "hello world how are you" }
[ { "hello" "world" "how" "are" "you" } " " "" join-as ] unit-test
split-when-slice
}
"Splitting a string into lines:"
-{ $subsections lines }
+{ $subsections split-lines }
"Replacing subsequences with another subsequence:"
{ $subsections replace } ;
{ $values { "seq" sequence } { "end" sequence } { "newseq" slice } { "?" boolean } }
{ $description "Like " { $link ?tail } ", except the resulting sequence is a " { $link slice } "." } ;
-HELP: lines
+HELP: split-lines
{ $values { "seq" sequence } { "seq'" { $sequence string } } }
{ $description "Splits a string along line breaks." }
{ $examples
{ "Beginning and end" f } [ "Beginning and end" "eginning " ?tail ] unit-test
{ { "This" "is" "a" "split" "sentence" } }
-[ "This is a split sentence" words ]
+[ "This is a split sentence" split-words ]
unit-test
{ { "OneWord" } }
-[ "OneWord" words ]
+[ "OneWord" split-words ]
unit-test
{ { "a" "b" "c" "d" "e" "f" } }
[ "aXbYcXdYeXf" "XY" split ] unit-test
{ { "" "" } }
-[ " " words ] unit-test
+[ " " split-words ] unit-test
{ { "hey" } }
-[ "hey" words ] unit-test
+[ "hey" split-words ] unit-test
{ "Hello world" t } [ "Hello world\n" "\n" ?tail ] unit-test
{ "Hello world" f } [ "Hello world" "\n" ?tail ] unit-test
{ "" t } [ "\n" "\n" ?tail ] unit-test
{ "" f } [ "" "\n" ?tail ] unit-test
-{ { } } [ "" lines ] unit-test
-{ { "" } } [ "\n" lines ] unit-test
-{ { "" } } [ "\r" lines ] unit-test
-{ { "" } } [ "\r\n" lines ] unit-test
-{ { "hello" } } [ "hello" lines ] unit-test
-{ { "hello" } } [ "hello\n" lines ] unit-test
-{ { "hello" } } [ "hello\r" lines ] unit-test
-{ { "hello" } } [ "hello\r\n" lines ] unit-test
-{ { "hello" "hi" } } [ "hello\nhi" lines ] unit-test
-{ { "hello" "hi" } } [ "hello\rhi" lines ] unit-test
-{ { "hello" "hi" } } [ "hello\r\nhi" lines ] unit-test
-{ { "hello" "" "" } } [ "hello\n\n\n" lines ] unit-test
-
-{ { } } [ SBUF" " lines ] unit-test
-{ { "" } } [ SBUF" \n" lines ] unit-test
-{ { "" } } [ SBUF" \r" lines ] unit-test
-{ { "" } } [ SBUF" \r\n" lines ] unit-test
-{ { "hello" } } [ SBUF" hello" lines ] unit-test
-{ { "hello" } } [ SBUF" hello\n" lines ] unit-test
-{ { "hello" } } [ SBUF" hello\r" lines ] unit-test
-{ { "hello" } } [ SBUF" hello\r\n" lines ] unit-test
-{ { "hello" "hi" } } [ SBUF" hello\nhi" lines ] unit-test
-{ { "hello" "hi" } } [ SBUF" hello\rhi" lines ] unit-test
-{ { "hello" "hi" } } [ SBUF" hello\r\nhi" lines ] unit-test
-{ { "hello" "" "" } } [ SBUF" hello\n\n\n" lines ] unit-test
+{ { } } [ "" split-lines ] unit-test
+{ { "" } } [ "\n" split-lines ] unit-test
+{ { "" } } [ "\r" split-lines ] unit-test
+{ { "" } } [ "\r\n" split-lines ] unit-test
+{ { "hello" } } [ "hello" split-lines ] unit-test
+{ { "hello" } } [ "hello\n" split-lines ] unit-test
+{ { "hello" } } [ "hello\r" split-lines ] unit-test
+{ { "hello" } } [ "hello\r\n" split-lines ] unit-test
+{ { "hello" "hi" } } [ "hello\nhi" split-lines ] unit-test
+{ { "hello" "hi" } } [ "hello\rhi" split-lines ] unit-test
+{ { "hello" "hi" } } [ "hello\r\nhi" split-lines ] unit-test
+{ { "hello" "" "" } } [ "hello\n\n\n" split-lines ] unit-test
+
+{ { } } [ SBUF" " split-lines ] unit-test
+{ { "" } } [ SBUF" \n" split-lines ] unit-test
+{ { "" } } [ SBUF" \r" split-lines ] unit-test
+{ { "" } } [ SBUF" \r\n" split-lines ] unit-test
+{ { "hello" } } [ SBUF" hello" split-lines ] unit-test
+{ { "hello" } } [ SBUF" hello\n" split-lines ] unit-test
+{ { "hello" } } [ SBUF" hello\r" split-lines ] unit-test
+{ { "hello" } } [ SBUF" hello\r\n" split-lines ] unit-test
+{ { "hello" "hi" } } [ SBUF" hello\nhi" split-lines ] unit-test
+{ { "hello" "hi" } } [ SBUF" hello\rhi" split-lines ] unit-test
+{ { "hello" "hi" } } [ SBUF" hello\r\nhi" split-lines ] unit-test
+{ { "hello" "" "" } } [ SBUF" hello\n\n\n" split-lines ] unit-test
{ { "hey" "world" "what's" "happening" } }
[ "heyAworldBwhat'sChappening" [ LETTER? ] split-when ] unit-test
[ pick subseq ] keep swap
] map 2nip ;
-! lines uses string-nth-fast which is 50% faster over
+! split-lines uses string-nth-fast which is 50% faster over
! nth-unsafe. be careful when changing the definition so that
! you don't unoptimize it.
! split-lines (formerly lines) splits a sequence on \n, \r, or \r\n;
! the string-lines alias is kept for backward compatibility.
-GENERIC: lines ( seq -- seq' )
+GENERIC: split-lines ( seq -- seq' )
-ALIAS: string-lines lines
+ALIAS: string-lines split-lines
! String implementation: repeatedly find the next \r or \n from the
! current index and slice out each line.
! NOTE(review): per the comment above this definition, it is tuned to
! use string-nth-fast — kept byte-identical; do not restructure.
-M: string lines
+M: string split-lines
[ V{ } clone 0 ] dip [ 2dup bounds-check? ] [
2dup [ "\r\n" member? ] find-from swapd [
over [ [ nip length ] keep ] unless
] when
] while 2drop { } like ;
! An sbuf is converted to a string and delegated to the string method.
-M: sbuf lines "" like lines ;
+M: sbuf split-lines "" like split-lines ;
! Renamed convenience words: join-lines/join-words replace
! unlines/unwords, split-words replaces words; the -as variants take
! an exemplar for the result sequence type.
-: unlines-as ( seq exemplar -- seq ) "\n" swap join-as ; inline
-: unlines ( seq -- seq ) "" unlines-as ; inline
-: words ( seq -- seq ) " " split ; inline
-: unwords-as ( seq exemplar -- seq ) " " swap join-as ; inline
-: unwords ( seq -- seq ) " " unwords-as ; inline
+: join-lines-as ( seq exemplar -- seq ) "\n" swap join-as ; inline
+: join-lines ( seq -- seq ) "" join-lines-as ; inline
+: split-words ( seq -- seq ) " " split ; inline
+: join-words-as ( seq exemplar -- seq ) " " swap join-as ; inline
+: join-words ( seq -- seq ) " " join-words-as ; inline
! Copyright (C) 2011 Doug Coleman.
! See http://factorcode.org/license.txt for BSD license.
-USING: combinators combinators.smart io kernel math math.parser
-math.ranges sequences ascii ;
+USING: ascii combinators combinators.smart io kernel math
+math.parser math.ranges sequences splitting ;
IN: 99-bottles
: bottles ( n -- number string )
[ bottles "of beer.\nTake one down, pass it around," ]
[ 1 - bottles [ >lower ] dip "of beer on the wall." ]
} cleave
- ] output>array unwords print nl ;
+ ] output>array join-words print nl ;
: last-verse ( -- )
"No more bottles of beer on the wall, no more bottles of beer." print
] map-sum
] map-sum ;
! Renamed from `words` to avoid shadowing the new library word.
-CONSTANT: words { 24-from-1 24-from-2 24-from-3 24-from-4 }
+CONSTANT: 24-words { 24-from-1 24-from-2 24-from-3 24-from-4 }
! Reset the memo tables, recompute the impossible-24 count, and
! assert both the result and the expected memo-table sizes.
: backtrack-benchmark ( -- )
- words [ reset-memoized ] each
+ 24-words [ reset-memoized ] each
find-impossible-24 6479 assert=
- words [ "memoize" word-prop assoc-size ] map
+ 24-words [ "memoize" word-prop assoc-size ] map
{ 1588 5137 4995 10000 } assert= ;
MAIN: backtrack-benchmark
! Copyright (C) 2009 Doug Coleman.
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors arrays assocs combinators
-concurrency.mailboxes fry io kernel make math math.parser
-math.text.english sequences threads ;
+USING: accessors arrays combinators concurrency.mailboxes io
+kernel make math math.parser math.text.english sequences
+splitting threads ;
IN: benchmark.chameneos-redux
SYMBOLS: red yellow blue ;
] if ;
! Render n as its digits spelled out in English, joined by spaces.
: number>chameneos-string ( n -- string )
- number>string string>digits [ number>text ] { } map-as unwords ;
+ number>string string>digits [ number>text ] { } map-as join-words ;
: chameneos-redux ( n colors -- )
[ <meeting-place> ] [ make-creatures ] bi*
! Based on http://shootout.alioth.debian.org/gp4/benchmark.php?test=fasta&lang=java&id=2
-USING: assocs benchmark.reverse-complement byte-arrays fry io
-io.encodings.ascii io.files locals kernel math sequences
-sequences.private specialized-arrays strings typed alien.data ;
+USING: alien.data assocs benchmark.reverse-complement
+byte-arrays io io.encodings.ascii io.files kernel math sequences
+sequences.private specialized-arrays splitting strings typed ;
QUALIFIED-WITH: alien.c-types c
SPECIALIZED-ARRAY: c:double
IN: benchmark.fasta
: write-description ( desc id -- )
">" write write bl print ;
! Call quot with line-length once per full output line of n total
! characters, then once with the remainder if nonzero. Renamed from
! split-lines to avoid clashing with the library word.
-:: split-lines ( n quot -- )
+:: n-split-lines ( n quot -- )
n line-length /mod
[ [ line-length quot call ] times ] dip
quot unless-zero ; inline
TYPED: write-random-fasta ( seed: float n: fixnum chars: byte-array floats: double-array desc id -- seed: float )
write-description
- '[ _ _ make-random-fasta ] split-lines ;
+ '[ _ _ make-random-fasta ] n-split-lines ;
TYPED:: make-repeat-fasta ( k: fixnum len: fixnum alu: string -- k': fixnum )
alu length :> kn
[let
:> alu
0 :> k!
- [| len | k len alu make-repeat-fasta k! ] split-lines
+ [| len | k len alu make-repeat-fasta k! ] n-split-lines
] ;
: fasta ( n out -- )
! Copyright (C) 2012 John Benediktsson
! See http://factorcode.org/license.txt for BSD license.
-USING: kernel math random sequences strings unicode ;
+USING: kernel math random sequences splitting strings unicode ;
IN: benchmark.unicode
! Build a benchmark input: 8 space-separated runs of 8 random code
! points (presumably in 0..0xffff — confirm random-integers' range).
: crazy-unicode-string ( -- string )
- 8 [ 8 0xffff random-integers ] replicate unwords ;
+ 8 [ 8 0xffff random-integers ] replicate join-words ;
: unicode-benchmark ( -- )
crazy-unicode-string 8 [
! Squares of numbers from 0 to 100
-${ 100 [0,b] [ dup * number>string ] map unlines "\n" append }
+${ 100 [0,b] [ dup * number>string ] map join-lines "\n" append }
[
"
++++[>+++++<-]>[<+++++>-]+<+[
IN: bunny.model
! Split the string on spaces, parse each token as a number, and drop
! tokens that fail to parse.
: numbers ( str -- seq )
- words [ string>number ] map sift ;
+ split-words [ string>number ] map sift ;
: (parse-model) ( vs is -- vs is )
readln [
IN: compiler.cfg.graphviz
! Graphviz left-justifies label lines terminated by \l; join the
! lines with that terminator.
: left-justify ( str -- str' )
- lines "\\l" join ;
+ split-lines "\\l" join ;
! Run quot capturing its output as a string, then left-justify it.
: left-justified ( quot -- str )
with-string-writer left-justify ; inline
M: reference-expr expr>str value>> unparse ;
-M: sequence expr>str [ unparse ] map unwords ;
+M: sequence expr>str [ unparse ] map join-words ;
M: object expr>str unparse ;
! Process the list of strings, which should make
! up an 8080 instruction, and output a quotation
! that would implement that instruction.
- dup unwords instruction-quotations
+ dup join-words instruction-quotations
[
"_" join [ "emulate-" % % ] "" make create-word-in
dup last-instruction set-global
now next-times-after ;
! Read crontab entries, one per non-blank line.
! NOTE(review): this hunk changes lines to read-lines (stream input),
! not split-lines — confirm the word reads from the current input
! stream rather than splitting a string.
: read-crontab ( -- entries )
- lines harvest [ parse-cronentry ] map ;
+ read-lines harvest [ parse-cronentry ] map ;
[ "Memory: " write cuda-device-memory number>string print ]
[
"Capability: " write
- cuda-device-capability [ number>string ] map unwords print
+ cuda-device-capability [ number>string ] map join-words print
]
[ "Properties: " write cuda-device-properties . ]
[
dup connect-opposite-edges ;
: parse-vertex ( line -- position )
- words first3 [ string>number >float ] tri@ 0.0 double-4-boa ;
+ split-words first3 [ string>number >float ] tri@ 0.0 double-4-boa ;
: read-vertex ( line vertices -- )
[ parse-vertex ] dip push ;
dup 0 >= [ nip 1 - ] [ [ length ] dip + ] if ;
: parse-face ( line vertices -- vertices )
- [ words ] dip '[ _ parse-face-index ] map ;
+ [ split-words ] dip '[ _ parse-face-index ] map ;
: read-face ( line vertices faces -- )
[ parse-face ] dip push ;
: gemini-print ( url body meta -- )
f pre [
PAGE delete-all
- gemini-charset decode lines [
+ gemini-charset decode split-lines [
{ [ pre get not ] [ "=>" ?head ] } 0&& [
swap gemini-link present over 2array PAGE push
PAGE length swap "[%s] %s\n" printf
PRIVATE>
: gemtext. ( base-url body -- )
- f pre [ lines [ gemini-line. ] with each ] with-variable ;
+ f pre [ split-lines [ gemini-line. ] with each ] with-variable ;
USING: accessors arrays assocs combinators combinators.smart csv
grouping http.client interval-maps io.encodings.ascii io.files
io.files.temp io.launcher io.pathnames ip-parser kernel math
-math.parser memoize sequences strings ;
+math.parser memoize sequences splitting strings ;
IN: geo-ip
: db-path ( -- path ) "IpToCountry.csv" cache-file ;
MEMO: ip-db ( -- seq )
download-db ascii file-lines
- [ "#" head? ] reject unlines string>csv
+ [ "#" head? ] reject join-lines string>csv
[ parse-ip-entry ] map ;
: filter-overlaps ( alist -- alist' )
: commit. ( commit -- )
{
[ hash>> "commit " prepend print ]
- [ author>> "Author: " prepend words 2 head* unwords print ]
- [ author>> words git-date>string "Date: " prepend print ]
- [ message>> "\n" split [ " " prepend ] map unlines nl print nl ]
+ [ author>> "Author: " prepend split-words 2 head* join-words print ]
+ [ author>> split-words git-date>string "Date: " prepend print ]
+ [ message>> "\n" split [ " " prepend ] map join-lines nl print nl ]
} cleave ;
ERROR: unknown-field name parameter ;
: git-string>assoc ( string -- assoc )
"\n\n" split1 [
- lines [ nip first CHAR: \s = ] monotonic-split
+ split-lines [ nip first CHAR: \s = ] monotonic-split
[
dup length 1 = [
first " " split1 2array
] [
[ first " " split1 ]
[ rest [ rest ] map ] bi
- swap prefix unlines 2array
+ swap prefix join-lines 2array
] if
] map
] [
] if ;
: gopher-text ( object -- lines )
- utf8 decode lines { "." } split1 drop ;
+ utf8 decode split-lines { "." } split1 drop ;
: gopher-text. ( object -- )
gopher-text [ print ] each ;
: replace-log-line-numbers ( object log -- log' )
"\n" split harvest
[ replace-log-line-number ] with map
- unlines ;
+ join-lines ;
: gl-shader-kind ( shader-kind -- shader-kind )
{
! Special parsing
: parse-items ( seq -- items )
- first words 2 tail ;
+ first split-words 2 tail ;
: parse-list-folders ( str -- folder )
[[ \* LIST \(([^\)]+)\) "([^"]+)" "?([^"]+)"?]] pcre:findall
: parse-status ( seq -- assoc )
first [[ \* STATUS "[^"]+" \(([^\)]+)\)]] pcre:findall first last last
- words 2 group [ string>number ] assoc-map ;
+ split-words 2 group [ string>number ] assoc-map ;
: parse-store-mail-line ( str -- pair/f )
[[ \(FLAGS \(([^\)]+)\) UID (\d+)\)]] pcre:findall [ f ] [
- first rest values first2 [ words ] dip string>number swap 2array
+ first rest values first2 [ split-words ] dip string>number swap 2array
] if-empty ;
: parse-store-mail ( seq -- assoc )
drop ;
: status-folder ( mailbox keys -- assoc )
- [ >utf7imap4 ] dip unwords "STATUS \"%s\" (%s)" sprintf
+ [ >utf7imap4 ] dip join-words "STATUS \"%s\" (%s)" sprintf
"" command-response parse-status ;
: close-folder ( -- )
{
[ prefix>> ]
[ command>> ]
- [ parameters>> unwords ]
+ [ parameters>> join-words ]
[ trailing>> dup [ CHAR: : prefix ] when ]
- } cleave 4array sift unwords ;
+ } cleave 4array sift join-words ;
<PRIVATE
: ?define-irc-parameters ( class slot-names -- )
PREDICATE: action < ctcp trailing>> rest "ACTION" head? ;
M: rpl-names post-process-irc-message ( rpl-names -- )
- [ [ blank? ] trim words ] change-nicks drop ;
+ [ [ ascii:blank? ] trim split-words ] change-nicks drop ;
M: ctcp post-process-irc-message ( ctcp -- )
[ rest but-last ] change-text drop ;
: split-message ( string -- prefix command parameters trailing )
":" ?head [ " " split1 ] [ f swap ] if
":" split1
- [ words harvest unclip swap ] dip ;
+ [ split-words harvest unclip swap ] dip ;
: sender ( irc-message -- sender )
prefix>> [ ":" ?head drop "!" split-at-first drop ] [ f ] if* ;
! Copyright (C) 2008 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors calendar calendar.format fonts fry grouping
-kernel math sequences timers ui ui.gadgets ui.gadgets.labels ;
+USING: accessors calendar calendar.format fonts grouping kernel
+math sequences splitting timers ui ui.gadgets ui.gadgets.labels ;
IN: lcd
: lcd-digit ( digit row -- str )
'[ _ lcd-digit ] { } map-as concat ;
: lcd ( digit-str -- string )
- 4 <iota> [ lcd-row ] with map unlines ;
+ 4 <iota> [ lcd-row ] with map join-lines ;
TUPLE: time-display < label timer ;
] [ call-next-method ] if ;
SYNTAX: <LITERATE
- "LITERATE>" parse-multiline-string lines [
+ "LITERATE>" parse-multiline-string split-lines [
<literate-lexer> (parse-lines) append!
] with-nested-compilation-unit ;
utf8 file-contents ;
: load-tabular-file ( name -- lines )
- load-file [ blank? ] trim lines
+ load-file [ blank? ] trim split-lines
[ [ blank? ] split-when harvest ] map harvest ;
: numerify ( table -- data names )
: git-id ( -- id )
{ "git" "show" } utf8 [ read-lines ] with-process-reader
- first words second ;
+ first split-words second ;
<PRIVATE
! Copyright (C) 2008, 2010 Eduardo Cavazos, Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
-USING: assocs combinators.smart debugger fry io.encodings.utf8
-io.files io.streams.string kernel literals locals mason.common
-mason.config mason.disk mason.test math namespaces sequences
+USING: assocs combinators.smart debugger io.encodings.utf8
+io.files io.streams.string kernel literals mason.common
+mason.config mason.disk math namespaces sequences splitting
xml.syntax xml.writer ;
IN: mason.report
] with-file-writer ; inline
: file-tail ( file encoding lines -- seq )
- [ file-lines ] dip short tail* unlines ;
+ [ file-lines ] dip short tail* join-lines ;
:: failed-report ( error file what -- status )
[
: over-1000000 ( n -- str )
3 digit-groups [ 1 + units nth n-units ] map-index sift
- reverse unwords ;
+ reverse join-words ;
: decompose ( n -- str ) 1000000 /mod [ over-1000000 ] dip
dup 0 > [ basic space-append ] [ drop ] if ;
[ drop f ]
} case [
2 group dup [ weather key? ] all?
- [ [ weather at ] map unwords ]
+ [ [ weather at ] map join-words ]
[ concat parse-glossary ] if
] dip prepend
] if ;
[ [ f ] [ low-clouds at "low clouds are %s" sprintf ] if-zero ]
[ [ f ] [ mid-clouds at "middle clouds are %s" sprintf ] if-zero ]
[ [ f ] [ high-clouds at "high clouds are %s" sprintf ] if-zero ]
- tri* 3array unwords ;
+ tri* 3array join-words ;
: parse-inches ( str -- str' )
dup [ CHAR: / = ] all? [ drop "unknown" ] [
"sea-level pressure is %s hPa" sprintf ;
: parse-lightning ( str -- str' )
- "LTG" ?head drop 2 group [ lightning at ] map unwords ;
+ "LTG" ?head drop 2 group [ lightning at ] map join-words ;
CONSTANT: re-recent-weather R/ ((\w{2})?[BE]\d{2,4}((\w{2})?[BE]\d{2,4})?)+/
: parse-recent-weather ( str -- str' )
split-recent-weather
- [ (parse-recent-weather) ] map unwords ;
+ [ (parse-recent-weather) ] map join-words ;
: parse-varying ( str -- str' )
"V" split1 [ string>number ] bi@
} cond ;
: metar-remarks ( report seq -- report )
- [ parse-remark ] map unwords >>remarks ;
+ [ parse-remark ] map join-words >>remarks ;
: <metar-report> ( metar -- report )
[ metar-report new ] dip [ >>raw ] keep
: <taf-report> ( taf -- report )
[ taf-report new ] dip [ >>raw ] keep
- lines [ [ blank? ] trim ] map
+ split-lines [ [ blank? ] trim ] map
rest dup first "TAF" = [ rest ] when
harvest unclip swapd taf-body swap taf-partials ;
! See http://factorcode.org/license.txt for BSD license.
USING: accessors arrays combinators combinators.short-circuit
generalizations kernel make math modern modern.slices multiline
-sequences sequences.extras strings unicode ;
+sequences sequences.extras splitting strings unicode ;
IN: modern.html
TUPLE: tag name open-close-delimiter props children ;
M: doctype write-html
[ open>> % ]
- [ values>> [ >value ] map unwords [ " " % % ] unless-empty ]
+ [ values>> [ >value ] map join-words [ " " % % ] unless-empty ]
[ close>> % ] tri ;
: write-props ( seq -- )
- [ dup array? [ first2 >value "=" glue ] [ >value ] if ] map unwords [ " " % % ] unless-empty ;
+ [ dup array? [ first2 >value "=" glue ] [ >value ] if ] map join-words [ " " % % ] unless-empty ;
M: open-tag write-html
{
<PRIVATE
: word>morse ( str -- morse )
- [ ch>morse ] { } map-as unwords ;
+ [ ch>morse ] { } map-as join-words ;
: sentence>morse ( str -- morse )
- words [ word>morse ] map " / " join ;
+ split-words [ word>morse ] map " / " join ;
: trim-blanks ( str -- newstr )
[ blank? ] trim ; inline
: morse>word ( morse -- str )
- words [ morse>ch ] "" map-as ;
+ split-words [ morse>ch ] "" map-as ;
: morse>sentence ( morse -- sentence )
- "/" split [ trim-blanks morse>word ] map unwords ;
+ "/" split [ trim-blanks morse>word ] map join-words ;
: replace-underscores ( str -- str' )
[ dup CHAR: _ = [ drop CHAR: - ] when ] map ;
M: p pdf-width
[ style>> set-style ] keep
- [ font>> ] [ string>> ] bi* lines
+ [ font>> ] [ string>> ] bi* split-lines
[ dupd text-width ] map nip supremum ;
M: text pdf-width
[ style>> set-style ] keep
- [ font>> ] [ string>> ] bi* lines
+ [ font>> ] [ string>> ] bi* split-lines
[ dupd text-width ] map nip supremum ;
"/Type /Catalog"
"/Pages 15 0 R"
">>"
- } unlines ;
+ } join-lines ;
: pdf-pages ( n -- str )
[
"/Kids [ " "]" surround ,
] bi
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: pdf-page ( n -- page )
[
"/F10 12 0 R /F11 13 0 R /F12 14 0 R" ,
">> >>" ,
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: pdf-trailer ( objects -- str )
[
"startxref" ,
[ length 1 + ] map-sum 9 + "%d" sprintf ,
"%%EOF" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
SYMBOLS: pdf-producer pdf-author pdf-creator ;
dup length [1,b] zip [ first2 pdf-object ] map ;
: objects>pdf ( objects -- str )
- [ unlines "\n" append "%PDF-1.4\n" ]
+ [ join-lines "\n" append "%PDF-1.4\n" ]
[ pdf-trailer ] bi surround ;
! Rename to pdf>string, have it take a <pdf> object?
IN: pdf
: text-to-pdf ( str -- pdf )
- lines [
+ split-lines [
H{ { font-name "monospace" } { font-size 10 } } <p>
] map pdf>string ;
! FIXME: what about "proper" tab support?
: string>texts ( string style -- seq )
- [ lines ] dip '[ _ <text> 1array ] map
+ [ split-lines ] dip '[ _ <text> 1array ] map
<br> 1array join ;
PRIVATE>
} cleave
"/BaseFont " prepend ,
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
M: timestamp pdf-value
"%Y%m%d%H%M%S" strftime "D:" prepend ;
M: integer >ckf ;
: parse-cards ( string -- seq )
- words [ >ckf ] map ;
+ split-words [ >ckf ] map ;
: flush? ( cards -- ? )
0xF000 [ bitand ] reduce 0 = not ;
: get-ok-and-total ( -- total )
stream [
readln dup "+OK" head? [
- words second string>number dup account count<<
+ split-words second string>number dup account count<<
] [ throw ] if
] with-stream* ;
: get-ok-and-uidl ( -- uidl )
stream [
readln dup "+OK" head? [
- words last
+ split-words last
] [ throw ] if
] with-stream* ;
: source-067 ( -- seq )
"resource:extra/project-euler/067/triangle.txt"
- ascii file-lines [ words [ string>number ] map ] map ;
+ ascii file-lines [ split-words [ string>number ] map ] map ;
PRIVATE>
[ n>> ] [ generator>> ] bi
'[ _ generate-question* ] replicate ;
-: trim-blanks ( seq -- seq' ) words harvest unwords ;
+: trim-blanks ( seq -- seq' ) split-words harvest join-words ;
: first-n-letters ( n -- seq ) <iota> [ CHAR: a + 1string ] map ;
: alphabet-zip ( seq -- zip ) [ length <iota> [ CHAR: a + 1string ] { } map-as ] keep zip ;
M: question parse-response drop trim-blanks ;
M: stack-shuffler generate-question*
n-shufflers>> [ stack-shufflers random ] [ ] replicate-as
[ inputs first-n-letters ] keep
- '[ _ _ with-datastack unwords ] ;
+ '[ _ _ with-datastack join-words ] ;
M: question ask-question generated>> . ;
M: string-response ask-question generated>> . ;
>robots.txt-url http-get nip ;
: normalize-robots.txt ( string -- sitemaps seq )
- lines
+ split-lines
[ [ blank? ] trim ] map
[ "#" head? ] reject harvest
[ ":" split1 [ [ blank? ] trim ] bi@ [ >lower ] dip ] { } map>assoc
: select ( seq -- seq' ) [ amb-lazy ] map ;
: search ( -- )
- words select dup sentence-match? [ unwords ] [ fail ] if . ;
+ split-words select dup sentence-match? [ join-words ] [ fail ] if . ;
MAIN: search
! Copyright (c) 2012 Anonymous
! See http://factorcode.org/license.txt for BSD license.
-USING: io kernel math sequences ;
+USING: io kernel math sequences splitting ;
IN: rosetta-code.sierpinski-triangle
! http://rosettacode.org/wiki/Sierpinski_triangle
[ drop [ dup " " glue ] map ] 2bi append ;
: (sierpinski) ( triangle spaces n -- triangle' )
- dup 0 = [ 2drop unlines ] [
+ dup 0 = [ 2drop join-lines ] [
[
[ iterate-triangle ]
[ nip dup append ] 2bi
: out? ( line -- ? ) [ "OUT" ] dip subseq? ; inline
-: line-time ( line -- time ) words harvest fourth ; inline
+: line-time ( line -- time ) split-words harvest fourth ; inline
: update-max-count ( max -- max' )
dup [ current-count>> ] [ max-count>> ] bi >
M: push-insn eval-insn value>> swons ;
: rpn-tokenize ( string -- string' )
- words harvest sequence>list ;
+ split-words harvest sequence>list ;
: rpn-parse ( string -- tokens )
rpn-tokenize [
M: object array-element>string smalltalk>string ;
M: array array-element>string
- [ array-element>string ] map unwords "(" ")" surround ;
+ [ array-element>string ] map join-words "(" ")" surround ;
M: array smalltalk>string
array-element>string "#" prepend ;
M: byte-array smalltalk>string
- [ number>string ] { } map-as unwords "#[" "]" surround ;
+ [ number>string ] { } map-as join-words "#[" "]" surround ;
M: symbol smalltalk>string
name>> smalltalk>string "#" prepend ;
[ ?first string>number ]
[
?second " " split1
- [ "-->" split1 [ [ blank? ] trim parse-srt-timestamp ] bi@ ]
+ [ "-->" split1 [ [ ascii:blank? ] trim parse-srt-timestamp ] bi@ ]
[
- [ blank? ] trim words sift [
+ [ ascii:blank? ] trim split-words sift [
f
] [
[ ":" split1 nip string>number ] map
] if-empty
] bi*
]
- [ 2 tail unlines ] tri srt-chunk boa ;
+ [ 2 tail join-lines ] tri srt-chunk boa ;
: parse-srt-lines ( seq -- seq' )
{ "" } split harvest
[ parse-srt-chunk ] { } map-as ;
: parse-srt-string ( seq -- seq' )
- lines parse-srt-lines ;
+ split-lines parse-srt-lines ;
: parse-srt-file ( path -- seq )
utf8 file-lines parse-srt-lines ;
"/Author " "USER" os-env "unknown" or pdf-string append ,
"/Creator (created with Factor)" ,
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: pdf-catalog ( -- str )
{
"/Type /Catalog"
"/Pages 4 0 R"
">>"
- } unlines ;
+ } join-lines ;
: pdf-font ( -- str )
{
"/Subtype /Type1"
"/BaseFont /Courier"
">>"
- } unlines ;
+ } join-lines ;
: pdf-pages ( n -- str )
[
"/Kids [ " "]" surround ,
] bi
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: pdf-text ( lines -- str )
[
"12 TL" ,
[ pdf-string "'" append , ] each
"ET" ,
- ] { } make unlines pdf-stream ;
+ ] { } make join-lines pdf-stream ;
: pdf-page ( n -- page )
[
1 + "/Contents %d 0 R" sprintf ,
"/Resources << /Font << /F1 3 0 R >> >>" ,
">>" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: pdf-trailer ( objects -- str )
[
"startxref" ,
[ length 1 + ] map-sum 9 + "%d" sprintf ,
"%%EOF" ,
- ] { } make unlines ;
+ ] { } make join-lines ;
: string>lines ( str -- lines )
- "\t" split " " join lines
+ "\t" split " " join split-lines
[ [ " " ] when-empty ] map ;
: lines>pages ( lines -- pages )
dup length [1,b] zip [ first2 pdf-object ] map ;
: objects>pdf ( objects -- str )
- [ unlines "\n" append "%PDF-1.4\n" ]
+ [ join-lines "\n" append "%PDF-1.4\n" ]
[ pdf-trailer ] bi surround ;
PRIVATE>
! See http://factorcode.org/license.txt for BSD license
USING: combinators command-line generic io kernel math
-math.text.english namespaces present sequences strings system
-ui.operations vocabs ;
+math.text.english namespaces present sequences splitting
+strings system ui.operations vocabs ;
IN: text-to-speech
command-line get [
[ speak ] each-line
] [
- unwords speak
+ join-words speak
] if-empty ;
MAIN: speak-main
! Copyright (C) 2011 John Benediktsson
! See http://factorcode.org/license.txt for BSD license
-USING: command-line io kernel namespaces sequences ;
+USING: command-line io kernel namespaces sequences splitting ;
IN: tools.echo
[ first "-n" = ] keep over [ rest ] when ;
: echo-args ( args -- )
- -n? unwords write [ nl ] unless ;
+ -n? join-words write [ nl ] unless ;
: run-echo ( -- )
command-line get [ nl ] [ echo-args ] if-empty ;
DEFER: name/values
: (parse-value) ( string -- values )
- decode-value lines
+ decode-value split-lines
[ "" ] [ dup length 1 = [ first ] when ] if-empty ;
: parse-value ( string -- remain value )
GENERIC: >txon ( object -- string )
M: sequence >txon
- [ >txon ] map unlines ;
+ [ >txon ] map join-lines ;
M: assoc >txon
>alist [
first2 [ encode-value ] [ >txon ] bi* "%s:`%s`" sprintf
- ] map unlines ;
+ ] map join-lines ;
M: string >txon
encode-value ;
! Copyright (C) 2010 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
-USING: io kernel make sequences webapps.mason.version.common
-webapps.mason.version.files ;
+USING: io kernel make sequences splitting
+webapps.mason.version.common webapps.mason.version.files ;
IN: webapps.mason.version.binary
: binary-release-command ( version builder -- command )
] "" make ;
: binary-release-script ( version builders -- string )
- [ binary-release-command ] with map unlines ;
+ [ binary-release-command ] with map join-lines ;
: do-binary-release ( version builders -- )
"Copying binary releases to release directory..." print flush
[ "new" [ from-object ] nest-form ]
bi*
]
- [ [ content>> lines ] bi@ lcs-diff "diff" set-value ]
+ [ [ content>> split-lines ] bi@ lcs-diff "diff" set-value ]
2bi
] >>init
: article. ( name -- )
wikipedia-url http-get nip parse-html
"content" find-by-id-between
- html-text lines
- [ [ blank? ] trim ] map harvest [
+ html-text split-lines
+ [ [ ascii:blank? ] trim ] map harvest [
html-unescape 72 wrap-string print nl
] each ;
[ "title" attr "%s:\n" printf ]
[
"plaintext" deep-tags-named [
- children>string lines
+ children>string split-lines
[ " %s\n" printf ] each
] each
] bi
2dup >byte-array ZMQ_SUBSCRIBE swap zmq-setopt
0 100 dup [
[ pick 0 zmq-recv
- >string words [ string>number ] map second +
+ >string split-words [ string>number ] map second +
] times
] dip
/ "Average temperature for zipcode '%s' was %dF\n" printf