20000 <hashtable> objects set
emit-header t, 0, 1, -1,
"Building generic words..." print flush
- call-remake-generics-hook
+ remake-generics
"Serializing words..." print flush
emit-words
"Serializing JIT data..." print flush
{ $description "Looks up the month name and returns it as a string. January has an index of 1 instead of zero." } ;
HELP: month-abbreviations
-{ $values { "array" array } }
+{ $values { "value" array } }
{ $description "Returns an array with the English abbreviated names of all the months." }
{ $warning "Do not use this array for looking up a month name directly. Use month-abbreviation instead." } ;
{ $description "Looks up the day name and returns it as a string." } ;
HELP: day-abbreviations2
-{ $values { "array" array } }
+{ $values { "value" array } }
{ $description "Returns an array with the abbreviated English names of the days of the week. This abbreviation is two characters long." } ;
HELP: day-abbreviation2
{ $description "Looks up the abbreviated day name and returns it as a string. This abbreviation is two characters long." } ;
HELP: day-abbreviations3
-{ $values { "array" array } }
+{ $values { "value" array } }
{ $description "Returns an array with the abbreviated English names of the days of the week. This abbreviation is three characters long." } ;
HELP: day-abbreviation3
drop "Months are indexed starting at 1" ;
<PRIVATE
+
: check-month ( n -- n )
dup zero? [ not-a-month ] when ;
+
PRIVATE>
: month-names ( -- array )
: month-name ( n -- string )
check-month 1- month-names nth ;
-: month-abbreviations ( -- array )
+CONSTANT: month-abbreviations
{
"Jan" "Feb" "Mar" "Apr" "May" "Jun"
"Jul" "Aug" "Sep" "Oct" "Nov" "Dec"
- } ;
+ }
: month-abbreviation ( n -- string )
check-month 1- month-abbreviations nth ;
: day-name ( n -- string ) day-names nth ;
-: day-abbreviations2 ( -- array )
- { "Su" "Mo" "Tu" "We" "Th" "Fr" "Sa" } ;
+CONSTANT: day-abbreviations2
+ { "Su" "Mo" "Tu" "We" "Th" "Fr" "Sa" }
: day-abbreviation2 ( n -- string )
- day-abbreviations2 nth ;
+ day-abbreviations2 nth ; inline
-: day-abbreviations3 ( -- array )
- { "Sun" "Mon" "Tue" "Wed" "Thu" "Fri" "Sat" } ;
+CONSTANT: day-abbreviations3
+ { "Sun" "Mon" "Tue" "Wed" "Thu" "Fri" "Sat" }
: day-abbreviation3 ( n -- string )
- day-abbreviations3 nth ;
+ day-abbreviations3 nth ; inline
: average-month ( -- ratio ) 30+5/12 ; inline
: months-per-year ( -- integer ) 12 ; inline
--- /dev/null
+Daniel Ehrenberg
+Slava Pestov
--- /dev/null
+extensions
"Normally, new word definitions are recompiled automatically. This can be changed:"
{ $subsection disable-compiler }
{ $subsection enable-compiler }
-"The optimizing compiler can be called directly, although this should not be necessary under normal circumstances:"
-{ $subsection optimized-recompile-hook }
"Removing a word's optimized definition:"
{ $subsection decompile }
"Compiling a single quotation:"
{ $description "Compile a single word." }
{ $notes "This is an internal word, and user code should call " { $link compile } " instead." } ;
-HELP: optimized-recompile-hook
-{ $values { "words" "a sequence of words" } { "alist" "an association list" } }
-{ $description "Compile a set of words." }
+HELP: optimizing-compiler
+{ $description "Singleton class implementing " { $link recompile } " to call the optimizing compiler." }
{ $notes "This is an internal word, and user code should call " { $link compile } " instead." } ;
HELP: compile-call
] with-return ;
: compile-loop ( deque -- )
- [ (compile) yield-hook get call ] slurp-deque ;
+ [ (compile) yield-hook get assert-depth ] slurp-deque ;
: decompile ( word -- )
f 2array 1array modify-code-heap ;
: compile-call ( quot -- )
[ dup infer define-temp ] with-compilation-unit execute ;
-: optimized-recompile-hook ( words -- alist )
+SINGLETON: optimizing-compiler
+
+M: optimizing-compiler recompile ( words -- alist )
[
<hashed-dlist> compile-queue set
H{ } clone compiled set
] with-scope ;
: enable-compiler ( -- )
- [ optimized-recompile-hook ] recompile-hook set-global ;
+ optimizing-compiler compiler-impl set-global ;
: disable-compiler ( -- )
- [ default-recompile-hook ] recompile-hook set-global ;
+ f compiler-impl set-global ;
: recompile-all ( -- )
forget-errors all-words compile ;
check_sse2 ;
"-no-sse2" (command-line) member? [
- [ optimized-recompile-hook ] recompile-hook
- [ { check_sse2 } compile ] with-variable
+ optimizing-compiler compiler-impl [ { check_sse2 } compile ] with-variable
"Checking if your CPU supports SSE2..." print flush
sse2? [
"SQLite example combinator:"
{ $code <"
-USING: db.sqlite db io.files ;
+USING: db.sqlite db io.files io.files.temp ;
: with-sqlite-db ( quot -- )
"my-database.db" temp-file <sqlite-db> swap with-db ; inline"> }
t in-transaction [
begin-transaction
[ ] [ rollback-transaction ] cleanup commit-transaction
- ] with-variable ;
+ ] with-variable ; inline
--- /dev/null
+unportable
Doug Coleman
-Slava Pestov
+Daniel Ehrenberg
] unit-test
[ "<p>a-b</p>" ] [ "a-b" convert-farkup ] unit-test
-[ "<p>*foo\nbar\n</p>" ] [ "*foo\nbar\n" convert-farkup ] unit-test
+[ "<p><strong>foo</strong></p><p>bar</p>" ] [ "*foo\nbar\n" convert-farkup ] unit-test
[ "<p><strong>Wow!</strong></p>" ] [ "*Wow!*" convert-farkup ] unit-test
[ "<p><em>Wow.</em></p>" ] [ "_Wow._" convert-farkup ] unit-test
-[ "<p>*</p>" ] [ "*" convert-farkup ] unit-test
+[ "<p><strong></strong></p>" ] [ "*" convert-farkup ] unit-test
[ "<p>*</p>" ] [ "\\*" convert-farkup ] unit-test
-[ "<p>**</p>" ] [ "\\**" convert-farkup ] unit-test
+[ "<p>*<strong></strong></p>" ] [ "\\**" convert-farkup ] unit-test
[ "<ul><li>a-b</li></ul>" ] [ "-a-b" convert-farkup ] unit-test
[ "<ul><li>foo</li></ul>" ] [ "-foo" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n</ul>" ] [ "-foo\n" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n<li>bar</li></ul>" ] [ "-foo\n-bar" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n<li>bar</li>\n</ul>" ] [ "-foo\n-bar\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li></ul>" ] [ "-foo\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [ "-foo\n-bar" convert-farkup ] unit-test
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [ "-foo\n-bar\n" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n</ul><p>bar\n</p>" ] [ "-foo\nbar\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li></ul><p>bar</p>" ] [ "-foo\nbar\n" convert-farkup ] unit-test
[ "<ol><li>a-b</li></ol>" ] [ "#a-b" convert-farkup ] unit-test
[ "<ol><li>foo</li></ol>" ] [ "#foo" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n</ol>" ] [ "#foo\n" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n<li>bar</li></ol>" ] [ "#foo\n#bar" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n<li>bar</li>\n</ol>" ] [ "#foo\n#bar\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li></ol>" ] [ "#foo\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li><li>bar</li></ol>" ] [ "#foo\n#bar" convert-farkup ] unit-test
+[ "<ol><li>foo</li><li>bar</li></ol>" ] [ "#foo\n#bar\n" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n</ol><p>bar\n</p>" ] [ "#foo\nbar\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li></ol><p>bar</p>" ] [ "#foo\nbar\n" convert-farkup ] unit-test
-[ "\n\n" ] [ "\n\n" convert-farkup ] unit-test
-[ "\n\n" ] [ "\r\n\r\n" convert-farkup ] unit-test
-[ "\n\n\n\n" ] [ "\r\r\r\r" convert-farkup ] unit-test
-[ "\n\n\n" ] [ "\r\r\r" convert-farkup ] unit-test
-[ "\n\n\n" ] [ "\n\n\n" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\n\nbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\rbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\r\nbar" convert-farkup ] unit-test
+[ "" ] [ "\n\n" convert-farkup ] unit-test
+[ "" ] [ "\r\n\r\n" convert-farkup ] unit-test
+[ "" ] [ "\r\r\r\r" convert-farkup ] unit-test
+[ "" ] [ "\r\r\r" convert-farkup ] unit-test
+[ "" ] [ "\n\n\n" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\n\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\rbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\r\nbar" convert-farkup ] unit-test
-[ "\n<p>bar\n</p>" ] [ "\nbar\n" convert-farkup ] unit-test
-[ "\n<p>bar\n</p>" ] [ "\rbar\r" convert-farkup ] unit-test
-[ "\n<p>bar\n</p>" ] [ "\r\nbar\r\n" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\nbar\n" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\rbar\r" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\r\nbar\r\n" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\n\n\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\n\n\nbar" convert-farkup ] unit-test
[ "" ] [ "" convert-farkup ] unit-test
-[ "<p>|a</p>" ]
+[ "<table><tr><td>a</td></tr></table>" ]
[ "|a" convert-farkup ] unit-test
[ "<table><tr><td>a</td></tr></table>" ]
[ "<table><tr><td>a</td><td>b</td></tr><tr><td>c</td><td>d</td></tr></table>" ]
[ "|a|b|\n|c|d|\n" convert-farkup ] unit-test
-[ "<p><strong>foo</strong>\n</p><h1>aheading</h1>\n<p>adfasd</p>" ]
+[ "<p><strong>foo</strong></p><h1>aheading</h1><p>adfasd</p>" ]
[ "*foo*\n=aheading=\nadfasd" convert-farkup ] unit-test
-[ "<h1>foo</h1>\n" ] [ "=foo=\n" convert-farkup ] unit-test
-[ "<p>lol</p><h1>foo</h1>\n" ] [ "lol=foo=\n" convert-farkup ] unit-test
-[ "<p>=foo\n</p>" ] [ "=foo\n" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "=foo=\n" convert-farkup ] unit-test
+[ "<p>lol=foo=</p>" ] [ "lol=foo=\n" convert-farkup ] unit-test
+[ "<p>=foo</p>" ] [ "=foo\n" convert-farkup ] unit-test
[ "<p>=foo</p>" ] [ "=foo" convert-farkup ] unit-test
[ "<p>==foo</p>" ] [ "==foo" convert-farkup ] unit-test
-[ "<p>=</p><h1>foo</h1>" ] [ "==foo=" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "==foo=" convert-farkup ] unit-test
[ "<h2>foo</h2>" ] [ "==foo==" convert-farkup ] unit-test
[ "<h2>foo</h2>" ] [ "==foo==" convert-farkup ] unit-test
-[ "<p>=</p><h2>foo</h2>" ] [ "===foo==" convert-farkup ] unit-test
-[ "<h1>foo</h1><p>=</p>" ] [ "=foo==" convert-farkup ] unit-test
+[ "<h2>foo</h2>" ] [ "===foo==" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "=foo==" convert-farkup ] unit-test
[ "<pre><span class=\"KEYWORD3\">int</span> <span class=\"FUNCTION\">main</span><span class=\"OPERATOR\">(</span><span class=\"OPERATOR\">)</span></pre>" ]
[ "[c{int main()}]" convert-farkup ] unit-test
-[ "<p><img src=\"lol.jpg\"/></p>" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test
+[ "<p><img src=\"lol.jpg\" alt=\"image:lol.jpg\"/></p>" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test
[ "<p><img src=\"lol.jpg\" alt=\"teh lol\"/></p>" ] [ "[[image:lol.jpg|teh lol]]" convert-farkup ] unit-test
[ "<p><a href=\"http://lol.com\">http://lol.com</a></p>" ] [ "[[http://lol.com]]" convert-farkup ] unit-test
[ "<p><a href=\"http://lol.com\">haha</a></p>" ] [ "[[http://lol.com|haha]]" convert-farkup ] unit-test
[ "<pre>hello</pre>" ] [ "[{hello}]" convert-farkup ] unit-test
[
- "<p>Feature comparison:\n<table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table></p>"
+ "<p>Feature comparison:</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
] [ "Feature comparison:\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test
[
- "<p>Feature comparison:\n</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
+ "<p>Feature comparison:</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
] [ "Feature comparison:\n\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test
[
[ "<p><foo></p>" ] [ "<foo>" convert-farkup ] unit-test
-[ "<p>asdf\n<ul><li>lol</li>\n<li>haha</li></ul></p>" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test
+[ "<p>asdf</p><ul><li>lol</li><li>haha</li></ul>" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test
-[ "<p>asdf\n</p><ul><li>lol</li>\n<li>haha</li></ul>" ]
+[ "<p>asdf</p><ul><li>lol</li><li>haha</li></ul>" ]
[ "asdf\n\n-lol\n-haha" convert-farkup ] unit-test
[ "<hr/>" ] [ "___" convert-farkup ] unit-test
-[ "<hr/>\n" ] [ "___\n" convert-farkup ] unit-test
+[ "<hr/>" ] [ "___\n" convert-farkup ] unit-test
-[ "<p>before:\n<pre><span class=\"OPERATOR\">{</span> <span class=\"DIGIT\">1</span> <span class=\"DIGIT\">2</span> <span class=\"DIGIT\">3</span> <span class=\"OPERATOR\">}</span> <span class=\"DIGIT\">1</span> tail</pre></p>" ]
+[ "<p>before:</p><pre><span class=\"OPERATOR\">{</span> <span class=\"DIGIT\">1</span> <span class=\"DIGIT\">2</span> <span class=\"DIGIT\">3</span> <span class=\"OPERATOR\">}</span> <span class=\"DIGIT\">1</span> tail</pre>" ]
[ "before:\n[factor{{ 1 2 3 } 1 tail}]" convert-farkup ] unit-test
[ "<p><a href=\"Factor\">Factor</a>-rific!</p>" ]
[ "[[Factor]]-rific!" convert-farkup ] unit-test
-[ "<p>[ factor { 1 2 3 }]</p>" ]
+[ "<pre> 1 2 3 </pre>" ]
[ "[ factor { 1 2 3 }]" convert-farkup ] unit-test
-[ "<p>paragraph\n<hr/></p>" ]
+[ "<p>paragraph</p><hr/>" ]
[ "paragraph\n___" convert-farkup ] unit-test
-[ "<p>paragraph\n a ___ b</p>" ]
+[ "<p>paragraph</p><p> a <em></em><em> b</em></p>" ]
[ "paragraph\n a ___ b" convert-farkup ] unit-test
-[ "\n<ul><li> a</li>\n</ul><hr/>" ]
+[ "<ul><li> a</li></ul><hr/>" ]
[ "\n- a\n___" convert-farkup ] unit-test
-[ "<p>hello_world how are you today?\n<ul><li> hello_world how are you today?</li></ul></p>" ]
+[ "<p>hello<em>world how are you today?</em></p><ul><li> hello<em>world how are you today?</em></li></ul>" ]
[ "hello_world how are you today?\n- hello_world how are you today?" convert-farkup ] unit-test
: check-link-escaping ( string -- link )
[ "<foo>" ] [ "[[<foo>]]" check-link-escaping ] unit-test
[ "&blah;" ] [ "[[&blah;]]" check-link-escaping ] unit-test
[ "C++" ] [ "[[C++]]" check-link-escaping ] unit-test
+
+[ "<h1>The <em>important</em> thing</h1>" ] [ "=The _important_ thing=" convert-farkup ] unit-test
+[ "<p><a href=\"Foo\"><strong>emphasized</strong> text</a></p>" ] [ "[[Foo|*emphasized* text]]" convert-farkup ] unit-test
+[ "<table><tr><td><strong>bold</strong></td><td><em>italics</em></td></tr></table>" ]
+[ "|*bold*|_italics_|" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both*" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both*_" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both_" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em>after<strong></strong></p>" ] [ "_italics*both_after*" convert-farkup ] unit-test
+[ "<table><tr><td>foo|bar</td></tr></table>" ] [ "|foo\\|bar|" convert-farkup ] unit-test
+[ "<p></p>" ] [ "\\" convert-farkup ] unit-test
-! Copyright (C) 2008 Doug Coleman.
+! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors arrays combinators io
-io.streams.string kernel math namespaces peg peg.ebnf
-sequences sequences.deep strings xml.entities xml.syntax
-vectors splitting xmode.code2html urls.encoding xml.data
-xml.writer ;
+USING: sequences kernel splitting lists fry accessors assocs math.order
+math combinators namespaces urls.encoding xml.syntax xmode.code2html
+xml.data arrays strings vectors xml.writer io.streams.string locals
+unicode.categories ;
IN: farkup
SYMBOL: relative-link-prefix
: simple-link-title ( string -- string' )
dup absolute-url? [ "/" split1-last swap or ] unless ;
-EBNF: parse-farkup
-nl = ("\r\n" | "\r" | "\n") => [[ drop "\n" ]]
-whitespace = " " | "\t" | nl
-
-heading1 = "=" (!("=" | nl).)+ "="
- => [[ second >string heading1 boa ]]
-
-heading2 = "==" (!("=" | nl).)+ "=="
- => [[ second >string heading2 boa ]]
-
-heading3 = "===" (!("=" | nl).)+ "==="
- => [[ second >string heading3 boa ]]
-
-heading4 = "====" (!("=" | nl).)+ "===="
- => [[ second >string heading4 boa ]]
-
-heading = heading4 | heading3 | heading2 | heading1
-
-
-
-strong = "*" (!("*" | nl).)+ "*"
- => [[ second >string strong boa ]]
-
-emphasis = "_" (!("_" | nl).)+ "_"
- => [[ second >string emphasis boa ]]
-
-superscript = "^" (!("^" | nl).)+ "^"
- => [[ second >string superscript boa ]]
-
-subscript = "~" (!("~" | nl).)+ "~"
- => [[ second >string subscript boa ]]
-
-inline-code = "%" (!("%" | nl).)+ "%"
- => [[ second >string inline-code boa ]]
-
-link-content = (!("|"|"]").)+
- => [[ >string ]]
-
-image-link = "[[image:" link-content "|" link-content "]]"
- => [[ [ second >string ] [ fourth >string ] bi image boa ]]
- | "[[image:" link-content "]]"
- => [[ second >string f image boa ]]
-
-simple-link = "[[" link-content "]]"
- => [[ second >string dup simple-link-title link boa ]]
-
-labeled-link = "[[" link-content "|" link-content "]]"
- => [[ [ second >string ] [ fourth >string ] bi link boa ]]
-
-link = image-link | labeled-link | simple-link
-
-escaped-char = "\" .
- => [[ second 1string ]]
-
-inline-tag = strong | emphasis | superscript | subscript | inline-code
- | link | escaped-char
-
-
-
-inline-delimiter = '*' | '_' | '^' | '~' | '%' | '\' | '['
-
-cell = (!(inline-delimiter | '|' | nl).)+
- => [[ >string ]]
-
-table-column = (list | cell | inline-tag | inline-delimiter ) '|'
- => [[ first ]]
-table-row = "|" (table-column)+
- => [[ second table-row boa ]]
-table = ((table-row nl => [[ first ]] )+ table-row? | table-row)
- => [[ table boa ]]
-
-text = (!(nl | code | heading | inline-delimiter | table ).)+
- => [[ >string ]]
-
-paragraph-nl-item = nl list
- | nl line
- | nl => [[ line-breaks? get [ drop line-break new ] when ]]
-paragraph-item = (table | code | text | inline-tag | inline-delimiter)+
-paragraph = ((paragraph-item paragraph-nl-item)+ nl+ => [[ first ]]
- | (paragraph-item paragraph-nl-item)+ paragraph-item?
- | paragraph-item)
- => [[ paragraph boa ]]
-
-
-list-item = (cell | inline-tag | inline-delimiter)*
-
-ordered-list-item = '#' list-item
- => [[ second list-item boa ]]
-ordered-list = ((ordered-list-item nl)+ ordered-list-item? | ordered-list-item)
- => [[ ordered-list boa ]]
-
-unordered-list-item = '-' list-item
- => [[ second list-item boa ]]
-unordered-list = ((unordered-list-item nl)+ unordered-list-item? | unordered-list-item)
- => [[ unordered-list boa ]]
-
-list = ordered-list | unordered-list
-
-
-line = '___'
- => [[ drop line new ]]
-
-
-named-code
- = '[' (!('{' | whitespace | '[').)+ '{' (!("}]").)+ "}]"
- => [[ [ second >string ] [ fourth >string ] bi code boa ]]
-
-simple-code
- = "[{" (!("}]").)+ "}]"
- => [[ second >string f swap code boa ]]
-
-code = named-code | simple-code
+! _foo*bar_baz*bing works like <i>foo*bar</i>baz<b>bing</b>
+! I could support overlapping, but there's not a good use case for it.
+
+DEFER: (parse-paragraph)
+
+: parse-paragraph ( string -- seq )
+ (parse-paragraph) list>array ;
+
+: make-paragraph ( string -- paragraph )
+ parse-paragraph paragraph boa ;
+
+: cut-half-slice ( string i -- before after-slice )
+ [ head ] [ 1+ short tail-slice ] 2bi ;
+
+: find-cut ( string quot -- before after delimiter )
+ dupd find
+ [ [ cut-half-slice ] [ f ] if* ] dip ; inline
+
+: parse-delimiter ( string delimiter class -- paragraph )
+ [ '[ _ = ] find-cut drop ] dip
+ '[ parse-paragraph _ new swap >>child ]
+ [ (parse-paragraph) ] bi* cons ;
+
+: delimiter-class ( delimiter -- class )
+ H{
+ { CHAR: * strong }
+ { CHAR: _ emphasis }
+ { CHAR: ^ superscript }
+ { CHAR: ~ subscript }
+ { CHAR: % inline-code }
+ } at ;
+
+: parse-link ( string -- paragraph-list )
+ rest-slice "]]" split1-slice [
+ "|" split1
+ [ "" like dup simple-link-title ] unless*
+ [ "image:" ?head ] dip swap [ image boa ] [ parse-paragraph link boa ] if
+ ] dip [ (parse-paragraph) cons ] when* ;
+
+: ?first ( seq -- elt ) 0 swap ?nth ;
+
+: parse-big-link ( before after -- link rest )
+ dup ?first CHAR: [ =
+ [ parse-link ]
+ [ [ CHAR: [ suffix ] [ (parse-paragraph) ] bi* ]
+ if ;
+
+: escape ( before after -- before' after' )
+ [ nil ] [ unclip-slice swap [ suffix ] dip (parse-paragraph) ] if-empty ;
+
+: (parse-paragraph) ( string -- list )
+ [ nil ] [
+ [ "*_^~%[\\" member? ] find-cut [
+ {
+ { CHAR: [ [ parse-big-link ] }
+ { CHAR: \\ [ escape ] }
+ [ dup delimiter-class parse-delimiter ]
+ } case cons
+ ] [ drop "" like 1list ] if*
+ ] if-empty ;
+
+: <farkup-state> ( string -- state ) string-lines ;
+: look ( state i -- char ) swap first ?nth ;
+: done? ( state -- ? ) empty? ;
+: take-line ( state -- state' line ) unclip-slice ;
+
+: take-lines ( state char -- state' lines )
+ dupd '[ ?first _ = not ] find drop
+ [ cut-slice ] [ f ] if* swap ;
+
+:: (take-until) ( state delimiter accum -- string/f state' )
+ state empty? [ accum "\n" join f ] [
+ state unclip-slice :> first :> rest
+ first delimiter split1 :> after :> before
+ before accum push
+ after [
+ accum "\n" join
+ rest after prefix
+ ] [
+ rest delimiter accum (take-until)
+ ] if
+ ] if ;
+: take-until ( state delimiter -- string/f state' )
+ V{ } clone (take-until) ;
+
+: count= ( string -- n )
+ dup <reversed> [ [ CHAR: = = not ] find drop 0 or ] bi@ min ;
+
+: trim= ( string -- string' )
+ [ CHAR: = = ] trim ;
+
+: make-heading ( string class -- heading )
+ [ trim= parse-paragraph ] dip boa ; inline
+
+: parse-heading ( state -- state' heading )
+ take-line dup count= {
+ { 0 [ make-paragraph ] }
+ { 1 [ heading1 make-heading ] }
+ { 2 [ heading2 make-heading ] }
+ { 3 [ heading3 make-heading ] }
+ { 4 [ heading4 make-heading ] }
+ [ drop heading4 make-heading ]
+ } case ;
+
+: trim-row ( seq -- seq' )
+ rest
+ dup peek empty? [ but-last ] when ;
+
+: ?peek ( seq -- elt/f )
+ [ f ] [ peek ] if-empty ;
+
+: coalesce ( rows -- rows' )
+ V{ } clone [
+ '[
+ _ dup ?peek ?peek CHAR: \\ =
+ [ [ pop "|" rot 3append ] keep ] when
+ push
+ ] each
+ ] keep ;
+
+: parse-table ( state -- state' table )
+ CHAR: | take-lines [
+ "|" split
+ trim-row
+ coalesce
+ [ parse-paragraph ] map
+ table-row boa
+ ] map table boa ;
+
+: parse-line ( state -- state' item )
+ take-line dup "___" =
+ [ drop line new ] [ make-paragraph ] if ;
+
+: parse-list ( state char class -- state' list )
+ [
+ take-lines
+ [ rest parse-paragraph list-item boa ] map
+ ] dip boa ; inline
+
+: parse-ul ( state -- state' ul )
+ CHAR: - unordered-list parse-list ;
+
+: parse-ol ( state -- state' ul )
+ CHAR: # ordered-list parse-list ;
+
+: parse-code ( state -- state' item )
+ dup 1 look CHAR: [ =
+ [ unclip-slice make-paragraph ] [
+ "{" take-until [ rest ] dip
+ "}]" take-until
+ [ code boa ] dip swap
+ ] if ;
-stand-alone
- = (line | code | heading | list | table | paragraph | nl)*
-;EBNF
+: parse-item ( state -- state' item )
+ dup 0 look {
+ { CHAR: = [ parse-heading ] }
+ { CHAR: | [ parse-table ] }
+ { CHAR: _ [ parse-line ] }
+ { CHAR: - [ parse-ul ] }
+ { CHAR: # [ parse-ol ] }
+ { CHAR: [ [ parse-code ] }
+ { f [ rest-slice f ] }
+ [ drop take-line make-paragraph ]
+ } case ;
+
+: parse-farkup ( string -- farkup )
+ <farkup-state> [ dup done? not ] [ parse-item ] produce nip sift ;
CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');"
[ relative-link-prefix get prepend "" like url-encode ]
} cond ;
-: write-link ( href text -- xml )
- [ check-url link-no-follow? get "nofollow" and ] dip
- [XML <a href=<-> rel=<->><-></a> XML] ;
-
-: write-image-link ( href text -- xml )
- disable-images? get [
- 2drop
- [XML <strong>Images are not allowed</strong> XML]
- ] [
- [ check-url ] [ f like ] bi*
- [XML <img src=<-> alt=<->/> XML]
- ] if ;
-
: render-code ( string mode -- xml )
[ string-lines ] dip htmlize-lines
[XML <pre><-></pre> XML] ;
M: paragraph (write-farkup) "p" farkup-inside ;
M: table (write-farkup) "table" farkup-inside ;
+: write-link ( href text -- xml )
+ [ check-url link-no-follow? get "nofollow" and ] dip
+ [XML <a href=<-> rel=<->><-></a> XML] ;
+
+: write-image-link ( href text -- xml )
+ disable-images? get [
+ 2drop
+ [XML <strong>Images are not allowed</strong> XML]
+ ] [
+ [ check-url ] [ f like ] bi*
+ [XML <img src=<-> alt=<->/> XML]
+ ] if ;
+
+: open-link ( link -- href text )
+ [ href>> ] [ text>> (write-farkup) ] bi ;
+
M: link (write-farkup)
- [ href>> ] [ text>> ] bi write-link ;
+ open-link write-link ;
M: image (write-farkup)
- [ href>> ] [ text>> ] bi write-image-link ;
+ open-link write-image-link ;
M: code (write-farkup)
[ string>> ] [ mode>> ] bi render-code ;
M: string (write-farkup) ;
-M: vector (write-farkup) [ (write-farkup) ] map ;
-
-M: f (write-farkup) ;
+M: array (write-farkup) [ (write-farkup) ] map ;
: farkup>xml ( string -- xml )
parse-farkup (write-farkup) ;
: convert-farkup ( string -- string' )
[ write-farkup ] with-string-writer ;
+
USING: assocs classes help.markup help.syntax io.streams.string
http http.server.dispatchers http.server.responses
-furnace.redirection strings multiline ;
+furnace.redirection strings multiline html.forms ;
IN: furnace.actions
HELP: <action>
}
} ;
+{ validate-params validate-values } related-words
+
HELP: validation-failed
{ $description "Stops processing the current request and takes action depending on the type of the current request:"
{ $list
-! Copyright (C) 2008 Slava Pestov.\r
+! Copyright (C) 2008, 2009 Slava Pestov.\r
! See http://factorcode.org/license.txt for BSD license.\r
USING: accessors sequences kernel assocs combinators\r
validators http hashtables namespaces fry continuations locals\r
-io arrays math boxes splitting urls\r
+io arrays math boxes splitting urls call\r
xml.entities\r
http.server\r
http.server.responses\r
'[\r
_ dup display>> [\r
{\r
- [ init>> call ]\r
- [ authorize>> call ]\r
+ [ init>> call( -- ) ]\r
+ [ authorize>> call( -- ) ]\r
[ drop restore-validation-errors ]\r
- [ display>> call ]\r
+ [ display>> call( -- response ) ]\r
} cleave\r
] [ drop <400> ] if\r
] with-exit-continuation ;\r
: handle-post ( action -- response )\r
'[\r
_ dup submit>> [\r
- [ validate>> call ]\r
- [ authorize>> call ]\r
- [ submit>> call ]\r
+ [ validate>> call( -- ) ]\r
+ [ authorize>> call( -- ) ]\r
+ [ submit>> call( -- response ) ]\r
tri\r
] [ drop <400> ] if\r
] with-exit-continuation ;\r
\r
\ successful-login DEBUG add-input-logging\r
\r
-: logout ( -- )\r
+: logout ( -- response )\r
permit-id get [ delete-permit ] when*\r
URL" $realm" end-aside ;\r
\r
-! Copyright (c) 2008 Slava Pestov
+! Copyright (c) 2008, 2009 Slava Pestov
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors kernel math.order namespaces combinators.short-circuit
+USING: accessors kernel math.order namespaces combinators.short-circuit call
html.forms
html.templates
html.templates.chloe
M:: boilerplate call-responder* ( path responder -- )
begin-form
path responder call-next-method
- responder init>> call
+ responder init>> call( -- )
dup wrap-boilerplate? [
clone [| body |
[
-! Copyright (C) 2008 Slava Pestov.
+! Copyright (C) 2008, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: accessors kernel http.server http.server.filters
-http.server.responses furnace.utilities ;
+http.server.responses furnace.utilities call ;
IN: furnace.referrer
TUPLE: referrer-check < filter-responder quot ;
C: <referrer-check> referrer-check
M: referrer-check call-responder*
- referrer over quot>> call
+ referrer over quot>> call( referrer -- ? )
[ call-next-method ]
[ 2drop 403 "Bad referrer" <trivial-response> ] if ;
exit-continuation get continue-with ;
: with-exit-continuation ( quot -- value )
- '[ exit-continuation set @ ] callcc1 exit-continuation off ;
+ '[ exit-continuation set @ ] callcc1 exit-continuation off ; inline
Slava Pestov
+Daniel Ehrenberg
{ $code "\"file.txt\" utf16 file-contents" }
"Encoding descriptors are also used by " { $link "io.streams.byte-array" } " and taken by combinators like " { $link with-file-writer } " and " { $link with-byte-reader } " which deal with streams. It is " { $emphasis "not" } " used with " { $link "io.streams.string" } " because these deal with abstract text."
$nl
-"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text."
-{ $see-also "stream-elements" } ;
+"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text." ;
ARTICLE: "io" "Input and output"
{ $heading "Streams" }
[ ] [ "-foo\n-bar" "farkup" set-value ] unit-test
-[ "<ul><li>foo</li>\n<li>bar</li></ul>" ] [
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [
[ "farkup" T{ farkup } render ] with-string-writer
] unit-test
-! Copyright (C) 2008 Slava Pestov
+! Copyright (C) 2008, 2009 Slava Pestov
! See http://factorcode.org/license.txt for BSD license.
-USING: kernel accessors strings namespaces assocs hashtables io
+USING: kernel accessors strings namespaces assocs hashtables io call
mirrors math fry sequences words continuations
xml.entities xml.writer xml.syntax ;
IN: html.forms
>hashtable "validators" set-word-prop ;
: validate ( value quot -- result )
- [ <validation-error> ] recover ; inline
+ '[ _ call( value -- validated ) ] [ <validation-error> ] recover ;
: validate-value ( name value quot -- )
validate
-! Copyright (C) 2008 Slava Pestov.
+! Copyright (C) 2008, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: accessors kernel fry io io.encodings.utf8 io.files
debugger prettyprint continuations namespaces boxes sequences
-arrays strings html io.streams.string assocs
+arrays strings html io.streams.string assocs call
quotations xml.data xml.writer xml.syntax ;
IN: html.templates
M: string call-template* write ;
-M: callable call-template* call ;
+M: callable call-template* call( -- ) ;
M: xml call-template* write-xml ;
[ content-charset>> encode-output ]
[ write-response-body ]
bi
- ] unless ;
+ ] unless drop ;
M: raw-response write-response ( respose -- )
write-response-line
write-response-body
drop ;
-M: raw-response write-full-response ( response -- )
- write-response ;
+M: raw-response write-full-response ( request response -- )
+ nip write-response ;
: post-request? ( -- ? ) request get method>> "POST" = ;
swap development? get [ make-http-error >>body ] [ drop ] if ;
: do-response ( response -- )
- [ request get swap write-full-response ]
+ '[ request get _ write-full-response ]
[
[ \ do-response log-error ]
[
{ $side-effects "responder" } ;
ARTICLE: "http.server.static.extend" "Hooks for dynamic content"
-"The static responder can be extended for dynamic content by associating quotations with MIME types in the hashtable stored in the " { $slot "special" } " slot. The quotations have stack effect " { $snippet "( path -- )" } "."
+"The static responder can be extended for dynamic content by associating quotations with MIME types in the hashtable stored in the " { $slot "special" } " slot. The quotations have stack effect " { $snippet "( path -- response )" } "."
$nl
"A utility word uses the above feature to enable server-side " { $snippet ".fhtml" } " scripts, allowing a development style much like PHP:"
{ $subsection enable-fhtml }
-! Copyright (C) 2004, 2008 Slava Pestov.\r
+! Copyright (C) 2004, 2009 Slava Pestov.\r
! See http://factorcode.org/license.txt for BSD license.\r
USING: calendar kernel math math.order math.parser namespaces\r
parser sequences strings assocs hashtables debugger mime.types\r
io.files.info io.directories io.pathnames io.encodings.binary\r
fry xml.entities destructors urls html xml.syntax\r
html.templates.fhtml http http.server http.server.responses\r
-http.server.redirection xml.writer ;\r
+http.server.redirection xml.writer call ;\r
IN: http.server.static\r
\r
TUPLE: file-responder root hook special allow-listings ;\r
\r
: serve-static ( filename mime-type -- response )\r
over modified-since?\r
- [ file-responder get hook>> call ] [ 2drop <304> ] if ;\r
+ [ file-responder get hook>> call( filename mime-type -- response ) ]\r
+ [ 2drop <304> ]\r
+ if ;\r
\r
: serving-path ( filename -- filename )\r
[ file-responder get root>> trim-tail-separators "/" ] dip\r
: serve-file ( filename -- response )\r
dup mime-type\r
dup file-responder get special>> at\r
- [ call ] [ serve-static ] ?if ;\r
+ [ call( filename -- response ) ] [ serve-static ] ?if ;\r
\r
\ serve-file NOTICE add-input-logging\r
\r
USING: images.bitmap images.viewer io.encodings.binary
-io.files io.files.unique kernel tools.test images.loader ;
+io.files io.files.unique kernel tools.test images.loader
+literals sequences ;
IN: images.bitmap.tests
-: test-bitmap24 ( -- path )
- "vocab:images/test-images/thiswayup24.bmp" ;
+CONSTANT: test-bitmap24 "vocab:images/test-images/thiswayup24.bmp"
-: test-bitmap8 ( -- path )
- "vocab:images/test-images/rgb8bit.bmp" ;
+CONSTANT: test-bitmap8 "vocab:images/test-images/rgb8bit.bmp"
-: test-bitmap4 ( -- path )
- "vocab:images/test-images/rgb4bit.bmp" ;
+CONSTANT: test-bitmap4 "vocab:images/test-images/rgb4bit.bmp"
-: test-bitmap1 ( -- path )
- "vocab:images/test-images/1bit.bmp" ;
+CONSTANT: test-bitmap1 "vocab:images/test-images/1bit.bmp"
[ t ]
[
"test-bitmap24" unique-file
[ save-bitmap ] [ binary file-contents ] bi =
] unit-test
+
+{
+ $ test-bitmap8
+ $ test-bitmap24
+ "vocab:ui/render/test/reference.bmp"
+} [ [ ] swap [ load-image drop ] curry unit-test ] each
\ No newline at end of file
USING: accessors alien alien.c-types arrays byte-arrays columns
combinators fry grouping io io.binary io.encodings.binary io.files
kernel macros math math.bitwise math.functions namespaces sequences
-strings images endian summary ;
+strings images endian summary locals ;
IN: images.bitmap
-TUPLE: bitmap-image < image
-magic size reserved offset header-length width
+: assert-sequence= ( a b -- )
+ 2dup sequence= [ 2drop ] [ assert ] if ;
+
+: read2 ( -- n ) 2 read le> ;
+: read4 ( -- n ) 4 read le> ;
+: write2 ( n -- ) 2 >le write ;
+: write4 ( n -- ) 4 >le write ;
+
+TUPLE: bitmap-image < image ;
+
+! Used to construct the final bitmap-image
+
+TUPLE: loading-bitmap
+size reserved offset header-length width
height planes bit-count compression size-image
x-pels y-pels color-used color-important rgb-quads color-index ;
-! Currently can only handle 24/32bit bitmaps.
-! Handles row-reversed bitmaps (their height is negative)
-
ERROR: bitmap-magic magic ;
M: bitmap-magic summary
<PRIVATE
-: array-copy ( bitmap array -- bitmap array' )
- over size-image>> abs memory>byte-array ;
-
: 8bit>buffer ( bitmap -- array )
[ rgb-quads>> 4 <sliced-groups> [ 3 head-slice ] map ]
[ color-index>> >array ] bi [ swap nth ] with map concat ;
ERROR: bmp-not-supported n ;
-: raw-bitmap>buffer ( bitmap -- array )
+: reverse-lines ( byte-array width -- byte-array )
+ 3 * <sliced-groups> <reversed> concat ; inline
+
+: raw-bitmap>seq ( loading-bitmap -- array )
dup bit-count>>
{
{ 32 [ color-index>> ] }
- { 24 [ color-index>> ] }
- { 16 [ bmp-not-supported ] }
- { 8 [ 8bit>buffer ] }
- { 4 [ bmp-not-supported ] }
- { 2 [ bmp-not-supported ] }
- { 1 [ bmp-not-supported ] }
+ { 24 [ [ color-index>> ] [ width>> ] bi reverse-lines ] }
+ { 8 [ [ 8bit>buffer ] [ width>> ] bi reverse-lines ] }
+ [ bmp-not-supported ]
} case >byte-array ;
-: read2 ( -- n ) 2 read le> ;
-: read4 ( -- n ) 4 read le> ;
-
-: parse-file-header ( bitmap -- bitmap )
- 2 read dup "BM" sequence= [ bitmap-magic ] unless >>magic
+: parse-file-header ( loading-bitmap -- loading-bitmap )
+ 2 read "BM" assert-sequence=
read4 >>size
read4 >>reserved
read4 >>offset ;
-: parse-bitmap-header ( bitmap -- bitmap )
+: parse-bitmap-header ( loading-bitmap -- loading-bitmap )
read4 >>header-length
read4 >>width
- read4 >>height
+ read4 32 >signed >>height
read2 >>planes
read2 >>bit-count
read4 >>compression
read4 >>color-used
read4 >>color-important ;
-: rgb-quads-length ( bitmap -- n )
+: rgb-quads-length ( loading-bitmap -- n )
[ offset>> 14 - ] [ header-length>> ] bi - ;
-: color-index-length ( bitmap -- n )
+: color-index-length ( loading-bitmap -- n )
{
[ width>> ]
[ planes>> * ]
[ height>> abs * ]
} cleave ;
-: parse-bitmap ( bitmap -- bitmap )
+: image-size ( loading-bitmap -- n )
+ [ [ width>> ] [ height>> ] bi * ] [ bit-count>> 8 /i ] bi * abs ;
+
+:: fixup-color-index ( loading-bitmap -- loading-bitmap )
+ loading-bitmap width>> :> width
+ width 3 * :> width*3
+ loading-bitmap height>> abs :> height
+ loading-bitmap color-index>> length :> color-index-length
+ color-index-length height /i :> stride
+ color-index-length width*3 height * - height /i :> padding
+ padding 0 > [
+ loading-bitmap [
+ stride <sliced-groups>
+ [ width*3 head-slice ] map concat
+ ] change-color-index
+ ] [
+ loading-bitmap
+ ] if ;
+
+: parse-bitmap ( loading-bitmap -- loading-bitmap )
dup rgb-quads-length read >>rgb-quads
- dup color-index-length read >>color-index ;
+ dup color-index-length read >>color-index
+ fixup-color-index ;
-: load-bitmap-data ( path bitmap -- bitmap )
+: load-bitmap-data ( path loading-bitmap -- loading-bitmap )
[ binary ] dip '[
_ parse-file-header parse-bitmap-header parse-bitmap
] with-file-reader ;
-: process-bitmap-data ( bitmap -- bitmap )
- dup raw-bitmap>buffer >>bitmap ;
-
ERROR: unknown-component-order bitmap ;
-: bitmap>component-order ( bitmap -- object )
+: bitmap>component-order ( loading-bitmap -- object )
bit-count>> {
{ 32 [ BGRA ] }
{ 24 [ BGR ] }
[ unknown-component-order ]
} case ;
-: fill-image-slots ( bitmap -- bitmap )
- dup {
- [ [ width>> ] [ height>> ] bi 2array >>dim ]
+: loading-bitmap>bitmap-image ( loading-bitmap -- bitmap-image )
+ [ bitmap-image new ] dip
+ {
+ [ raw-bitmap>seq >>bitmap ]
+ [ [ width>> ] [ height>> abs ] bi 2array >>dim ]
+ [ height>> 0 < [ t >>upside-down? ] when ]
[ bitmap>component-order >>component-order ]
- [ bitmap>> >>bitmap ]
} cleave ;
-M: bitmap-image load-image* ( path bitmap -- bitmap )
- load-bitmap-data process-bitmap-data
- fill-image-slots ;
-
-MACRO: (nbits>bitmap) ( bits -- )
- [ -3 shift ] keep '[
- bitmap-image new
- 2over * _ * >>size-image
- swap >>height
- swap >>width
- swap array-copy [ >>bitmap ] [ >>color-index ] bi
- _ >>bit-count fill-image-slots
- t >>upside-down?
- ] ;
-
-: bgr>bitmap ( array height width -- bitmap )
- 24 (nbits>bitmap) ;
-
-: bgra>bitmap ( array height width -- bitmap )
- 32 (nbits>bitmap) ;
-
-: write2 ( n -- ) 2 >le write ;
-: write4 ( n -- ) 4 >le write ;
+M: bitmap-image load-image* ( path loading-bitmap -- bitmap )
+ drop loading-bitmap new
+ load-bitmap-data
+ loading-bitmap>bitmap-image ;
PRIVATE>
-: save-bitmap ( bitmap path -- )
+: bitmap>color-index ( bitmap-array -- byte-array )
+ 4 <sliced-groups> [ 3 head-slice <reversed> ] map B{ } join ; inline
+
+: save-bitmap ( image path -- )
binary [
B{ CHAR: B CHAR: M } write
[
- color-index>> length 14 + 40 + write4
+ bitmap>> bitmap>color-index length 14 + 40 + write4
0 write4
54 write4
40 write4
] [
{
- [ width>> write4 ]
- [ height>> write4 ]
- [ planes>> 1 or write2 ]
- [ bit-count>> 24 or write2 ]
- [ compression>> 0 or write4 ]
- [ size-image>> write4 ]
- [ x-pels>> 0 or write4 ]
- [ y-pels>> 0 or write4 ]
- [ color-used>> 0 or write4 ]
- [ color-important>> 0 or write4 ]
- [ rgb-quads>> write ]
- [ color-index>> write ]
+ ! width height
+ [ dim>> first2 [ write4 ] bi@ ]
+
+ ! planes
+ [ drop 1 write2 ]
+
+ ! bit-count
+ [ drop 24 write2 ]
+
+ ! compression
+ [ drop 0 write4 ]
+
+ ! size-image
+ [ bitmap>> bitmap>color-index length write4 ]
+
+ ! x-pels
+ [ drop 0 write4 ]
+
+ ! y-pels
+ [ drop 0 write4 ]
+
+ ! color-used
+ [ drop 0 write4 ]
+
+ ! color-important
+ [ drop 0 write4 ]
+
+ ! rgb-quads
+ [
+ [ bitmap>> bitmap>color-index ] [ dim>> first ] bi
+ reverse-lines write
+ ]
} cleave
] bi
] with-file-writer ;
M: R16G16B16 normalize-component-order*
drop RGB16>8 add-dummy-alpha ;
-: BGR>RGB ( bitmap bytes-per-pixel -- pixels )
- <groups> [ 3 cut [ reverse ] dip append ] map B{ } join ; inline
+: BGR>RGB ( bitmap -- pixels )
+ 3 <sliced-groups> [ <reversed> ] map B{ } join ; inline
+
+: BGRA>RGBA ( bitmap -- pixels )
+ 4 <sliced-groups>
+ [ unclip-last-slice [ <reversed> ] dip suffix ] map concat ; inline
M: BGRA normalize-component-order*
- drop 4 BGR>RGB ;
+ drop BGRA>RGBA ;
M: RGB normalize-component-order*
drop add-dummy-alpha ;
M: BGR normalize-component-order*
- drop 3 BGR>RGB add-dummy-alpha ;
+ drop BGR>RGB add-dummy-alpha ;
: ARGB>RGBA ( bitmap -- bitmap' )
- 4 <groups> [ unclip suffix ] map B{ } join ;
+ 4 <groups> [ unclip suffix ] map B{ } join ; inline
M: ARGB normalize-component-order*
drop ARGB>RGBA ;
M: ABGR normalize-component-order*
- drop ARGB>RGBA 4 BGR>RGB ;
+ drop ARGB>RGBA BGRA>RGBA ;
: normalize-scan-line-order ( image -- image )
dup upside-down?>> [
! See http://factorcode.org/license.txt for BSD license.
USING: accessors constructors images io io.binary io.encodings.ascii
io.encodings.binary io.encodings.string io.files io.files.info kernel
-sequences io.streams.limited ;
+sequences io.streams.limited fry combinators arrays math
+checksums checksums.crc32 ;
IN: images.png
-TUPLE: png-image < image chunks ;
+TUPLE: png-image < image chunks
+width height bit-depth color-type compression-method
+filter-method interlace-method uncompressed ;
CONSTRUCTOR: png-image ( -- image )
V{ } clone >>chunks ;
-TUPLE: png-chunk length type data crc ;
+TUPLE: png-chunk length type data ;
CONSTRUCTOR: png-chunk ( -- png-chunk ) ;
bad-png-header
] unless drop ;
+ERROR: bad-checksum ;
+
: read-png-chunks ( image -- image )
<png-chunk>
- 4 read be> >>length
- 4 read ascii decode >>type
- dup length>> read >>data
- 4 read >>crc
+ 4 read be> [ >>length ] [ 4 + ] bi
+ read dup crc32 checksum-bytes
+ 4 read = [ bad-checksum ] unless
+ 4 cut-slice
+ [ ascii decode >>type ]
+ [ B{ } like >>data ] bi*
[ over chunks>> push ]
[ type>> ] bi "IEND" =
[ read-png-chunks ] unless ;
+: find-chunk ( image string -- chunk )
+ [ chunks>> ] dip '[ type>> _ = ] find nip ;
+
+: parse-ihdr-chunk ( image -- image )
+ dup "IHDR" find-chunk data>> {
+ [ [ 0 4 ] dip subseq be> >>width ]
+ [ [ 4 8 ] dip subseq be> >>height ]
+ [ [ 8 ] dip nth >>bit-depth ]
+ [ [ 9 ] dip nth >>color-type ]
+ [ [ 10 ] dip nth >>compression-method ]
+ [ [ 11 ] dip nth >>filter-method ]
+ [ [ 12 ] dip nth >>interlace-method ]
+ } cleave ;
+
+: find-compressed-bytes ( image -- bytes )
+ chunks>> [ type>> "IDAT" = ] filter
+ [ data>> ] map concat ;
+
+: fill-image-data ( image -- image )
+ dup [ width>> ] [ height>> ] bi 2array >>dim ;
+
: load-png ( path -- image )
- [ binary <file-reader> ] [ file-info size>> ] bi stream-throws <limited-stream> [
+ [ binary <file-reader> ] [ file-info size>> ] bi
+ stream-throws <limited-stream> [
<png-image>
read-png-header
read-png-chunks
+ parse-ihdr-chunk
+ fill-image-data
] with-input-stream ;
[ unknown-component-order ]
} case ;
+: normalize-alpha-data ( seq -- byte-array )
+ ! [ normalize-alpha-data ] change-bitmap
+ B{ } like dup
+ byte-array>float-array
+ 4 <sliced-groups>
+ [
+ dup fourth dup 0 = [
+ 2drop
+ ] [
+ [ 3 head-slice ] dip '[ _ / ] change-each
+ ] if
+ ] each ;
+
: handle-alpha-data ( ifd -- ifd )
dup extra-samples find-tag {
- { extra-samples-associated-alpha-data [
- [
- B{ } like dup
- byte-array>float-array
- 4 <sliced-groups>
- [
- dup fourth dup 0 = [
- 2drop
- ] [
- [ 3 head-slice ] dip '[ _ / ] change-each
- ] if
- ] each
- ] change-bitmap
- ] }
- { extra-samples-unspecified-alpha-data [
- ] }
- { extra-samples-unassociated-alpha-data [
- ] }
+ { extra-samples-associated-alpha-data [ ] }
+ { extra-samples-unspecified-alpha-data [ ] }
+ { extra-samples-unassociated-alpha-data [ ] }
[ bad-extra-samples ]
} case ;
continuations debugger classes.tuple namespaces make vectors
bit-arrays byte-arrays strings sbufs math.functions macros
sequences.private combinators mirrors splitting
-combinators.short-circuit fry words.symbol generalizations ;
+combinators.short-circuit fry words.symbol generalizations call ;
RENAME: _ fry => __
IN: inverse
M: pop-inverse inverse
[ "pop-length" word-prop cut-slice swap >quotation ]
- [ "pop-inverse" word-prop ] bi compose call ;
+ [ "pop-inverse" word-prop ] bi compose call( -- quot ) ;
: (undo) ( revquot -- )
[ unclip-slice inverse % (undo) ] unless-empty ;
TUPLE: input-port < buffered-port ;
+M: input-port stream-element-type drop +byte+ ;
+
: <input-port> ( handle -- input-port )
input-port <buffered-port> ;
[ nip ] [ buffer>> buffer-capacity <= ] 2bi
[ drop ] [ stream-flush ] if ; inline
+M: output-port stream-element-type stream>> stream-element-type ;
+
M: output-port stream-write1
dup check-disposed
1 over wait-to-write
-! Copyright (C) 2003, 2008 Slava Pestov.
+! Copyright (C) 2003, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: continuations destructors kernel math math.parser
namespaces parser sequences strings prettyprint
io.sockets.secure io.files io.streams.duplex io.timeouts
io.encodings threads make concurrency.combinators
concurrency.semaphores concurrency.flags
-combinators.short-circuit ;
+combinators.short-circuit call ;
IN: io.servers.connection
TUPLE: threaded-server
[ [ remote-address set ] [ local-address set ] bi* ]
2bi ;
-M: threaded-server handle-client* handler>> call ;
+M: threaded-server handle-client* handler>> call( -- ) ;
: handle-client ( client remote local -- )
'[
io.streams.sequence destructors math combinators ;
IN: io.streams.byte-array
+M: byte-vector stream-element-type drop +byte+ ;
+
: <byte-writer> ( encoding -- stream )
512 <byte-vector> swap <encoder> ;
TUPLE: byte-reader { underlying byte-array read-only } { i array-capacity } ;
+M: byte-reader stream-element-type drop +byte+ ;
+
M: byte-reader stream-read-partial stream-read ;
M: byte-reader stream-read sequence-read ;
M: byte-reader stream-read1 sequence-read1 ;
: >duplex-stream< ( stream -- in out ) [ in>> ] [ out>> ] bi ; inline
+M: duplex-stream stream-element-type
+ [ in>> ] [ out>> ] bi
+ [ stream-element-type ] bi@
+ 2dup eq? [ drop ] [ "Cannot determine element type" throw ] if ;
+
M: duplex-stream set-timeout
>duplex-stream< [ set-timeout ] bi-curry@ bi ;
: <memory-stream> ( alien -- stream )
0 memory-stream boa ;
+M: memory-stream stream-element-type drop +byte+ ;
+
M: memory-stream stream-read1
[ [ alien>> ] [ index>> ] bi alien-unsigned-1 ]
[ [ 1+ ] change-index drop ] bi ;
io.streams.plain io.encodings math.order growable io.streams.sequence ;
IN: io.streams.string
-<PRIVATE
-
-SINGLETON: null-encoding
-
-M: null-encoding decode-char drop stream-read1 ;
-
-PRIVATE>
-
-M: growable dispose drop ;
-
-M: growable stream-write1 push ;
-M: growable stream-write push-all ;
-M: growable stream-flush drop ;
-
-: <string-writer> ( -- stream )
- 512 <sbuf> ;
-
-: with-string-writer ( quot -- str )
- <string-writer> swap [ output-stream get ] compose with-output-stream*
- >string ; inline
-
-! New implementation
-
+! Readers
TUPLE: string-reader { underlying string read-only } { i array-capacity } ;
+M: string-reader stream-element-type drop +character+ ;
M: string-reader stream-read-partial stream-read ;
M: string-reader stream-read sequence-read ;
M: string-reader stream-read1 sequence-read1 ;
M: string-reader stream-read-until sequence-read-until ;
M: string-reader dispose drop ;
+<PRIVATE
+SINGLETON: null-encoding
+M: null-encoding decode-char drop stream-read1 ;
+PRIVATE>
+
: <string-reader> ( str -- stream )
0 string-reader boa null-encoding <decoder> ;
: with-string-reader ( str quot -- )
[ <string-reader> ] dip with-input-stream ; inline
-INSTANCE: growable plain-writer
+! Writers
+M: sbuf stream-element-type drop +character+ ;
+
+: <string-writer> ( -- stream )
+ 512 <sbuf> ;
+
+: with-string-writer ( quot -- str )
+ <string-writer> swap [ output-stream get ] compose with-output-stream*
+ >string ; inline
\ No newline at end of file
CONSULT: formatted-output-stream-protocol filter-writer stream>> ;
+M: filter-writer stream-element-type stream>> stream-element-type ;
+
M: filter-writer dispose stream>> dispose ;
TUPLE: ignore-close-stream < filter-writer ;
{ $subsection cdr }
{ $subsection nil? } ;
-ARTICLE: { "lists" "strict" } "Strict lists"
+ARTICLE: { "lists" "strict" } "Constructing strict lists"
"Strict lists are simply cons cells where the car and cdr have already been evaluated. These are the lists of Lisp. To construct a strict list, the following words are provided:"
{ $subsection cons }
{ $subsection swons }
[ >alist sort-values <reversed> ] dip [\r
[ swapd with-cell pprint-cell ] with-row\r
] curry assoc-each\r
- ] tabular-output ;\r
+ ] tabular-output ; inline\r
\r
: log-entry. ( entry -- )\r
"====== " write\r
PRIVATE>\r
\r
: (define-logging) ( word level quot -- )\r
- [ dup ] 2dip 2curry annotate ;\r
+ [ dup ] 2dip 2curry annotate ; inline\r
\r
: call-logging-quot ( quot word level -- quot' )\r
[ "called" ] 2dip [ log-message ] 3curry prepose ;\r
[ fixnum-bitnot ] [ \ bitnot modular-variant ] unit-test
[ fixnum+fast ] [ \ fixnum+ modular-variant ] unit-test
[ fixnum+fast ] [ \ fixnum+fast modular-variant ] unit-test
+
+[ 3 ] [ 1 2 +-integer-integer ] unit-test
+[ 3 ] [ 1 >bignum 2 +-integer-integer ] unit-test
+[ 3 ] [ 1 2 >bignum +-integer-integer ] unit-test
+[ 3 ] [ 1 >bignum 2 >bignum +-integer-integer ] unit-test
\ No newline at end of file
{ bitnot fixnum-bitnot }
} at swap or ;
+: bignum-fixnum-op-quot ( big-word -- quot )
+ '[ fixnum>bignum _ execute ] ;
+
+: fixnum-bignum-op-quot ( big-word -- quot )
+ '[ [ fixnum>bignum ] dip _ execute ] ;
+
: integer-fixnum-op-quot ( fix-word big-word -- quot )
[
[ over fixnum? ] %
- [ '[ _ execute ] , ]
- [ '[ fixnum>bignum _ execute ] , ] bi*
- \ if ,
+ [ '[ _ execute ] , ] [ bignum-fixnum-op-quot , ] bi* \ if ,
] [ ] make ;
: fixnum-integer-op-quot ( fix-word big-word -- quot )
[
[ dup fixnum? ] %
- [ '[ _ execute ] , ]
- [ '[ [ fixnum>bignum ] dip _ execute ] , ] bi*
- \ if ,
+ [ '[ _ execute ] , ] [ fixnum-bignum-op-quot , ] bi* \ if ,
+ ] [ ] make ;
+
+: integer-bignum-op-quot ( big-word -- quot )
+ [
+ [ over fixnum? ] %
+ [ fixnum-bignum-op-quot , ] [ '[ _ execute ] , ] bi \ if ,
] [ ] make ;
: integer-integer-op-quot ( fix-word big-word -- quot )
[
- [ dup fixnum? ] %
- 2dup integer-fixnum-op-quot ,
+ [ 2dup both-fixnums? ] %
+ [ '[ _ execute ] , ]
[
- [ over fixnum? [ [ fixnum>bignum ] dip ] when ] %
- nip ,
- ] [ ] make ,
- \ if ,
+ [
+ [ dup fixnum? ] %
+ [ bignum-fixnum-op-quot , ]
+ [ integer-bignum-op-quot , ] bi \ if ,
+ ] [ ] make ,
+ ] bi* \ if ,
] [ ] make ;
: integer-op-word ( triple -- word )
continuations peg peg.parsers unicode.categories multiline\r
splitting accessors effects sequences.deep peg.search\r
combinators.short-circuit lexer io.streams.string stack-checker\r
-io combinators parser ;\r
+io combinators parser call ;\r
IN: peg.ebnf\r
\r
: rule ( name word -- parser )\r
\r
: TOKENIZER: \r
scan search [ "Tokenizer not found" throw ] unless*\r
- execute \ tokenizer set-global ; parsing\r
+ execute( -- tokenizer ) \ tokenizer set-global ; parsing\r
\r
TUPLE: ebnf-non-terminal symbol ;\r
TUPLE: ebnf-terminal symbol ;\r
options>> [ (transform) ] map choice ;\r
\r
M: ebnf-any-character (transform) ( ast -- parser )\r
- drop tokenizer any>> call ;\r
+ drop tokenizer any>> call( -- parser ) ;\r
\r
M: ebnf-range (transform) ( ast -- parser )\r
pattern>> range-pattern ;\r
\r
M: ebnf-action (transform) ( ast -- parser )\r
[ parser>> (transform) ] [ code>> insert-escapes ] [ parser>> ] tri build-locals \r
- string-lines parse-lines check-action-effect action ;\r
+ [ string-lines parse-lines ] call( string -- quot ) check-action-effect action ;\r
\r
M: ebnf-semantic (transform) ( ast -- parser )\r
[ parser>> (transform) ] [ code>> insert-escapes ] [ parser>> ] tri build-locals \r
- string-lines parse-lines semantic ;\r
+ [ string-lines parse-lines ] call( string -- quot ) semantic ;\r
\r
M: ebnf-var (transform) ( ast -- parser )\r
parser>> (transform) ;\r
\r
M: ebnf-terminal (transform) ( ast -- parser )\r
- symbol>> tokenizer one>> call ;\r
+ symbol>> tokenizer one>> call( symbol -- parser ) ;\r
\r
M: ebnf-foreign (transform) ( ast -- parser )\r
dup word>> search\r
swap rule>> [ main ] unless* over rule [\r
nip\r
] [\r
- execute\r
+ execute( -- parser )\r
] if* ;\r
\r
: parser-not-found ( name -- * )\r
\r
: EBNF: \r
reset-tokenizer CREATE-WORD dup ";EBNF" parse-multiline-string \r
- ebnf>quot swapd 1 1 <effect> define-declared "ebnf-parser" set-word-prop \r
+ ebnf>quot swapd (( input -- ast )) define-declared "ebnf-parser" set-word-prop \r
reset-tokenizer ; parsing\r
\r
\r
peg peg.private peg.parsers accessors words math accessors ;
IN: peg.tests
+\ parse must-infer
+
[ ] [ reset-pegs ] unit-test
[
io vectors arrays math.parser math.order vectors combinators
classes sets unicode.categories compiler.units parser words
quotations effects memoize accessors locals effects splitting
-combinators.short-circuit generalizations ;
+combinators.short-circuit generalizations call ;
IN: peg
TUPLE: parse-result remaining ast ;
#! Work through all delayed parsers and recompile their
#! words to have the correct bodies.
delayed get [
- call compile-parser 1quotation (( -- result )) define-declared
+ call( -- parser ) compile-parser 1quotation (( -- result )) define-declared
] assoc-each ;
: compile ( parser -- word )
] with-compilation-unit ;
: compiled-parse ( state word -- result )
- swap [ execute [ error-stack get first throw ] unless* ] with-packrat ; inline
+ swap [ execute( -- result ) [ error-stack get first throw ] unless* ] with-packrat ;
: (parse) ( input parser -- result )
dup word? [ compile ] unless compiled-parse ;
#! to produce the parser to be compiled.
#! This differs from 'delay' which calls
#! it at run time.
- quot>> call compile-parser 1quotation ;
+ quot>> call( -- parser ) compile-parser 1quotation ;
PRIVATE>
"abc 123 def 456" 'integer' [ 2 * number>string ] action replace
] unit-test
+\ search must-infer
+\ replace must-infer
TUPLE: options on off ;
C: <options> options
-SINGLETONS: unix-lines dotall multiline comments case-insensitive
-unicode-case reversed-regexp ;
+SINGLETONS: unix-lines dotall multiline case-insensitive reversed-regexp ;
: <maybe> ( term -- term' )
f <concatenation> 2array <alternation> ;
control-character-class hex-digit-class java-blank-class c-identifier-class
unmatchable-class terminator-class word-boundary-class ;
-SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file word-break ;
+SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file ^unix $unix word-break ;
TUPLE: range from to ;
C: <range> range
ABOUT: "regexp.combinators"
+ARTICLE: "regexp.combinators.intro" "Regular expression combinator rationale"
+"Regular expression combinators are useful when part of the regular expression contains user input. For example, given a sequence of strings on the stack, a regular expression which matches any one of them can be constructed:"
+{ $code
+ "[ <literal> ] map <or>"
+}
+"Without combinators, a naive approach would look as follows:"
+{ $code
+ "\"|\" join <regexp>"
+}
+"However, this code is incorrect, because one of the strings in the sequence might contain characters which have special meaning inside a regular expression. Combinators avoid this problem by building a regular expression syntax tree directly, without any parsing." ;
+
ARTICLE: "regexp.combinators" "Regular expression combinators"
-"The " { $vocab-link "regexp.combinators" } " vocabulary defines combinators which can be used to build up regular expressions to match strings. This is in addition to the traditional syntax defined in the " { $vocab-link "regexp" } " vocabulary."
+"The " { $vocab-link "regexp.combinators" } " vocabulary defines combinators which can be used to build up regular expressions to match strings. This complements the traditional syntax defined in the " { $vocab-link "regexp" } " vocabulary."
+{ $subsection "regexp.combinators.intro" }
+"Basic combinators:"
{ $subsection <literal> }
{ $subsection <nothing> }
+"Higher-order combinators for building new regular expressions from existing ones:"
{ $subsection <or> }
{ $subsection <and> }
{ $subsection <not> }
{ $subsection <sequence> }
{ $subsection <zero-or-more> }
+"Derived combinators implemented in terms of the above:"
{ $subsection <one-or-more> }
+"Setting options:"
{ $subsection <option> } ;
HELP: <literal>
USING: regexp.classes kernel sequences regexp.negation
quotations assocs fry math locals combinators
accessors words compiler.units kernel.private strings
-sequences.private arrays call namespaces unicode.breaks
+sequences.private arrays namespaces unicode.breaks
regexp.transition-tables combinators.short-circuit ;
IN: regexp.compiler
M: t question>quot drop [ 2drop t ] ;
M: f question>quot drop [ 2drop f ] ;
-M: not-class question>quot
- class>> question>quot [ not ] compose ;
-
M: beginning-of-input question>quot
drop [ drop zero? ] ;
M: ^ question>quot
drop [ { [ drop zero? ] [ [ 1- ] dip ?nth "\r\n" member? ] } 2|| ] ;
+M: $unix question>quot
+ drop [ { [ length = ] [ ?nth CHAR: \n = ] } 2|| ] ;
+
+M: ^unix question>quot
+ drop [ { [ drop zero? ] [ [ 1- ] dip ?nth CHAR: \n = ] } 2|| ] ;
+
M: word-break question>quot
drop [ word-break-at? ] ;
transitions>quot ;
: states>code ( words dfa -- )
- [ ! with-compilation-unit doesn't compile, so we need call( -- )
- [
- '[
- dup _ word>quot
- (( last-match index string -- ? ))
- define-declared
- ] each
- ] with-compilation-unit
- ] call( words dfa -- ) ;
+ '[
+ dup _ word>quot
+ (( last-match index string -- ? ))
+ define-declared
+ ] each ;
: states>words ( dfa -- words dfa )
dup transitions>> keys [ gensym ] H{ } map>assoc
PRIVATE>
-: simple-define-temp ( quot effect -- word )
- [ [ define-temp ] with-compilation-unit ] call( quot effect -- word ) ;
-
: dfa>word ( dfa -- quot )
dfa>main-word execution-quot '[ drop [ f ] 2dip @ ]
- (( start-index string regexp -- i/f )) simple-define-temp ;
+ (( start-index string regexp -- i/f )) define-temp ;
: dfa>shortest-word ( dfa -- word )
t shortest? [ dfa>word ] with-variable ;
! Copyright (C) 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: kernel accessors regexp.classes math.bits assocs sequences
-arrays sets regexp.dfa math fry regexp.minimize regexp.ast regexp.transition-tables ;
+arrays sets regexp.dfa math fry regexp.minimize regexp.ast
+locals regexp.transition-tables ;
IN: regexp.disambiguate
TUPLE: parts in out ;
: make-partition ( choices classes -- partition )
zip [ first ] partition [ values ] bi@ parts boa ;
-: powerset-partition ( classes -- partitions )
+: powerset-partition ( sequence -- partitions )
[ length [ 2^ ] keep ] keep '[
_ <bits> _ make-partition
] map rest ;
[ in>> <and-class> ] bi
prefix <and-class> ;
-: get-transitions ( partition state-transitions -- next-states )
- [ in>> ] dip '[ _ at ] gather sift ;
+: singleton-partition ( integer non-integers -- {class,partition} )
+ dupd
+ '[ _ [ class-member? ] with filter ] keep
+ prefix f parts boa
+ 2array ;
+
+: add-out ( seq partition -- partition' )
+ [ out>> append ] [ in>> ] bi swap parts boa ;
+
+: intersection ( seq -- elts )
+ [ f ] [ unclip [ intersect ] reduce ] if-empty ;
+
+: meaningful-integers ( partition table -- integers )
+ [ [ in>> ] [ out>> ] bi ] dip
+ '[ [ _ at ] map intersection ] bi@ diff ;
+
+: class-integers ( classes integers -- table )
+ '[ _ over '[ _ class-member? ] filter ] H{ } map>assoc ;
+
+: add-integers ( partitions classes integers -- partitions )
+ class-integers '[
+ [ _ meaningful-integers ] keep add-out
+ ] map ;
+
+: class-partitions ( classes -- assoc )
+ [ integer? ] partition [
+ dup powerset-partition spin add-integers
+ [ [ partition>class ] keep 2array ] map
+ [ first ] filter
+ ] [ '[ _ singleton-partition ] map ] 2bi append ;
: new-transitions ( transitions -- assoc ) ! assoc is class, partition
values [ keys ] gather
[ tagged-epsilon? not ] filter
- powerset-partition
- [ [ partition>class ] keep ] { } map>assoc
- [ drop ] assoc-filter ;
+ class-partitions ;
+
+: get-transitions ( partition state-transitions -- next-states )
+ [ in>> ] dip '[ _ at ] gather sift ;
: preserving-epsilon ( state-transitions quot -- new-state-transitions )
[ [ drop tagged-epsilon? ] assoc-filter ] bi
assoc-union H{ } assoc-like ; inline
+
: disambiguate ( nfa -- nfa )
expand-ors [
dup new-transitions '[
! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
-USING: accessors arrays assocs grouping kernel
-locals math namespaces sequences fry quotations
-math.order math.ranges vectors unicode.categories
-regexp.transition-tables words sets hashtables combinators.short-circuit
-unicode.case.private regexp.ast regexp.classes ;
+USING: accessors arrays assocs grouping kernel locals math namespaces
+sequences fry quotations math.order math.ranges vectors
+unicode.categories regexp.transition-tables words sets hashtables
+combinators.short-circuit unicode.case unicode.case.private regexp.ast
+regexp.classes ;
IN: regexp.nfa
! This uses unicode.case.private for ch>upper and ch>lower
M: object modify-epsilon ;
+: line-option ( multiline unix-lines default -- option )
+ multiline option? [
+ drop [ unix-lines option? ] 2dip swap ?
+ ] [ 2nip ] if ;
+
M: $ modify-epsilon
- multiline option? [ drop end-of-input ] unless ;
+ $unix end-of-input line-option ;
M: ^ modify-epsilon
- multiline option? [ drop beginning-of-input ] unless ;
+ ^unix beginning-of-input line-option ;
M: tagged-epsilon nfa-node
clone [ modify-epsilon ] change-tag add-simple-entry ;
! See http://factorcode.org/license.txt for BSD license.
USING: peg.ebnf kernel math.parser sequences assocs arrays fry math
combinators regexp.classes strings splitting peg locals accessors
-regexp.ast ;
+regexp.ast unicode.case ;
IN: regexp.parser
: allowed-char? ( ch -- ? )
ERROR: bad-class name ;
: name>class ( name -- class )
- {
- { "Lower" letter-class }
- { "Upper" LETTER-class }
- { "Alpha" Letter-class }
- { "ASCII" ascii-class }
- { "Digit" digit-class }
- { "Alnum" alpha-class }
- { "Punct" punctuation-class }
- { "Graph" java-printable-class }
- { "Print" java-printable-class }
- { "Blank" non-newline-blank-class }
- { "Cntrl" control-character-class }
- { "XDigit" hex-digit-class }
- { "Space" java-blank-class }
+ >string >case-fold {
+ { "lower" letter-class }
+ { "upper" LETTER-class }
+ { "alpha" Letter-class }
+ { "ascii" ascii-class }
+ { "digit" digit-class }
+ { "alnum" alpha-class }
+ { "punct" punctuation-class }
+ { "graph" java-printable-class }
+ { "blank" non-newline-blank-class }
+ { "cntrl" control-character-class }
+ { "xdigit" hex-digit-class }
+ { "space" java-blank-class }
! TODO: unicode-character-class
} [ bad-class ] at-error ;
{ CHAR: i case-insensitive }
{ CHAR: d unix-lines }
{ CHAR: m multiline }
- { CHAR: n multiline }
{ CHAR: r reversed-regexp }
{ CHAR: s dotall }
- { CHAR: u unicode-case }
- { CHAR: x comments }
} ;
: ch>option ( ch -- singleton )
QuotedCharacter = !("\\E") .
-Escape = "p{" CharacterInBracket*:s "}" => [[ s >string name>class <primitive-class> ]]
- | "P{" CharacterInBracket*:s "}" => [[ s >string name>class <primitive-class> <negation> ]]
+Escape = "p{" CharacterInBracket*:s "}" => [[ s name>class <primitive-class> ]]
+ | "P{" CharacterInBracket*:s "}" => [[ s name>class <primitive-class> <negation> ]]
| "Q" QuotedCharacter*:s "\\E" => [[ s <concatenation> ]]
| "u" Character:a Character:b Character:c Character:d
=> [[ { a b c d } hex> ensure-number ]]
! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
-USING: kernel strings help.markup help.syntax math ;
+USING: kernel strings help.markup help.syntax math regexp.parser
+regexp.ast multiline ;
IN: regexp
ABOUT: "regexp"
ARTICLE: "regexp" "Regular expressions"
"The " { $vocab-link "regexp" } " vocabulary provides word for creating and using regular expressions."
+{ $subsection { "regexp" "intro" } }
+"The class of regular expressions:"
+{ $subsection regexp }
+"Basic usage:"
{ $subsection { "regexp" "syntax" } }
+{ $subsection { "regexp" "options" } }
{ $subsection { "regexp" "construction" } }
-{ $vocab-subsection "regexp.combinators" "Regular expression combinators" }
{ $subsection { "regexp" "operations" } }
-{ $subsection regexp }
-{ $subsection { "regexp" "theory" } } ;
+"Advanced topics:"
+{ $vocab-subsection "Regular expression combinators" "regexp.combinators" }
+{ $subsection { "regexp" "theory" } }
+{ $subsection { "regexp" "deploy" } } ;
+
+ARTICLE: { "regexp" "intro" } "A quick introduction to regular expressions"
+"Regular expressions are a terse way to do certain simple string processing tasks. For example, to replace all instances of " { $snippet "foo" } " in one string with " { $snippet "bar" } ", the following can be used:"
+{ $code "R/ foo/ \"bar\" re-replace" }
+"That could be done with sequence operations, but consider doing this replacement for an arbitrary number of o's, at least two:"
+{ $code "R/ foo+/ \"bar\" re-replace" }
+"The " { $snippet "+" } " operator matches one or more occurrences of the previous expression; in this case " { $snippet "o" } ". Another useful feature is alternation. Say we want to do this replacement with fooooo or boooo. Then we could use the code"
+{ $code "R/ (f|b)oo+/ \"bar\" re-replace" }
+"To search a file for all lines that match a given regular expression, you could use code like this:"
+{ $code <" "file.txt" ascii file-lines [ R/ (f|b)oo+/ re-contains? ] filter "> }
+"To test if a string in its entirety matches a regular expression, the following can be used:"
+{ $example <" USING: regexp prettyprint ; "fooo" R/ (b|f)oo+/ matches? . "> "t" }
+"Regular expressions can't be used for all parsing tasks. For example, they are not powerful enough to match balancing parentheses." ;
ARTICLE: { "regexp" "construction" } "Constructing regular expressions"
-"Words which are useful for creating regular expressions:"
+"Most of the time, regular expressions are literals and the parsing word should be used, to construct them at parse time. This ensures that they are only compiled once, and gives parse time syntax checking."
{ $subsection POSTPONE: R/ }
+"Sometimes, regular expressions need to be constructed at run time instead; for example, in a text editor, the user might input a regular expression to search for in a document."
{ $subsection <regexp> }
{ $subsection <optioned-regexp> }
-{ $heading "See also" }
-{ $vocab-link "regexp.combinators" } ;
+"Another approach is to use " { $vocab-link "regexp.combinators" } "." ;
ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
-"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely." $nl
-"A new addition is the inclusion of a negation operator, with the syntax " { $snippet "(?~foo)" } " to match everything that does not match " { $snippet "foo" } "." $nl
+"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. Below, the syntax is documented."
+{ $heading "Characters" }
+"At its core, regular expressions consist of character literals. For example, " { $snippet "R/ f/" } " is a regular expression matching just the string 'f'. In addition, the normal escape codes are provided, like " { $snippet "\\t" } " for the tab character and " { $snippet "\\uxxxxxx" } " for an arbitrary Unicode code point, by its hex value. Additionally, any character can be preceded by a backslash to escape it, unless this has special meaning. For example, to match a literal opening parenthesis, use " { $snippet "\\(" } "."
+{ $heading "Concatenation, alternation and grouping" }
+"Regular expressions can be built out of multiple characters by concatenation. For example, " { $snippet "R/ ab/" } " matches a followed by b. The " { $snippet "|" } " (alternation) operator can construct a regexp which matches one of two alternatives. Parentheses can be used for grouping. So " { $snippet "R/ f(oo|ar)/" } " would match either 'foo' or 'far'."
+{ $heading "Character classes" }
+"Square brackets define a convenient way to refer to a set of characters. For example, " { $snippet "[ab]" } " refers to either a or b. And " { $snippet "[a-z]" } " refers to all of the characters between a and z, in code point order. You can use these together, as in " { $snippet "[ac-fz]" } " which matches all of the characters between c and f, in addition to a and z. Character classes can be negated using a caret, as in " { $snippet "[^a]" } " which matches all characters which are not a."
+{ $heading "Predefined character classes" }
+"Several character classes are predefined, both for convenience and because they are too large to represent directly. In Factor regular expressions, all character classes are Unicode-aware."
+{ $table
+ { { $snippet "\\d" } "Digits" }
+ { { $snippet "\\D" } "Not digits" }
+ { { $snippet "\\s" } "Whitespace" }
+ { { $snippet "\\S" } "Not whitespace" }
+ { { $snippet "\\w" } "Word character (alphanumeric or underscore)" }
+ { { $snippet "\\W" } "Not word character" }
+ { { $snippet "\\p{property}" } "Character which fulfils the property" }
+ { { $snippet "\\P{property}" } "Character which does not fulfil the property" } }
+"Properties for " { $snippet "\\p" } " and " { $snippet "\\P" } " (case-insensitive):"
+{ $table
+ { { $snippet "\\p{lower}" } "Lower case letters" }
+ { { $snippet "\\p{upper}" } "Upper case letters" }
+ { { $snippet "\\p{alpha}" } "Letters" }
+ { { $snippet "\\p{ascii}" } "Characters in the ASCII range" }
+ { { $snippet "\\p{alnum}" } "Letters or numbers" }
+ { { $snippet "\\p{punct}" } "Punctuation" }
+ { { $snippet "\\p{blank}" } "Non-newline whitespace" }
+ { { $snippet "\\p{cntrl}" } "Control character" }
+ { { $snippet "\\p{space}" } "Whitespace" }
+ { { $snippet "\\p{xdigit}" } "Hexadecimal digit" } } ! In the future: Unicode
+"Full unicode properties are not yet supported."
+{ $heading "Boundaries" }
+"Special operators exist to match certain points in the string. These are called 'zero-width' because they do not consume any characters."
+{ $table
+ { { $snippet "^" } "Beginning of a line" }
+ { { $snippet "$" } "End of a line" }
+ { { $snippet "\\A" } "Beginning of text" }
+ { { $snippet "\\z" } "End of text" }
+ { { $snippet "\\Z" } "Almost end of text: only thing after is newline" }
+ { { $snippet "\\b" } "Word boundary (by Unicode word boundaries)" }
+ { { $snippet "\\B" } "Not word boundary (by Unicode word boundaries)" } }
+{ $heading "Greedy quantifiers" }
+"It is possible to have a regular expression which matches a variable number of occurrences of another regular expression."
+{ $table
+ { { $snippet "a*" } "Zero or more occurrences of a" }
+ { { $snippet "a+" } "One or more occurrences of a" }
+ { { $snippet "a?" } "Zero or one occurrences of a" }
+ { { $snippet "a{n}" } "n occurrences of a" }
+ { { $snippet "a{n,}" } "At least n occurrences of a" }
+ { { $snippet "a{,m}" } "At most m occurrences of a" }
+ { { $snippet "a{n,m}" } "Between n and m occurrences of a" } }
+"All of these quantifiers are " { $emphasis "greedy" } ", meaning that they take as many repetitions as possible within the larger regular expression. Reluctant and possessive quantifiers are not yet supported."
+{ $heading "Lookaround" }
+"Operators are provided to look ahead and behind the current point in the regular expression. These can be used in any context, but they're the most useful at the beginning or end of a regular expression."
+{ $table
+ { { $snippet "(?=a)" } "Asserts that the current position is immediately followed by a" }
+ { { $snippet "(?!a)" } "Asserts that the current position is not immediately followed by a" }
+ { { $snippet "(?<=a)" } "Asserts that the current position is immediately preceded by a" }
+ { { $snippet "(?<!a)" } "Asserts that the current position is not immediately preceded by a" } }
+{ $heading "Quotation" }
+"To make it convenient to have a long string which uses regexp operators, a special syntax is provided. If a substring begins with " { $snippet "\\Q" } " then everything until " { $snippet "\\E" } " is quoted (escaped). For example, " { $snippet "R/ \\Qfoo\\bar|baz()\\E/" } " matches exactly the string " { $snippet "\"foo\\bar|baz()\"" } "."
+{ $heading "Unsupported features" }
"One missing feature is backreferences. This is because of a design decision to allow only regular expressions following the formal theory of regular languages. For more information, see " { $link { "regexp" "theory" } } ". You can create a new regular expression to match a particular string using " { $vocab-link "regexp.combinators" } " and group capture is available to extract parts of a regular expression match." $nl
-"A distinction from Perl is that " { $snippet "\\G" } ", which references the previous match, is not included. This is because that sequence is inherently stateful, and Factor regexps don't hold state." $nl
-"Additionally, none of the operations which embed code into a regexp are supported, as this would require the inclusion of the Factor parser and compiler in any application which wants to expose regexps to the user. None of the casing operations are included, for simplicity." ; ! Also describe syntax, from the beginning
+"Another missing feature is Perl's " { $snippet "\\G" } " syntax, which references the previous match. This is because that sequence is inherently stateful, and Factor regexps don't hold state." $nl
+"None of the operations which embed code into a regexp are supported, as this would require the inclusion of the Factor parser and compiler in any application which wants to expose regexps to the user. None of Perl's casing operations, such as \\L, are included, for simplicity." ; ! Also describe syntax, from the beginning
+
+ARTICLE: { "regexp" "options" } "Regular expression options"
+"When " { $link { "regexp" "construction" } } ", various options can be provided. Options have single-character names. A string of options has one of the following two forms:"
+{ $code "on" "on-off" }
+"The latter syntax allows some options to be disabled. The " { $snippet "on" } " and " { $snippet "off" } " strings name options to be enabled and disabled, respectively."
+$nl
+"The following options are supported:"
+{ $table
+ { "i" { $link case-insensitive } }
+ { "d" { $link unix-lines } }
+ { "m" { $link multiline } }
+ { "s" { $link dotall } }
+ { "r" { $link reversed-regexp } }
+} ;
+
+HELP: case-insensitive
+{ $syntax "R/ .../i" }
+{ $description "On regexps, the " { $snippet "i" } " option makes the match case-insensitive. Currently, this is handled incorrectly with respect to Unicode, as characters like ß do not expand into SS in upper case. This should be fixed in a future version." } ;
+
+HELP: unix-lines
+{ $syntax "R/ .../d" }
+{ $description "With this mode, only newlines (" { $snippet "\\n" } ") are recognized for line breaking. This affects " { $snippet "$" } " and " { $snippet "^" } " when in multiline mode." } ;
+
+HELP: multiline
+{ $syntax "R/ .../m" }
+{ $description "This mode makes the zero-width constraints " { $snippet "$" } " and " { $snippet "^" } " match the beginning or end of a line. Otherwise, they only match the beginning or end of the input text. This can be used together with " { $link dotall } "." } ;
+
+HELP: dotall
+{ $syntax "R/ .../s" }
+{ $description "This mode, traditionally called single line mode, makes " { $snippet "." } " match everything, including line breaks. By default, it does not match line breaking characters. This can be used together with " { $link multiline } "." } ;
+
+HELP: reversed-regexp
+{ $syntax "R/ .../r" }
+{ $description "When running a regexp compiled with this mode, matches will start from the end of the input string, going towards the beginning." } ;
ARTICLE: { "regexp" "theory" } "The theory of regular expressions"
"Far from being just a practical tool invented by Unix hackers, regular expressions were studied formally before computer programs were written to process them." $nl
"The Factor regular expression engine was built with the design decision to support negation and intersection at the expense of backreferences. This lets us have a guaranteed linear-time matching algorithm. Systems like Ragel and Lex also use this algorithm, but in the Factor regular expression engine, all other features of regexps are still present." ;
ARTICLE: { "regexp" "operations" } "Matching operations with regular expressions"
+"Testing if a string matches a regular expression:"
{ $subsection matches? }
+"Finding a match inside a string:"
{ $subsection re-contains? }
{ $subsection first-match }
+"Finding all matches inside a string:"
+{ $subsection count-matches }
{ $subsection all-matching-slices }
{ $subsection all-matching-subseqs }
+"Splitting a string into tokens delimited by a regular expression:"
{ $subsection re-split }
-{ $subsection re-replace }
-{ $subsection count-matches } ;
+"Replacing occurrences of a regular expression with a string:"
+{ $subsection re-replace } ;
+
+ARTICLE: { "regexp" "deploy" } "Regular expressions and the deploy tool"
+"The " { $link "tools.deploy" } " tool has the option to strip out the optimizing compiler from the resulting image. Since regular expressions compile to Factor code, this creates a minor performance-related caveat."
+$nl
+"Regular expressions constructed at runtime from a deployed application will be compiled with the non-optimizing compiler, which is always available because it is built into the Factor VM. This will result in lower performance than when using the optimizing compiler."
+$nl
+"Literal regular expressions constructed at parse time do not suffer from this restriction, since the deployed application is loaded and compiled before anything is stripped out."
+$nl
+"None of this applies to deployed applications which include the optimizing compiler, or code running inside a development image."
+{ $see-also "compiler" { "regexp" "construction" } "deploy-flags" } ;
HELP: <regexp>
{ $values { "string" string } { "regexp" regexp } }
{ $description "Creates a regular expression object, given a string in regular expression syntax. When it is first used for matching, a DFA is compiled, and this DFA is stored for reuse so it is only compiled once." } ;
HELP: <optioned-regexp>
-{ $values { "string" string } { "options" string } { "regexp" regexp } }
+{ $values { "string" string } { "options" "a string of " { $link { "regexp" "options" } } } { "regexp" regexp } }
{ $description "Given a string in regular expression syntax, and a string of options, creates a regular expression object. When it is first used for matching, a DFA is compiled, and this DFA is stored for reuse so it is only compiled once." } ;
HELP: R/
-{ $syntax "R/ foo.*|[a-zA-Z]bar/i" }
-{ $description "Literal syntax for a regular expression. When this syntax is used, the DFA is compiled at compile-time, rather than on first use." } ;
+{ $syntax "R/ foo.*|[a-zA-Z]bar/options" }
+{ $description "Literal syntax for a regular expression. When this syntax is used, the DFA is compiled at compile-time, rather than on first use. The syntax for the " { $snippet "options" } " string is documented in " { $link { "regexp" "options" } } "." } ;
HELP: regexp
{ $class-description "The class of regular expressions. To construct these, see " { $link { "regexp" "construction" } } "." } ;
[ t ] [ "abcdefg" "a(?:bcdefg)" <regexp> matches? ] unit-test
[ 3 ] [ "caba" "(?<=b)a" <regexp> first-match from>> ] unit-test
+
+[ t ] [ "\ra" R/ .^a/ms matches? ] unit-test
+[ f ] [ "\ra" R/ .^a/mds matches? ] unit-test
+[ t ] [ "\na" R/ .^a/ms matches? ] unit-test
+[ t ] [ "\na" R/ .^a/mds matches? ] unit-test
+
+[ t ] [ "a\r" R/ a$./ms matches? ] unit-test
+[ f ] [ "a\r" R/ a$./mds matches? ] unit-test
+[ t ] [ "a\n" R/ a$./ms matches? ] unit-test
+[ t ] [ "a\n" R/ a$./mds matches? ] unit-test
sequences.private strings sets assocs prettyprint.backend
prettyprint.custom make lexer namespaces parser arrays fry locals
regexp.parser splitting sorting regexp.ast regexp.negation
-regexp.compiler words call call.private math.ranges ;
+regexp.compiler compiler.units words call call.private math.ranges ;
IN: regexp
TUPLE: regexp
: match-index-from ( i string regexp -- index/f )
! This word is unsafe. It assumes that i is a fixnum
! and that string is a string.
- dup dfa>> execute-unsafe( index string regexp -- i/f ) ;
+ dup dfa>> execute-unsafe( index string regexp -- i/f ) ; inline
GENERIC: end/start ( string regexp -- end start )
M: regexp end/start drop length 0 ;
GENERIC: compile-regexp ( regex -- regexp )
: regexp-initial-word ( i string regexp -- i/f )
- compile-regexp match-index-from ;
+ [ compile-regexp ] with-compilation-unit match-index-from ;
-: do-compile-regexp ( regexp -- regexp )
+M: regexp compile-regexp ( regexp -- regexp )
dup '[
dup \ regexp-initial-word =
[ drop _ get-ast ast>dfa dfa>word ] when
] change-dfa ;
-M: regexp compile-regexp ( regexp -- regexp )
- do-compile-regexp ;
-
M: reverse-regexp compile-regexp ( regexp -- regexp )
- t backwards? [ do-compile-regexp ] with-variable ;
+ t backwards? [ call-next-method ] with-variable ;
DEFER: compile-next-match
: next-initial-word ( i string regexp -- i start end string )
- compile-next-match do-next-match ;
+ [ compile-next-match ] with-compilation-unit do-next-match ;
: compile-next-match ( regexp -- regexp )
dup '[
dup \ next-initial-word = [
drop _ [ compile-regexp dfa>> def>> ] [ reverse-regexp? ] bi
'[ { array-capacity string regexp } declare _ _ next-match ]
- (( i string regexp -- i start end string )) simple-define-temp
+ (( i string regexp -- i start end string )) define-temp
] when
] change-next-match ;
}
} ;
+ARTICLE: "smtp-gmail" "Setting up SMTP with gmail"
+"If you plan to send all email from the same address, then setting variables in the global namespace is the best option. The code example below takes this approach and is meant to go in your " { $link "factor-boot-rc" } "." $nl
+"Several variables need to be set for sending outgoing mail through gmail. First, we set the login and password to a " { $link <plain-auth> } " tuple with our login. Next, we set the gmail server address with an " { $link <inet> } " object. Finally, we tell the SMTP library to use a secure connection."
+{ $code
+ "USING: smtp namespaces io.sockets ;"
+ ""
+ "\"my.gmail.address@gmail.com\" \"secret-password\" <plain-auth> smtp-auth set-global"
+ ""
+ "\"smtp.gmail.com\" 587 <inet> smtp-server set-global"
+ ""
+ "t smtp-tls? set-global"
+} ;
+
+
ARTICLE: "smtp" "SMTP client library"
"The " { $vocab-link "smtp" } " vocabulary sends e-mail via an SMTP server."
$nl
{ $subsection email }
{ $subsection <email> }
"Sending an email:"
-{ $subsection send-email } ;
+{ $subsection send-email }
+"More topics:"
+{ $subsection "smtp-gmail" } ;
ABOUT: "smtp"
USING: help.markup help.syntax words alien.c-types assocs
-kernel ;
+kernel call call.private tools.deploy.config ;
IN: tools.deploy
ARTICLE: "prepare-deploy" "Preparing to deploy an application"
{ $subsection "deploy-config" }
{ $subsection "deploy-flags" } ;
-ARTICLE: "tools.deploy" "Application deployment"
-"The stand-alone application deployment tool compiles a vocabulary down to a native executable which runs the vocabulary's " { $link POSTPONE: MAIN: } " hook. Deployed executables do not depend on Factor being installed, and do not expose any source code, and thus are suitable for delivering commercial end-user applications."
-$nl
-"For example, we can deploy the " { $vocab-link "hello-world" } " demo which comes with Factor:"
+ARTICLE: "tools.deploy.usage" "Deploy tool usage"
+"Once the necessary deployment flags have been set, the application can be deployed:"
+{ $subsection deploy }
+"For example, you can deploy the " { $vocab-link "hello-ui" } " demo which comes with Factor. Note that this demo already has a deployment configuration, so nothing needs to be configured:"
{ $code "\"hello-ui\" deploy" }
{ $list
{ "On Mac OS X, this yields a program named " { $snippet "Hello world.app" } "." }
{ "On Windows, it yields a directory named " { $snippet "Hello world" } " containing a program named " { $snippet "hello-ui.exe" } "." }
{ "On Unix-like systems (Linux, BSD, Solaris, etc), it yields a directory named " { $snippet "Hello world" } " containing a program named " { $snippet "hello-ui" } "." }
}
-"In all cases, running the program displays a window with a message."
-$nl
+"On all platforms, running the program will display a window with a message." ;
+
+ARTICLE: "tools.deploy.impl" "Deploy tool implementation"
"The deployment tool works by bootstrapping a fresh image, loading the vocabulary into this image, then applying various heuristics to strip the image down to minimal size."
$nl
+"The deploy tool generates " { $emphasis "staging images" } " containing major subsystems, and uses the staging images to derive the final application image. The first time an application is deployed using a major subsystem, such as the UI, a new staging image is made, which can take a few minutes. Subsequent deployments of applications using this subsystem will be much faster." ;
+
+ARTICLE: "tools.deploy.caveats" "Deploy tool caveats"
+{ $heading "Behavior of " { $link boa } }
+"In deployed applications, the " { $link boa } " word does not verify that the parameters on the stack satisfy the tuple's slot declarations, if any. This reduces deploy image size but can make bugs harder to track down. Make sure your program is fully debugged before deployment."
+{ $heading "Behavior of " { $link POSTPONE: execute( } }
+"Similarly, the " { $link POSTPONE: execute( } " word does not check word stack effects in deployed applications, since stack effects are stripped out, and so it behaves exactly like " { $link POSTPONE: execute-unsafe( } "."
+{ $heading "Error reporting" }
+"If the " { $link deploy-reflection } " level in the configuration is low enough, the debugger is stripped out, and error messages can be rather cryptic. Increase the reflection level to get readable error messages."
+{ $heading "Choosing the right deploy flags" }
+"Finding the correct deploy flags is a trial and error process; you must find a tradeoff between deployed image size and correctness. If your program uses dynamic language features, you may need to elect to strip out fewer subsystems in order to have full functionality." ;
+
+ARTICLE: "tools.deploy" "Application deployment"
+"The stand-alone application deployment tool, implemented in the " { $vocab-link "tools.deploy" } " vocabulary, compiles a vocabulary down to a native executable which runs the vocabulary's " { $link POSTPONE: MAIN: } " hook. Deployed executables do not depend on Factor being installed, and do not expose any source code, and thus are suitable for delivering commercial end-user applications."
+$nl
+"Most of the time, the words in the " { $vocab-link "tools.deploy" } " vocabulary should not be used directly; instead, use " { $link "ui.tools.deploy" } "."
+$nl
"You must explicitly specify major subsystems which are required, as well as the level of reflection support needed. This is done by modifying the deployment configuration prior to deployment."
{ $subsection "prepare-deploy" }
-"Once the necessary deployment flags have been set, the application can be deployed:"
-{ $subsection deploy }
-{ $see-also "ui.tools.deploy" } ;
+{ $subsection "tools.deploy.usage" }
+{ $subsection "tools.deploy.impl" }
+{ $subsection "tools.deploy.caveats" } ;
ABOUT: "tools.deploy"
\r
[ ] [ "http://localhost/quit" add-port http-get 2drop ] unit-test\r
\r
-[ ] [\r
- "tools.deploy.test.6" shake-and-bake\r
- run-temp-image\r
-] unit-test\r
-\r
-[ ] [\r
- "tools.deploy.test.7" shake-and-bake\r
- run-temp-image\r
-] unit-test\r
-\r
-[ ] [\r
- "tools.deploy.test.8" shake-and-bake\r
- run-temp-image\r
-] unit-test\r
-\r
-[ ] [\r
- "tools.deploy.test.9" shake-and-bake\r
- run-temp-image\r
-] unit-test\r
-\r
-[ ] [\r
- "tools.deploy.test.10" shake-and-bake\r
- run-temp-image\r
-] unit-test\r
-\r
-[ ] [\r
- "tools.deploy.test.11" shake-and-bake\r
- run-temp-image\r
-] unit-test
\ No newline at end of file
+{\r
+ "tools.deploy.test.6"\r
+ "tools.deploy.test.7"\r
+ "tools.deploy.test.8"\r
+ "tools.deploy.test.9"\r
+ "tools.deploy.test.10"\r
+ "tools.deploy.test.11"\r
+ "tools.deploy.test.12"\r
+} [\r
+ [ ] swap [\r
+ shake-and-bake\r
+ run-temp-image\r
+ ] curry unit-test\r
+] each
\ No newline at end of file
run-file
] when ;
+: strip-call ( -- )
+ "call" vocab [
+ "Stripping stack effect checking from call( and execute(" show
+ "vocab:tools/deploy/shaker/strip-call.factor"
+ run-file
+ ] when ;
+
: strip-cocoa ( -- )
"cocoa" vocab [
"Stripping unused Cocoa methods" show
command-line:main-vocab-hook
compiled-crossref
compiled-generic-crossref
- recompile-hook
- update-tuples-hook
- remake-generics-hook
+ compiler-impl
definition-observers
definitions:crossref
interactive-vocabs
init-stripper
strip-default-methods
strip-libc
+ strip-call
strip-cocoa
strip-debugger
compute-next-methods
--- /dev/null
+! Copyright (C) 2009 Slava Pestov
+! See http://factorcode.org/license.txt for BSD license.
+IN: tools.deploy.shaker.call
+
+IN: call
+USE: call.private
+
+: execute-effect ( word effect -- ) execute-effect-unsafe ; inline
\ No newline at end of file
--- /dev/null
+! Copyright (C) 2009 Slava Pestov.
+! See http://factorcode.org/license.txt for BSD license.
+USING: call math.parser io math ;
+IN: tools.deploy.test.12
+
+: execute-test ( a b w -- c ) execute( a b -- c ) ;
+
+: foo ( -- ) 1 2 \ + execute-test number>string print ;
+
+MAIN: foo
\ No newline at end of file
--- /dev/null
+Slava Pestov
\ No newline at end of file
--- /dev/null
+USING: tools.deploy.config ;
+H{
+ { deploy-c-types? f }
+ { deploy-reflection 1 }
+ { "stop-after-last-window?" t }
+ { deploy-word-props? f }
+ { deploy-math? f }
+ { deploy-unicode? f }
+ { deploy-io 2 }
+ { deploy-ui? f }
+ { deploy-name "tools.deploy.test.12" }
+ { deploy-compiler? f }
+ { deploy-word-defs? f }
+ { deploy-threads? f }
+}
--- /dev/null
+! Copyright (C) 2009 Slava Pestov.
+! See http://factorcode.org/license.txt for BSD license.
+USING: regexp kernel io ;
+IN: tools.deploy.test.13
+
+: regexp-test ( a -- b ) <regexp> "xyz" swap matches? ;
+
+: main ( -- ) "x.z" regexp-test "X" "Y" ? print ;
+
+MAIN: main
\ No newline at end of file
--- /dev/null
+Slava Pestov
\ No newline at end of file
--- /dev/null
+USING: tools.deploy.config ;
+H{
+ { deploy-threads? t }
+ { deploy-compiler? t }
+ { deploy-math? t }
+ { deploy-io 2 }
+ { "stop-after-last-window?" t }
+ { deploy-c-types? f }
+ { deploy-name "tools.deploy.test.13" }
+ { deploy-word-props? f }
+ { deploy-unicode? f }
+ { deploy-word-defs? f }
+ { deploy-reflection 4 }
+ { deploy-ui? f }
+}
vocabs.loader io combinators calendar accessors math.parser
io.streams.string ui.tools.operations quotations strings arrays
prettyprint words vocabs sorting sets classes math alien urls
-splitting ascii combinators.short-circuit ;
+splitting ascii combinators.short-circuit alarms words.symbol ;
IN: tools.scaffold
SYMBOL: developer-name
{ "ch" "a character" }
{ "word" word }
{ "array" array }
+ { "alarm" alarm }
{ "duration" duration }
{ "path" "a pathname string" }
{ "vocab" "a vocabulary specifier" }
: ($values.) ( array -- )
[
- " { " write
+ "{ " write
dup array? [ first ] when
dup lookup-type [
[ unparse write bl ]
] if
] when* ;
+: symbol-description. ( word -- )
+ drop
+ "{ $var-description \"\" } ;" print ;
+
: $description. ( word -- )
drop
"{ $description \"\" } ;" print ;
+: docs-body. ( word/symbol -- )
+ dup symbol? [
+ symbol-description.
+ ] [
+ [ $values. ] [ $description. ] bi
+ ] if ;
+
: docs-header. ( word -- )
"HELP: " write name>> print ;
: (help.) ( word -- )
- [ docs-header. ] [ $values. ] [ $description. ] tri ;
+ [ docs-header. ] [ docs-body. ] bi ;
: interesting-words ( vocab -- array )
words
ui.backend.cocoa.views core-foundation core-foundation.run-loop
core-graphics.types threads math.rectangles fry libc
generalizations alien.c-types cocoa.views
-combinators io.thread locals ;
+combinators io.thread locals call ;
IN: ui.backend.cocoa
TUPLE: handle ;
"UI" assert.app [
[
init-clipboard
- cocoa-init-hook get call
+ cocoa-init-hook get call( -- )
start-ui
f io-thread-running? set-global
init-thread-timer
--- /dev/null
+Slava Pestov
\ No newline at end of file
--- /dev/null
+! Copyright (C) 2009 Slava Pestov.
+! See http://factorcode.org/license.txt for BSD license.
+USING: accessors kernel sequences namespaces ui.gadgets.frames
+ui.pens.image ui.gadgets.icons ui.gadgets.grids ;
+IN: ui.gadgets.corners
+
+CONSTANT: @center { 1 1 }
+CONSTANT: @left { 0 1 }
+CONSTANT: @right { 2 1 }
+CONSTANT: @top { 1 0 }
+CONSTANT: @bottom { 1 2 }
+
+CONSTANT: @top-left { 0 0 }
+CONSTANT: @top-right { 2 0 }
+CONSTANT: @bottom-left { 0 2 }
+CONSTANT: @bottom-right { 2 2 }
+
+SYMBOL: name
+
+: corner-image ( name -- image )
+ [ name get "-" ] dip 3append theme-image ;
+
+: corner-icon ( name -- icon )
+ corner-image <icon> ;
+
+: /-----\ ( corner -- corner )
+ "top-left" corner-icon @top-left grid-add
+ "top-middle" corner-icon @top grid-add
+ "top-right" corner-icon @top-right grid-add ;
+
+: |-----| ( gadget corner -- corner )
+ "left-edge" corner-icon @left grid-add
+ swap @center grid-add
+ "right-edge" corner-icon @right grid-add ;
+
+: \-----/ ( corner -- corner )
+ "bottom-left" corner-icon @bottom-left grid-add
+ "bottom-middle" corner-icon @bottom grid-add
+ "bottom-right" corner-icon @bottom-right grid-add ;
+
+: make-corners ( class name quot -- corners )
+ [ [ [ 3 3 ] dip new-frame { 1 1 } >>filled-cell ] dip name ] dip
+ with-variable ; inline
\ No newline at end of file
--- /dev/null
+IN: ui.gadgets.labeled.tests
+USING: ui.gadgets ui.gadgets.labeled accessors tools.test ;
+
+[ t ] [ <gadget> "Hey" <labeled-gadget> content>> gadget? ] unit-test
\ No newline at end of file
! See http://factorcode.org/license.txt for BSD license.
USING: accessors kernel sequences colors fonts ui.gadgets
ui.gadgets.frames ui.gadgets.grids ui.gadgets.icons ui.gadgets.labels
-ui.gadgets.borders ui.pens.image ;
+ui.gadgets.borders ui.pens.image ui.gadgets.corners ui.render ;
IN: ui.gadgets.labeled
TUPLE: labeled-gadget < frame content ;
<PRIVATE
-CONSTANT: @center { 1 1 }
-CONSTANT: @left { 0 1 }
-CONSTANT: @right { 2 1 }
-CONSTANT: @top { 1 0 }
-CONSTANT: @bottom { 1 2 }
-
-CONSTANT: @top-left { 0 0 }
-CONSTANT: @top-right { 2 0 }
-CONSTANT: @bottom-left { 0 2 }
-CONSTANT: @bottom-right { 2 2 }
-
-: labeled-image ( name -- image )
- "labeled-block-" prepend theme-image ;
-
-: labeled-icon ( name -- icon )
- labeled-image <icon> ;
-
-CONSTANT: labeled-title-background
- T{ rgba f
- 0.7843137254901961
- 0.7686274509803922
- 0.7176470588235294
- 1.0
- }
-
: <labeled-title> ( gadget -- label )
>label
- [ labeled-title-background font-with-background ] change-font
+ [ panel-background-color font-with-background ] change-font
{ 0 2 } <border>
- "title-middle" labeled-image
+ "title-middle" corner-image
<image-pen> t >>fill? >>interior ;
: /-FOO-\ ( title labeled -- labeled )
- "title-left" labeled-icon @top-left grid-add
+ "title-left" corner-icon @top-left grid-add
swap <labeled-title> @top grid-add
- "title-right" labeled-icon @top-right grid-add ;
-
-: |-----| ( gadget labeled -- labeled )
- "left-edge" labeled-icon @left grid-add
- swap [ >>content ] [ @center grid-add ] bi
- "right-edge" labeled-icon @right grid-add ;
-
-: \-----/ ( labeled -- labeled )
- "bottom-left" labeled-icon @bottom-left grid-add
- "bottom-middle" labeled-icon @bottom grid-add
- "bottom-right" labeled-icon @bottom-right grid-add ;
+ "title-right" corner-icon @top-right grid-add ;
M: labeled-gadget focusable-child* content>> ;
PRIVATE>
: <labeled-gadget> ( gadget title -- newgadget )
- 3 3 labeled-gadget new-frame
- { 1 1 } >>filled-cell
+ labeled-gadget "labeled-block" [
+ pick >>content
/-FOO-\
|-----|
- \-----/ ;
+ \-----/
+ ] make-corners ;
! Copyright (C) 2005, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
-USING: colors.constants kernel locals math.rectangles
-namespaces sequences ui.commands ui.gadgets ui.gadgets.borders
-ui.gadgets.buttons ui.gadgets.glass ui.gadgets.packs
-ui.gadgets.worlds ui.gestures ui.operations ui.pens ui.pens.solid
-opengl math.vectors words accessors math math.order sorting ;
+USING: colors.constants kernel locals math.rectangles namespaces
+sequences ui.commands ui.gadgets ui.gadgets.borders ui.gadgets.buttons
+ui.gadgets.glass ui.gadgets.packs ui.gadgets.frames ui.gadgets.worlds
+ui.gadgets.frames ui.gadgets.corners ui.gestures ui.operations
+ui.render ui.pens ui.pens.solid opengl math.vectors words accessors
+math math.order sorting ;
IN: ui.gadgets.menus
: show-menu ( owner menu -- )
dim>> [ { 0 0.5 } v* ] [ { 1 0.5 } v* ] bi
[ [ >integer ] map ] bi@ gl-line ;
+! Packs the menu item gadgets into a <filled-pile> and paints its
+! interior with the shared panel background color.
+: <menu-items> ( items -- gadget )
+ [ <filled-pile> ] dip add-gadgets
+ panel-background-color <solid> >>interior ;
+
PRIVATE>
SINGLETON: ----
: menu-theme ( gadget -- gadget )
COLOR: light-gray <solid> >>interior ;
+! Wraps the item gadgets in a frame decorated with the
+! "menu-background" themed corner/edge images (top, middle and
+! bottom rows added by the /-----\ |-----| \-----/ words).
+: <menu> ( gadgets -- menu )
+ <menu-items>
+ frame "menu-background" [
+ /-----\
+ |-----|
+ \-----/
+ ] make-corners ;
+
: <commands-menu> ( target hook commands -- menu )
- [ <filled-pile> ] 3dip
- [ <menu-item> add-gadget ] with with each
- { 5 5 } <border> menu-theme ;
+ [ <menu-item> ] with with map <menu> ;
: show-commands-menu ( target commands -- )
[ dup [ ] ] dip <commands-menu> show-menu ;
C: <pane-stream> pane-stream
+M: pane-stream stream-element-type drop +character+ ;
+
<PRIVATE
: clear-selection ( pane -- pane )
CONSTANT: selection-color T{ rgba f 0.8 0.8 1.0 1.0 }
+CONSTANT: panel-background-color
+ T{ rgba f
+ 0.7843137254901961
+ 0.7686274509803922
+ 0.7176470588235294
+ 1.0
+ }
+
CONSTANT: focus-border-color COLOR: dark-gray
[ 2drop ] [ [ value>> ] dip show-summary ] if
] [ call-next-method ] if ;
+M: interactor stream-element-type drop +character+ ;
+
GENERIC: (print-input) ( object -- )
M: input (print-input)
"UI update" spawn drop ;
: start-ui ( quot -- )
- call notify-ui-thread start-ui-thread ;
+ call( -- ) notify-ui-thread start-ui-thread ;
: restore-windows ( -- )
[
] "ui" add-init-hook
: with-ui ( quot -- )
- ui-running? [ call ] [ '[ init-ui @ ] (with-ui) ] if ;
+ ui-running? [ call( -- ) ] [ '[ init-ui @ ] (with-ui) ] if ;
HOOK: beep ui-backend ( -- )
\ No newline at end of file
math.parser hash2 math.order byte-arrays words namespaces words
compiler.units parser io.encodings.ascii values interval-maps
ascii sets combinators locals math.ranges sorting make
-strings.parser io.encodings.utf8 ;
+strings.parser io.encodings.utf8 memoize ;
IN: unicode.data
VALUE: simple-lower
"Zs" "Zl" "Zp"
"Cc" "Cf" "Cs" "Co" }
+! Memoized inverse lookup table: category name -> index within the
+! categories sequence. Replaces the per-character linear
+! `categories index` search previously done in process-category.
+MEMO: categories-map ( -- hashtable )
+ categories <enum> [ swap ] H{ } assoc-map-as ;
+
CONSTANT: num-chars HEX: 2FA1E
! the maximum unicode char in the first 3 planes
] assoc-each table ;
:: process-category ( data -- category-listing )
- [let | table [ num-chars <byte-array> ] |
- 2 data (process-data) [| char cat |
- cat categories index char table ?set-nth
- ] assoc-each table fill-ranges ] ;
+ num-chars <byte-array> :> table
+ 2 data (process-data) [| char cat |
+ cat categories-map at char table ?set-nth
+ ] assoc-each table fill-ranges ;
: process-names ( data -- names-hash )
1 swap (process-data) [
[ "foo" ] [ "<!DOCTYPE foo [<!ENTITY bar 'foo'>]><x>&bar;</x>" string>xml children>string ] unit-test
[ T{ xml-chunk f V{ "hello" } } ] [ "hello" string>xml-chunk ] unit-test
[ "1.1" ] [ "<?xml version='1.1'?><x/>" string>xml prolog>> version>> ] unit-test
+[ "ß" ] [ "<x>ß</x>" <string-reader> read-xml children>string ] unit-test
! Copyright (C) 2005, 2009 Daniel Ehrenberg
! See http://factorcode.org/license.txt for BSD license.
-USING: help.markup help.syntax xml.data sequences strings ;
+USING: help.markup help.syntax xml.data sequences strings multiline ;
IN: xml.traversal
ABOUT: "xml.traversal"
ARTICLE: "xml.traversal" "Utilities for traversing XML"
"The " { $vocab-link "xml.traversal" } " vocabulary provides utilities for traversing an XML DOM tree and viewing the contents of a single tag. The following words are defined:"
$nl
- "Note: the difference between deep-tag-named and tag-named is that the former searches recursively among all children and children of children of the tag, while the latter only looks at the direct children, and is therefore more efficient."
+ { $subsection { "xml.traversal" "intro" } }
{ $subsection tag-named }
{ $subsection tags-named }
{ $subsection deep-tag-named }
{ $subsection first-child-tag }
{ $subsection assert-tag } ;
+ARTICLE: { "xml.traversal" "intro" } "An example of XML processing"
+"To illustrate how to use the XML library, we develop a simple Atom parser in Factor. Atom is an XML-based syndication format, like RSS. To see the full version of what we develop here, look at " { $snippet "basis/syndication" } " at the " { $snippet "atom1.0" } " word. First, we want to load a file and get a DOM tree for it."
+{ $code <" "file.xml" file>xml "> }
+"No encoding descriptor is needed, because XML files contain sufficient information to auto-detect the encoding. Next, we want to extract information from the tree. To get the title, we can use the following:"
+{ $code <" "title" tag-named children>string "> }
+"The " { $link tag-named } " word finds the first tag named " { $snippet "title" } " in the top level (just under the main tag). Then, with a tag on the stack, its children are asserted to be a string, and the string is returned." $nl
+"For a slightly more complicated example, we can look at how entries are parsed. To get a sequence of tags with the name " { $snippet "entry" } ":"
+{ $code <" "entry" tags-named "> }
+"Imagine that, for each of these, we want to get the URL of the entry. In Atom, the URLs are in a " { $snippet "link" } " tag which is contained in the " { $snippet "entry" } " tag. There are multiple " { $snippet "link" } " tags, but one of them contains the attribute " { $snippet "rel=alternate" } ", and the " { $snippet "href" } " attribute has the URL. So, given an element of the sequence produced in the above quotation, we run the code:"
+{ $code <" "link" tags-named [ "rel" attr "alternate" = ] find nip "> }
+"to get the link tag on the stack, and"
+{ $code <" "href" attr >url "> }
+"to extract the URL from it." ;
+
HELP: deep-tag-named
{ $values { "tag" "an XML tag or document" } { "name/string" "an XML name or string representing a name" } { "matching-tag" tag } }
{ $description "Finds an XML tag with a matching name, recursively searching children and children of children." }
\r
ARTICLE: { "xml" "reading" } "Reading XML"\r
"The following words are used to read something into an XML document"\r
- { $subsection string>xml }\r
{ $subsection read-xml }\r
{ $subsection read-xml-chunk }\r
+ { $subsection string>xml }\r
{ $subsection string>xml-chunk }\r
{ $subsection file>xml }\r
{ $subsection bytes>xml }\r
{ $subsection pull-event }\r
{ $subsection pull-elem } ;\r
\r
+ARTICLE: { "xml" "namespaces" } "Working with XML namespaces"\r
+"The Factor XML parser implements XML namespaces, and provides convenient utilities for working with them. Anywhere in the public API that a name is accepted as an argument, either a string or an XML name is accepted. If a string is used, it is coerced into a name by giving it a null namespace. Names are stored as " { $link name } " tuples, which have slots for the namespace prefix and namespace URL as well as the main part of the tag name." $nl\r
+"To make it easier to create XML names, the parsing word " { $snippet "XML-NS:" } " is provided in the " { $vocab-link "xml.syntax" } " vocabulary." $nl\r
+"When parsing XML, names are automatically augmented with the appropriate namespace URL when the information is available. This does not take into account any XML schema which might allow for such prefixes to be omitted. When generating XML to be written, keep in mind that the XML writer knows only about the literal prefixes and ignores the URLs. It is your job to make sure that they match up correctly, and that there is the appropriate " { $snippet "xmlns" } " declaration." ;\r
+\r
ARTICLE: "xml" "XML parser"\r
"The " { $vocab-link "xml" } " vocabulary implements the XML 1.0 and 1.1 standards, converting strings of text into XML and vice versa. The parser checks for well-formedness but is not validating. There is only partial support for processing DTDs."\r
{ $subsection { "xml" "reading" } }\r
{ $subsection { "xml" "events" } }\r
+ { $subsection { "xml" "namespaces" } }\r
{ $vocab-subsection "Writing XML" "xml.writer" }\r
{ $vocab-subsection "XML parsing errors" "xml.errors" }\r
{ $vocab-subsection "XML entities" "xml.entities" }\r
io.streams.string kernel namespaces sequences strings io.encodings.utf8
xml.data xml.errors xml.elements ascii xml.entities
xml.writer xml.state xml.autoencoding assocs xml.tokenize
-combinators.short-circuit xml.name splitting io.streams.byte-array ;
+combinators.short-circuit xml.name splitting io.streams.byte-array
+combinators ;
IN: xml
<PRIVATE
xml-stack get first second
] with-state ; inline
+! Common tail of read-xml: quot handles the document prologue
+! (e.g. [ check ] for character streams, or the start-document /
+! encoding-detection logic for byte streams), then the parsed
+! element sequence is wrapped into an XML document.
+! NOTE(review): quot and 0 are consumed by read-seq — confirm its
+! stack effect in xml private words.
+: make-xml ( stream quot -- xml )
+ 0 read-seq make-xml-doc ; inline
+
PRIVATE>
: each-element ( stream quot: ( xml-elem -- ) -- )
] with-state ; inline
: read-xml ( stream -- xml )
- [ start-document [ process ] when* ]
- 0 read-seq make-xml-doc ;
+ dup stream-element-type {
+ { +character+ [ [ check ] make-xml ] }
+ { +byte+ [ [ start-document [ process ] when* ] make-xml ] }
+ } case ;
: read-xml-chunk ( stream -- seq )
[ check ] 1 read-seq <xml-chunk> ;
: string>xml ( string -- xml )
- <string-reader> [ check ] 0 read-seq make-xml-doc ;
+ <string-reader> read-xml ;
: string>xml-chunk ( string -- xml )
<string-reader> read-xml-chunk ;
{ "linux-ppc" "ppc/linux" }
{ "macosx-ppc" "ppc/macosx" }
{ "arm" "arm" }
-} at "/bootstrap.factor" 3append parse-file
+} ?at [ "Bad architecture: " prepend throw ] unless
+"/bootstrap.factor" 3append parse-file
"vocab:bootstrap/layouts/layouts.factor" parse-file
dictionary
new-classes
changed-definitions changed-generics
- remake-generics forgotten-definitions
+ outdated-generics forgotten-definitions
root-cache source-files update-map implementors-map
} [ H{ } clone swap set ] each
! Vocabulary for slot accessors
"accessors" create-vocab drop
-! Trivial recompile hook. We don't want to touch the code heap
-! during stage1 bootstrap, it would just waste time.
-[ drop { } ] recompile-hook set
+dummy-compiler compiler-impl set
call
call
MIXIN: empty-mixin
[ f ] [ "hi" empty-mixin? ] unit-test
+
+MIXIN: move-instance-declaration-mixin
+
+[ ] [ "IN: classes.mixin.tests.a USE: strings USE: classes.mixin.tests INSTANCE: string move-instance-declaration-mixin" <string-reader> "move-mixin-test-1" parse-stream drop ] unit-test
+
+[ ] [ "IN: classes.mixin.tests.b USE: strings USE: classes.mixin.tests INSTANCE: string move-instance-declaration-mixin" <string-reader> "move-mixin-test-2" parse-stream drop ] unit-test
+
+[ ] [ "IN: classes.mixin.tests.a" <string-reader> "move-mixin-test-1" parse-stream drop ] unit-test
+
+[ { string } ] [ move-instance-declaration-mixin members ] unit-test
\ No newline at end of file
-! Copyright (C) 2004, 2008 Slava Pestov.
+! Copyright (C) 2004, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: classes classes.union words kernel sequences
definitions combinators arrays assocs generic accessors ;
drop
] [
[ { } redefine-mixin-class ]
+ [ H{ } clone "instances" set-word-prop ]
[ update-classes ]
- bi
+ tri
] if ;
TUPLE: check-mixin-class class ;
[ [ update-class ] each ]
[ implementors [ remake-generic ] each ] bi ;
+! Adds the class to the mixin's member list, and creates an entry
+! for it (initially f) in the mixin's "instances" word-prop; that
+! entry later stores the INSTANCE: declaration's source location
+! (see the mixin-instance set-where method).
+: (add-mixin-instance) ( class mixin -- )
+ [ [ suffix ] change-mixin-class ]
+ [ [ f ] 2dip "instances" word-prop set-at ]
+ 2bi ;
+
: add-mixin-instance ( class mixin -- )
#! Note: we call update-classes on the new member, not the
#! mixin. This ensures that we only have to update the
#! updated by transitivity; the mixins usages appear in
#! class-usages of the member, now that it's been added.
[ 2drop ] [
- [ [ suffix ] change-mixin-class ] 2keep
- [ nip ] [ [ new-class? ] either? ] 2bi [
- update-classes/new
- ] [
- update-classes
- ] if
+ [ (add-mixin-instance) ] 2keep
+ [ nip ] [ [ new-class? ] either? ] 2bi
+ [ update-classes/new ] [ update-classes ] if
] if-mixin-member? ;
+! Removes the class from the mixin's member list and deletes its
+! source-location entry from the mixin's "instances" word-prop.
+: (remove-mixin-instance) ( class mixin -- )
+ [ [ swap remove ] change-mixin-class ]
+ [ "instances" word-prop delete-at ]
+ 2bi ;
+
: remove-mixin-instance ( class mixin -- )
#! The order of the three clauses is important here. The last
#! one must come after the other two so that the entries it
#! adds to changed-generics are not overwritten.
[
- [ [ swap remove ] change-mixin-class ]
+ [ (remove-mixin-instance) ]
[ nip update-classes ]
[ class-usages update-methods ]
2tri
! Definition protocol implementation ensures that removing an
! INSTANCE: declaration from a source file updates the mixin.
-TUPLE: mixin-instance loc class mixin ;
-
-M: mixin-instance equal?
- {
- { [ over mixin-instance? not ] [ f ] }
- { [ 2dup [ class>> ] bi@ = not ] [ f ] }
- { [ 2dup [ mixin>> ] bi@ = not ] [ f ] }
- [ t ]
- } cond 2nip ;
+TUPLE: mixin-instance class mixin ;
-M: mixin-instance hashcode*
- [ class>> ] [ mixin>> ] bi 2array hashcode* ;
+C: <mixin-instance> mixin-instance
-: <mixin-instance> ( class mixin -- definition )
- mixin-instance new
- swap >>mixin
- swap >>class ;
+: >mixin-instance< ( mixin-instance -- class mixin )
+ [ class>> ] [ mixin>> ] bi ; inline
-M: mixin-instance where loc>> ;
+M: mixin-instance where >mixin-instance< "instances" word-prop at ;
-M: mixin-instance set-where (>>loc) ;
+M: mixin-instance set-where >mixin-instance< "instances" word-prop set-at ;
M: mixin-instance definer drop \ INSTANCE: f ;
M: mixin-instance definition drop f ;
M: mixin-instance forget*
- [ class>> ] [ mixin>> ] bi
+ >mixin-instance<
dup mixin-class? [ remove-mixin-instance ] [ 2drop ] if ;
namespaces make sequences sequences.private strings vectors
words quotations memory combinators generic classes
classes.algebra classes.builtin classes.private slots.private
-slots compiler.units math.private accessors assocs effects ;
+slots math.private accessors assocs effects ;
IN: classes.tuple
PREDICATE: tuple-class < class
: apply-slot-permutation ( old-values triples -- new-values )
[ first3 update-slot ] with map ;
+SYMBOL: outdated-tuples
+
: permute-slots ( old-values layout -- new-values )
[ first all-slots ] [ outdated-tuples get at ] bi
compute-slot-permutation
dup [ update-tuple ] map become
] if ;
-[ update-tuples ] update-tuples-hook set-global
-
: update-tuples-after ( class -- )
[ all-slots ] [ tuple-layout ] bi outdated-tuples get set-at ;
"Forward reference checking (see " { $link "definition-checking" } "):"
{ $subsection forward-reference? }
"A hook to be called at the end of the compilation unit. If the optimizing compiler is loaded, this compiles new words with the " { $link "compiler" } ":"
-{ $subsection recompile-hook }
+{ $subsection recompile }
"Low-level compiler interface exported by the Factor VM:"
{ $subsection modify-code-heap } ;
$nl
"Since compilation is relatively expensive, you should try to batch up as many definitions into one compilation unit as possible." } ;
-HELP: recompile-hook
-{ $var-description "Quotation with stack effect " { $snippet "( words -- )" } ", called at the end of " { $link with-compilation-unit } "." } ;
+HELP: recompile
+{ $values { "words" "a sequence of words" } { "alist" "an association list mapping words to compiled definitions" } }
+{ $contract "Internal word which compiles words. Called at the end of " { $link with-compilation-unit } "." } ;
HELP: no-compilation-unit
{ $values { "word" word } }
USING: definitions compiler.units tools.test arrays sequences words kernel
accessors namespaces fry ;
+[ [ [ ] define-temp ] with-compilation-unit ] must-infer
+[ [ [ ] define-temp ] with-nested-compilation-unit ] must-infer
+
[ flushed-dependency ] [ f flushed-dependency strongest-dependency ] unit-test
[ flushed-dependency ] [ flushed-dependency f strongest-dependency ] unit-test
[ inlined-dependency ] [ flushed-dependency inlined-dependency strongest-dependency ] unit-test
-! Copyright (C) 2008 Slava Pestov.
+! Copyright (C) 2008, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: accessors arrays kernel continuations assocs namespaces
sequences words vocabs definitions hashtables init sets
-math math.order classes classes.algebra ;
+math math.order classes classes.algebra classes.tuple
+classes.tuple.private generic ;
IN: compiler.units
SYMBOL: old-definitions
[ new-definitions get assoc-stack not ]
[ drop f ] if ;
-SYMBOL: recompile-hook
+SYMBOL: compiler-impl
+
+HOOK: recompile compiler-impl ( words -- alist )
+
+! Non-optimizing compiler
+M: f recompile [ f ] { } map>assoc ;
+
+! Trivial compiler. We don't want to touch the code heap
+! during stage1 bootstrap, it would just waste time.
+SINGLETON: dummy-compiler
+
+M: dummy-compiler recompile drop { } ;
: <definitions> ( -- pair ) { H{ } H{ } } [ clone ] map ;
dup changed-definitions get update
dup dup changed-vocabs update ;
-: compile ( words -- )
- recompile-hook get call modify-code-heap ;
-
-SYMBOL: outdated-tuples
-SYMBOL: update-tuples-hook
-SYMBOL: remake-generics-hook
+: compile ( words -- ) recompile modify-code-heap ;
: index>= ( obj1 obj2 seq -- ? )
[ index ] curry bi@ >= ;
changed-generics get compiled-generic-usages
append assoc-combine keys ;
-: call-recompile-hook ( -- )
- to-recompile recompile-hook get call ;
-
-: call-remake-generics-hook ( -- )
- remake-generics-hook get call ;
-
-: call-update-tuples-hook ( -- )
- update-tuples-hook get call ;
-
: unxref-forgotten-definitions ( -- )
forgotten-definitions get
keys [ word? ] filter
[ delete-compiled-xref ] each ;
: finish-compilation-unit ( -- )
- call-remake-generics-hook
- call-recompile-hook
- call-update-tuples-hook
+ remake-generics
+ to-recompile recompile
+ update-tuples
unxref-forgotten-definitions
modify-code-heap ;
[
H{ } clone changed-definitions set
H{ } clone changed-generics set
- H{ } clone remake-generics set
+ H{ } clone outdated-generics set
H{ } clone outdated-tuples set
H{ } clone new-classes set
[ finish-compilation-unit ] [ ] cleanup
[
H{ } clone changed-definitions set
H{ } clone changed-generics set
- H{ } clone remake-generics set
+ H{ } clone outdated-generics set
H{ } clone forgotten-definitions set
H{ } clone outdated-tuples set
H{ } clone new-classes set
notify-definition-observers
] [ ] cleanup
] with-scope ; inline
-
-: default-recompile-hook ( words -- alist )
- [ f ] { } map>assoc ;
-
-recompile-hook [ [ default-recompile-hook ] ] initialize
SYMBOL: changed-generics
-SYMBOL: remake-generics
+SYMBOL: outdated-generics
SYMBOL: new-classes
generic.math assocs hashtables io kernel math namespaces parser
prettyprint sequences strings tools.test vectors words
quotations classes classes.algebra classes.tuple continuations
-layouts classes.union sorting compiler.units eval multiline ;
+layouts classes.union sorting compiler.units eval multiline
+io.streams.string ;
IN: generic.tests
GENERIC: foobar ( x -- y )
[ ] [ [ { integer c-n-m-cache } forget ] with-compilation-unit ] unit-test
[ 2 ] [ 2 c-n-m-cache ] unit-test
+
+! Moving a method from one vocab to another doesn't always work
+GENERIC: move-method-generic ( a -- b )
+
+[ ] [ "IN: generic.tests.a USE: strings USE: generic.tests M: string move-method-generic ;" <string-reader> "move-method-test-1" parse-stream drop ] unit-test
+
+[ ] [ "IN: generic.tests.b USE: strings USE: generic.tests M: string move-method-generic ;" <string-reader> "move-method-test-2" parse-stream drop ] unit-test
+
+[ ] [ "IN: generic.tests.a" <string-reader> "move-method-test-1" parse-stream drop ] unit-test
+
+[ { string } ] [ \ move-method-generic order ] unit-test
\ No newline at end of file
USING: accessors words kernel sequences namespaces make assocs
hashtables definitions kernel.private classes classes.private
classes.algebra quotations arrays vocabs effects combinators
-sets compiler.units ;
+sets ;
IN: generic
! Method combination protocol
[ dup "combination" word-prop perform-combination ]
bi ;
-[
- remake-generics get keys
- [ generic? ] filter [ make-generic ] each
-] remake-generics-hook set-global
-
: method ( class generic -- method/f )
"methods" word-prop at ;
[ [ [ class-or ] when* ] change-at ] [ no-compilation-unit ] if* ;
: remake-generic ( generic -- )
- dup remake-generics get set-in-unit ;
+ dup outdated-generics get set-in-unit ;
+
+! Rebuilds dispatch for every generic word recorded as outdated
+! (via remake-generic) during the current compilation unit; run at
+! the end of the unit, before recompilation.
+: remake-generics ( -- )
+ outdated-generics get keys [ generic? ] filter [ make-generic ] each ;
: with-methods ( class generic quot -- )
[ drop changed-generic ]
"Combinators to change the encoding:"
{ $subsection with-encoded-output }
{ $subsection with-decoded-input }
-{ $see-also "encodings-introduction" "stream-elements" } ;
+{ $see-also "encodings-introduction" } ;
ABOUT: "io.encodings"
] when
] when nip ; inline
+M: decoder stream-element-type
+ drop +character+ ;
+
M: decoder stream-read1
dup >decoder< decode-char fix-read1 ;
: >encoder< ( encoder -- stream encoding )
[ stream>> ] [ code>> ] bi ; inline
+M: encoder stream-element-type
+ drop +character+ ;
+
M: encoder stream-write1
>encoder< encode-char ;
classes strings continuations destructors math byte-arrays ;
IN: io
+HELP: +byte+
+{ $description "A stream element type. See " { $link stream-element-type } " for explanation." } ;
+
+HELP: +character+
+{ $description "A stream element type. See " { $link stream-element-type } " for explanation." } ;
+
+HELP: stream-element-type
+{ $values { "stream" "a stream" } { "type" { $link +byte+ } " or " { $link +character+ } } }
+{ $description
+ "Outputs one of the following two values:"
+ { $list
+ { { $link +byte+ } " - indicates that stream elements are integers in the range " { $snippet "[0,255]" } "; they represent bytes. Reading a sequence of elements produces a " { $link byte-array } "." }
+ { { $link +character+ } " - indicates that stream elements are non-negative integers, representing Unicode code points. Reading a sequence of elements produces a " { $link string } "." }
+ }
+ "Most external streams are binary streams, and can be wrapped in string streams once a suitable encoding has been provided; see " { $link "io.encodings" } "."
+
+} ;
+
HELP: stream-readln
{ $values { "stream" "an input stream" } { "str/f" "a string or " { $link f } } }
{ $contract "Reads a line of input from the stream. Outputs " { $link f } " on stream exhaustion." }
{ $description "Copies the contents of one stream into another, closing both streams when done." }
$io-error ;
-
HELP: stream-seek
{ $values
{ "n" integer } { "seek-type" "a seek singleton" } { "stream" "a stream" }
$nl
"All streams must implement the " { $link dispose } " word in addition to the stream protocol."
$nl
+"The following word is required for all input and output streams:"
+{ $subsection stream-element-type }
"These words are required for binary and string input streams:"
{ $subsection stream-read1 }
{ $subsection stream-read }
{ $subsection stream-nl }
"This word is for streams that allow seeking:"
{ $subsection stream-seek }
-"For a discussion of the distinction between binary and string streams, see " { $link "stream-elements" } "."
{ $see-also "io.timeouts" } ;
ARTICLE: "stdio-motivation" "Motivation for default streams"
{ $subsection read }
{ $subsection read-until }
{ $subsection read-partial }
-"If the default input stream is a string stream (" { $link "stream-elements" } "), lines of text can be read:"
+"If the default input stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be read:"
{ $subsection readln }
"Seeking on the default input stream:"
{ $subsection seek-input }
{ $subsection flush }
{ $subsection write1 }
{ $subsection write }
-"If the default output stream is a string stream (" { $link "stream-elements" } "), lines of text can be written:"
+"If the default output stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be written:"
{ $subsection readln }
{ $subsection print }
{ $subsection nl }
"Copying the contents of one stream to another:"
{ $subsection stream-copy } ;
-ARTICLE: "stream-elements" "Stream elements"
-"There are two types of streams:"
-{ $list
- { { $strong "Binary streams" } " - the elements are integers between 0 and 255, inclusive; they represent bytes. Reading a sequence of elements produces a " { $link byte-array } "." }
- { { $strong "String streams" } " - the elements are non-negative integers, representing Unicode code points. Reading a sequence of elements produces a " { $link string } "." }
-}
-"Most external streams are binary streams, and can be wrapped in string streams once a suitable encoding has been provided; see " { $link "io.encodings" } "." ;
-
ARTICLE: "streams" "Streams"
-"Input and output centers on the concept of a " { $emphasis "stream" } ", which is a source or sink of elements."
-{ $subsection "stream-elements" }
+"Input and output centers on the concept of a " { $emphasis "stream" } ", which is a source or sink of " { $emphasis "elements" } "."
+$nl
"A stream can either be passed around on the stack or bound to a dynamic variable and used as one of the two implicit " { $emphasis "default streams" } "."
{ $subsection "stream-protocol" }
{ $subsection "stdio" }
continuations destructors assocs ;
IN: io
+SYMBOLS: +byte+ +character+ ;
+
+GENERIC: stream-element-type ( stream -- type )
+
GENERIC: stream-read1 ( stream -- elt )
GENERIC: stream-read ( n stream -- seq )
GENERIC: stream-read-until ( seps stream -- seq sep/f )
: <c-writer> ( handle -- stream ) f c-writer boa ;
-M: c-writer stream-write1
- dup check-disposed
- handle>> fputc ;
+M: c-writer stream-element-type drop +byte+ ;
-M: c-writer stream-write
- dup check-disposed
- handle>> fwrite ;
+M: c-writer stream-write1 dup check-disposed handle>> fputc ;
-M: c-writer stream-flush
- dup check-disposed
- handle>> fflush ;
+M: c-writer stream-write dup check-disposed handle>> fwrite ;
-M: c-writer dispose*
- handle>> fclose ;
+M: c-writer stream-flush dup check-disposed handle>> fflush ;
+
+M: c-writer dispose* handle>> fclose ;
TUPLE: c-reader handle disposed ;
: <c-reader> ( handle -- stream ) f c-reader boa ;
-M: c-reader stream-read
- dup check-disposed
- handle>> fread ;
+M: c-reader stream-element-type drop +byte+ ;
-M: c-reader stream-read-partial
- stream-read ;
+M: c-reader stream-read dup check-disposed handle>> fread ;
-M: c-reader stream-read1
- dup check-disposed
- handle>> fgetc ;
+M: c-reader stream-read-partial stream-read ;
+
+M: c-reader stream-read1 dup check-disposed handle>> fgetc ;
: read-until-loop ( stream delim -- ch )
over stream-read1 dup [
M: null-stream dispose drop ;
+M: null-reader stream-element-type drop +byte+ ;
M: null-reader stream-readln drop f ;
M: null-reader stream-read1 drop f ;
M: null-reader stream-read-until 2drop f f ;
M: null-reader stream-read 2drop f ;
+M: null-writer stream-element-type drop +byte+ ;
M: null-writer stream-write1 2drop ;
M: null-writer stream-write 2drop ;
M: null-writer stream-flush drop ;
! Copyright (C) 2009 Daniel Ehrenberg
! See http://factorcode.org/license.txt for BSD license.
-USING: sequences io kernel accessors math math.order ;
+USING: sequences io io.streams.plain kernel accessors math math.order
+growable destructors ;
IN: io.streams.sequence
+! Readers
SLOT: underlying
SLOT: i
: sequence-read-until ( separators stream -- seq sep/f )
[ find-sep ] keep
[ sequence-read ] [ next ] bi swap ; inline
+
+! Writers: any growable sequence can act as an output stream,
+! accumulating written elements in place. dispose and flush are
+! no-ops since there is no underlying resource to release or drain.
+M: growable dispose drop ;
+
+M: growable stream-write1 push ;
+M: growable stream-write push-all ;
+M: growable stream-flush drop ;
+
+! plain-writer supplies the default formatted-output behavior.
+INSTANCE: growable plain-writer
\ No newline at end of file
sequences strings io.files io.pathnames definitions
continuations sorting classes.tuple compiler.units debugger
vocabs vocabs.loader accessors eval combinators lexer
-vocabs.parser words.symbol ;
+vocabs.parser words.symbol multiline ;
IN: parser.tests
\ run-file must-infer
! Two similar bugs
! Replace : def with something in << >>
-[ [ ] ] [
+/* [ [ ] ] [
"IN: parser.tests : was-once-a-word-bug ( -- ) ;"
<string-reader> "was-once-a-word-test" parse-stream
] unit-test
<string-reader> "was-once-a-word-test" parse-stream
] unit-test
-[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test
+[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test */
! Replace : def with DEFER:
[ [ ] ] [
ui.gadgets.buttons\r
ui.gadgets.packs\r
ui.gadgets.grids\r
+ ui.gadgets.corners\r
ui.gestures\r
ui.gadgets.scrollers\r
splitting\r
! menu\r
! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r
\r
-USE: ui.gadgets.labeled.private\r
-\r
: menu-rotations-4D ( -- gadget )\r
3 3 <frame>\r
{ 1 1 } >>filled-cell\r
-4DNav : simmple tool to navigate thru a 4D space view as projections on 4 3D spaces.
\ No newline at end of file
+Simple tool to navigate through a 4D space with projections on 4 3D spaces
[
[
[ [ 1array $vocab-link ] with-cell ]
- [ [ 1000000 /f pprint-cell ] [ "failed" write ] if* ] bi*
+ [ [ 1000000 /f pprint-cell ] [ [ "failed" write ] with-cell ] if* ] bi*
] with-row
] assoc-each
- ] tabular-output ;
+ ] tabular-output nl ;
: benchmarks ( -- )
run-benchmarks benchmarks. ;
--- /dev/null
+USING: accessors arrays cocoa.dialogs combinators continuations
+fry grouping io.encodings.utf8 io.files io.styles kernel math
+math.parser models models.arrow models.history namespaces random
+sequences splitting ui ui.gadgets.alerts ui.gadgets.book-extras
+ui.gadgets.books ui.gadgets.buttons ui.gadgets.frames
+ui.gadgets.grids ui.gadgets.labels ui.gadgets.tracks ui.gestures
+ui.gadgets.corners ;
+
+IN: drills
+! Globals: `it` holds the current deck model, `startLength` the
+! initial number of cards (used for the progress counter).
+SYMBOLS: it startLength ;
+! Applies a large 30-point sans-serif font to a gadget.
+: big ( gadget -- gadget ) { "sans-serif" plain 30 } >>font ;
+! A label control over the model mapped through quot; clicking it
+! advances the enclosing book to the next page.
+: card ( model quot -- button ) <arrow> <label-control> big [ next ] <book-btn> ;
+! A beveled book button labeled str that runs quot when clicked.
+: op ( quot str -- gadget ) <label> big swap <book-bevel-btn> ;
+
+! Builds the main drill gadget from the deck model: a randomly
+! chosen card's question and answer pages, Yes/No buttons (Yes
+! removes the current card from the deck in `it`, alerting when the
+! deck empties; No just retriggers model-changed to redraw), all in
+! a book inside a 3x3 frame, with a done/total progress label at
+! the bottom.
+: show ( model -- gadget ) dup it set-global [ random ] <arrow>
+ { [ [ first ] card ]
+ [ [ [ second ] [ drop [ "malformed input" throw ] "Malformed Input" alert ] recover ] card ]
+ [ '[ |<< [ it get [
+ _ value>> swap remove
+ [ [ it get go-back ] "Drill Complete" alert return ] when-empty
+ ] change-model ] with-return ] "Yes" op ]
+ [ '[ |<< it get _ model-changed ] "No" op ] } cleave
+2array { 1 0 } <track> swap [ 0.5 track-add ] each
+3array <book*> 3 3 <frame> { 450 175 } >>pref-dim swap @center grid-add
+it get [ length startLength get swap - number>string "/" startLength get number>string 3append ] <arrow> <label-control> @bottom grid-add ;
+
+! Entry point: prompts for drill files via open-panel, reads each
+! as UTF-8 lines of tab-separated fields (fields further split and
+! regrouped on spaces), builds question/answer pairs plus their
+! reversed pairs, records the deck size in startLength, wraps the
+! deck in a history model and opens the drill window.
+: drill ( -- ) [
+ open-panel [
+ [ utf8 file-lines [ "\t" split
+ [ " " split 4 group [ " " join ] map ] map ] map ] map concat dup [ [ first ] [ second ] bi swap 2array ] map append
+ [ length startLength set-global ] keep <history> [ add-history ] [ show ] bi
+ "Got it?" open-window
+ ] when*
+] with-ui ;
+
+
+MAIN: drill
+
+
+! FIXME: command-line opening
+! TODO: Menu bar
+! TODO: Pious hot-buttons
\ No newline at end of file
--- /dev/null
+unportable
: fjsc-compile* ( string -- string )
'statement' parse ast>> fjsc-compile ;
-: fc* ( string -- string )
+: fc* ( string -- )
[
- 'statement' parse ast>> values>> do-expressions
+ 'statement' parse ast>> values>> do-expressions
] { } make [ write ] each ;
IN: game-input.tests
-USING: game-input tools.test kernel system ;
+USING: game-input tools.test kernel system threads ;
os windows? os macosx? or [
[ ] [ open-game-input ] unit-test
+ [ ] [ yield ] unit-test
[ ] [ close-game-input ] unit-test
] when
\ No newline at end of file
: db-path ( -- path ) "IpToCountry.csv" temp-file ;
-: db-url ( -- url ) "http://software77.net/cgi-bin/ip-country/geo-ip.pl?action=download" ;
+CONSTANT: db-url "http://software77.net/cgi-bin/ip-country/geo-ip.pl?action=download"
: download-db ( -- path )
db-path dup exists? [
--- /dev/null
+Doug Coleman
\ No newline at end of file
--- /dev/null
+! Copyright (C) 2009 Doug Coleman.
+! See http://factorcode.org/license.txt for BSD license.
+USING: combinators combinators.smart csv io.encodings.8-bit
+math.parser memoize sequences kernel unicode.categories money ;
+IN: geobytes
+
+! GeoBytes is not free software.
+! Please read their license should you choose to use it.
+! This is just a binding to the GeoBytes CSV files.
+! Download and install GeoBytes yourself should you wish to use it.
+! http://www.geobytes.com/GeoWorldMap.zip
+
+CONSTANT: geobytes-cities-path "resource:GeoWorldMap/Cities.txt"
+CONSTANT: geobytes-countries-path "resource:GeoWorldMap/Countries.txt"
+CONSTANT: geobytes-regions-path "resource:GeoWorldMap/Regions.txt"
+CONSTANT: geobytes-version-path "resource:GeoWorldMap/version.txt"
+
+TUPLE: country country-id country fips104 iso2 iso3 ison internet capital map-reference
+nationality-singular nationality-plural currency currency-code population title
+comment ;
+
+TUPLE: region region-id country-id region code adm1-code ;
+
+TUPLE: city city-id country-id region-id city longitude latitude timezone code ;
+
+TUPLE: version component version rows ;
+
+MEMO: load-countries ( -- seq )
+ geobytes-countries-path latin1 file>csv rest-slice [
+ [
+ {
+ [ string>number ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ ]
+ [ string>number ]
+ [ ]
+ [ ]
+ } spread country boa
+ ] input<sequence
+ ] map ;
+
+MEMO: load-regions ( -- seq )
+ geobytes-regions-path latin1 file>csv rest-slice [
+ [
+ {
+ [ string>number ]
+ [ string>number ]
+ [ ]
+ [ ]
+ [ [ blank? ] trim ]
+ } spread region boa
+ ] input<sequence
+ ] map ;
+
+MEMO: load-cities ( -- seq )
+ geobytes-cities-path latin1 file>csv rest-slice [
+ [
+ {
+ [ string>number ]
+ [ string>number ]
+ [ string>number ]
+ [ ]
+ [ parse-decimal ]
+ [ parse-decimal ]
+ [ ]
+ [ string>number ]
+ } spread city boa
+ ] input<sequence
+ ] map ;
+
+MEMO: load-version ( -- seq )
+ geobytes-version-path latin1 file>csv rest-slice [
+ [
+ {
+ [ ]
+ [ ]
+ [ string>number ]
+ } spread version boa
+ ] input<sequence
+ ] map ;
--- /dev/null
+City, country, and region database using the data from http://www.geobytes.com/GeoWorldMap.zip
--- /dev/null
+enterprise
[ "hello" ] [ "hello" [ take-rest ] string-parse ] unit-test
[ "hi" " how are you?" ] [ "hi how are you?" [ [ get-char blank? ] take-until take-rest ] string-parse ] unit-test
[ "foo" ";bar" ] [ "foo;bar" [ CHAR: ; take-char take-rest ] string-parse ] unit-test
-! [ "foo " " bar" ] [ "foo and bar" [ "and" take-string take-rest ] string-parse ] unit-test
+[ "foo " " bar" ]
+[ "foo and bar" [ "and" take-string take-rest ] string-parse ] unit-test
] [ drop ] if ; inline recursive
: take-until ( quot: ( -- ? ) -- )
- [ get-i ] dip skip-until get-i
+ get-i [ skip-until ] dip get-i
state get string>> subseq ;
: string-matches? ( string circular -- ? )
- get-char over push-circular sequence= ;
+ get-char over push-growing-circular sequence= ;
: take-string ( match -- string )
- dup length <circular-string>
+ dup length <growing-circular>
[ 2dup string-matches? ] take-until nip
dup length rot length 1- - head next ;
--- /dev/null
+Slava Pestov
\ No newline at end of file
--- /dev/null
+IN: method-chains.tests
+USING: method-chains tools.test arrays strings sequences kernel namespaces ;
+
+GENERIC: testing ( a b -- c )
+
+M: sequence testing nip reverse ;
+AFTER: string testing append ;
+BEFORE: array testing over prefix "a" set ;
+
+[ V{ 3 2 1 } ] [ 3 V{ 1 2 3 } testing ] unit-test
+[ "heyyeh" ] [ 4 "yeh" testing ] unit-test
+[ { 4 2 0 } ] [ 5 { 0 2 4 } testing ] unit-test
+[ { 5 0 2 4 } ] [ "a" get ] unit-test
\ No newline at end of file
--- /dev/null
+! Copyright (C) 2009 Slava Pestov.
+! See http://factorcode.org/license.txt for BSD license.
+USING: kernel generic generic.parser words fry ;
+IN: method-chains
+
+: AFTER: (M:) dupd '[ [ _ (call-next-method) ] _ bi ] define ; parsing
+: BEFORE: (M:) over '[ _ [ _ (call-next-method) ] bi ] define ; parsing
+++ /dev/null
-Doug Coleman
-Slava Pestov
+++ /dev/null
-USING: parser-combinators.regexp tools.test kernel ;
-IN: parser-combinators.regexp.tests
-
-[ f ] [ "b" "a*" f <regexp> matches? ] unit-test
-[ t ] [ "" "a*" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a*" f <regexp> matches? ] unit-test
-[ t ] [ "aaaaaaa" "a*" f <regexp> matches? ] unit-test
-[ f ] [ "ab" "a*" f <regexp> matches? ] unit-test
-
-[ t ] [ "abc" "abc" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a|b|c" f <regexp> matches? ] unit-test
-[ t ] [ "b" "a|b|c" f <regexp> matches? ] unit-test
-[ t ] [ "c" "a|b|c" f <regexp> matches? ] unit-test
-[ f ] [ "c" "d|e|f" f <regexp> matches? ] unit-test
-
-[ f ] [ "aa" "a|b|c" f <regexp> matches? ] unit-test
-[ f ] [ "bb" "a|b|c" f <regexp> matches? ] unit-test
-[ f ] [ "cc" "a|b|c" f <regexp> matches? ] unit-test
-[ f ] [ "cc" "d|e|f" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "a+" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a+" f <regexp> matches? ] unit-test
-[ t ] [ "aa" "a+" f <regexp> matches? ] unit-test
-
-[ t ] [ "" "a?" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a?" f <regexp> matches? ] unit-test
-[ f ] [ "aa" "a?" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "." f <regexp> matches? ] unit-test
-[ t ] [ "a" "." f <regexp> matches? ] unit-test
-[ t ] [ "." "." f <regexp> matches? ] unit-test
-! [ f ] [ "\n" "." f <regexp> matches? ] unit-test
-
-[ f ] [ "" ".+" f <regexp> matches? ] unit-test
-[ t ] [ "a" ".+" f <regexp> matches? ] unit-test
-[ t ] [ "ab" ".+" f <regexp> matches? ] unit-test
-
-[ t ] [ "" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-[ t ] [ "c" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-[ t ] [ "cc" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-[ f ] [ "ccd" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-[ t ] [ "d" "a|b*|c+|d?" f <regexp> matches? ] unit-test
-
-[ t ] [ "foo" "foo|bar" f <regexp> matches? ] unit-test
-[ t ] [ "bar" "foo|bar" f <regexp> matches? ] unit-test
-[ f ] [ "foobar" "foo|bar" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "(a)" f <regexp> matches? ] unit-test
-[ t ] [ "a" "(a)" f <regexp> matches? ] unit-test
-[ f ] [ "aa" "(a)" f <regexp> matches? ] unit-test
-[ t ] [ "aa" "(a*)" f <regexp> matches? ] unit-test
-
-[ f ] [ "aababaaabbac" "(a|b)+" f <regexp> matches? ] unit-test
-[ t ] [ "ababaaabba" "(a|b)+" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "a{1}" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a{1}" f <regexp> matches? ] unit-test
-[ f ] [ "aa" "a{1}" f <regexp> matches? ] unit-test
-
-[ f ] [ "a" "a{2,}" f <regexp> matches? ] unit-test
-[ t ] [ "aaa" "a{2,}" f <regexp> matches? ] unit-test
-[ t ] [ "aaaa" "a{2,}" f <regexp> matches? ] unit-test
-[ t ] [ "aaaaa" "a{2,}" f <regexp> matches? ] unit-test
-
-[ t ] [ "" "a{,2}" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a{,2}" f <regexp> matches? ] unit-test
-[ t ] [ "aa" "a{,2}" f <regexp> matches? ] unit-test
-[ f ] [ "aaa" "a{,2}" f <regexp> matches? ] unit-test
-[ f ] [ "aaaa" "a{,2}" f <regexp> matches? ] unit-test
-[ f ] [ "aaaaa" "a{,2}" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "a{1,3}" f <regexp> matches? ] unit-test
-[ t ] [ "a" "a{1,3}" f <regexp> matches? ] unit-test
-[ t ] [ "aa" "a{1,3}" f <regexp> matches? ] unit-test
-[ t ] [ "aaa" "a{1,3}" f <regexp> matches? ] unit-test
-[ f ] [ "aaaa" "a{1,3}" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "[a]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[a]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[abc]" f <regexp> matches? ] unit-test
-[ f ] [ "b" "[a]" f <regexp> matches? ] unit-test
-[ f ] [ "d" "[abc]" f <regexp> matches? ] unit-test
-[ t ] [ "ab" "[abc]{1,2}" f <regexp> matches? ] unit-test
-[ f ] [ "abc" "[abc]{1,2}" f <regexp> matches? ] unit-test
-
-[ f ] [ "" "[^a]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[^a]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[^abc]" f <regexp> matches? ] unit-test
-[ t ] [ "b" "[^a]" f <regexp> matches? ] unit-test
-[ t ] [ "d" "[^abc]" f <regexp> matches? ] unit-test
-[ f ] [ "ab" "[^abc]{1,2}" f <regexp> matches? ] unit-test
-[ f ] [ "abc" "[^abc]{1,2}" f <regexp> matches? ] unit-test
-
-[ t ] [ "]" "[]]" f <regexp> matches? ] unit-test
-[ f ] [ "]" "[^]]" f <regexp> matches? ] unit-test
-
-! [ "^" "[^]" f <regexp> matches? ] must-fail
-[ t ] [ "^" "[]^]" f <regexp> matches? ] unit-test
-[ t ] [ "]" "[]^]" f <regexp> matches? ] unit-test
-
-[ t ] [ "[" "[[]" f <regexp> matches? ] unit-test
-[ f ] [ "^" "[^^]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[^^]" f <regexp> matches? ] unit-test
-
-[ t ] [ "-" "[-]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[-]" f <regexp> matches? ] unit-test
-[ f ] [ "-" "[^-]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[^-]" f <regexp> matches? ] unit-test
-
-[ t ] [ "-" "[-a]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[-a]" f <regexp> matches? ] unit-test
-[ t ] [ "-" "[a-]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[a-]" f <regexp> matches? ] unit-test
-[ f ] [ "b" "[a-]" f <regexp> matches? ] unit-test
-[ f ] [ "-" "[^-]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[^-]" f <regexp> matches? ] unit-test
-
-[ f ] [ "-" "[a-c]" f <regexp> matches? ] unit-test
-[ t ] [ "-" "[^a-c]" f <regexp> matches? ] unit-test
-[ t ] [ "b" "[a-c]" f <regexp> matches? ] unit-test
-[ f ] [ "b" "[^a-c]" f <regexp> matches? ] unit-test
-
-[ t ] [ "-" "[a-c-]" f <regexp> matches? ] unit-test
-[ f ] [ "-" "[^a-c-]" f <regexp> matches? ] unit-test
-
-[ t ] [ "\\" "[\\\\]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[\\\\]" f <regexp> matches? ] unit-test
-[ f ] [ "\\" "[^\\\\]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[^\\\\]" f <regexp> matches? ] unit-test
-
-[ t ] [ "0" "[\\d]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[\\d]" f <regexp> matches? ] unit-test
-[ f ] [ "0" "[^\\d]" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[^\\d]" f <regexp> matches? ] unit-test
-
-[ t ] [ "a" "[a-z]{1,}|[A-Z]{2,4}|b*|c|(f|g)*" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}|b*|c|(f|g)*" f <regexp> matches? ] unit-test
-[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}" f <regexp> matches? ] unit-test
-
-[ t ] [ "1000" "\\d{4,6}" f <regexp> matches? ] unit-test
-[ t ] [ "1000" "[0-9]{4,6}" f <regexp> matches? ] unit-test
-
-[ t ] [ "abc" "\\p{Lower}{3}" f <regexp> matches? ] unit-test
-[ f ] [ "ABC" "\\p{Lower}{3}" f <regexp> matches? ] unit-test
-[ t ] [ "ABC" "\\p{Upper}{3}" f <regexp> matches? ] unit-test
-[ f ] [ "abc" "\\p{Upper}{3}" f <regexp> matches? ] unit-test
-
-[ f ] [ "abc" "[\\p{Upper}]{3}" f <regexp> matches? ] unit-test
-[ t ] [ "ABC" "[\\p{Upper}]{3}" f <regexp> matches? ] unit-test
-
-[ t ] [ "" "\\Q\\E" f <regexp> matches? ] unit-test
-[ f ] [ "a" "\\Q\\E" f <regexp> matches? ] unit-test
-[ t ] [ "|*+" "\\Q|*+\\E" f <regexp> matches? ] unit-test
-[ f ] [ "abc" "\\Q|*+\\E" f <regexp> matches? ] unit-test
-
-[ t ] [ "S" "\\0123" f <regexp> matches? ] unit-test
-[ t ] [ "SXY" "\\0123XY" f <regexp> matches? ] unit-test
-[ t ] [ "x" "\\x78" f <regexp> matches? ] unit-test
-[ f ] [ "y" "\\x78" f <regexp> matches? ] unit-test
-[ t ] [ "x" "\\u000078" f <regexp> matches? ] unit-test
-[ f ] [ "y" "\\u000078" f <regexp> matches? ] unit-test
-
-[ t ] [ "ab" "a+b" f <regexp> matches? ] unit-test
-[ f ] [ "b" "a+b" f <regexp> matches? ] unit-test
-[ t ] [ "aab" "a+b" f <regexp> matches? ] unit-test
-[ f ] [ "abb" "a+b" f <regexp> matches? ] unit-test
-
-[ t ] [ "abbbb" "ab*" f <regexp> matches? ] unit-test
-[ t ] [ "a" "ab*" f <regexp> matches? ] unit-test
-[ f ] [ "abab" "ab*" f <regexp> matches? ] unit-test
-
-[ f ] [ "x" "\\." f <regexp> matches? ] unit-test
-[ t ] [ "." "\\." f <regexp> matches? ] unit-test
-
-[ t ] [ "aaaab" "a+ab" f <regexp> matches? ] unit-test
-[ f ] [ "aaaxb" "a+ab" f <regexp> matches? ] unit-test
-[ t ] [ "aaacb" "a+cb" f <regexp> matches? ] unit-test
-[ f ] [ "aaaab" "a++ab" f <regexp> matches? ] unit-test
-[ t ] [ "aaacb" "a++cb" f <regexp> matches? ] unit-test
-
-[ 3 ] [ "aaacb" "a*" f <regexp> match-head ] unit-test
-[ 1 ] [ "aaacb" "a+?" f <regexp> match-head ] unit-test
-[ 2 ] [ "aaacb" "aa?" f <regexp> match-head ] unit-test
-[ 1 ] [ "aaacb" "aa??" f <regexp> match-head ] unit-test
-[ 3 ] [ "aacb" "aa?c" f <regexp> match-head ] unit-test
-[ 3 ] [ "aacb" "aa??c" f <regexp> match-head ] unit-test
-
-[ t ] [ "aaa" "AAA" t <regexp> matches? ] unit-test
-[ f ] [ "aax" "AAA" t <regexp> matches? ] unit-test
-[ t ] [ "aaa" "A*" t <regexp> matches? ] unit-test
-[ f ] [ "aaba" "A*" t <regexp> matches? ] unit-test
-[ t ] [ "b" "[AB]" t <regexp> matches? ] unit-test
-[ f ] [ "c" "[AB]" t <regexp> matches? ] unit-test
-[ t ] [ "c" "[A-Z]" t <regexp> matches? ] unit-test
-[ f ] [ "3" "[A-Z]" t <regexp> matches? ] unit-test
-
-[ ] [
- "(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))"
- f <regexp> drop
-] unit-test
-
-[ t ] [ "fxxbar" "(?!foo).{3}bar" f <regexp> matches? ] unit-test
-[ f ] [ "foobar" "(?!foo).{3}bar" f <regexp> matches? ] unit-test
-
-[ 3 ] [ "foobar" "foo(?=bar)" f <regexp> match-head ] unit-test
-[ f ] [ "foobxr" "foo(?=bar)" f <regexp> match-head ] unit-test
-
-[ f ] [ "foobxr" "foo\\z" f <regexp> match-head ] unit-test
-[ 3 ] [ "foo" "foo\\z" f <regexp> match-head ] unit-test
-
-[ 3 ] [ "foo bar" "foo\\b" f <regexp> match-head ] unit-test
-[ f ] [ "fooxbar" "foo\\b" f <regexp> matches? ] unit-test
-[ t ] [ "foo" "foo\\b" f <regexp> matches? ] unit-test
-[ t ] [ "foo bar" "foo\\b bar" f <regexp> matches? ] unit-test
-[ f ] [ "fooxbar" "foo\\bxbar" f <regexp> matches? ] unit-test
-[ f ] [ "foo" "foo\\bbar" f <regexp> matches? ] unit-test
-
-[ f ] [ "foo bar" "foo\\B" f <regexp> matches? ] unit-test
-[ 3 ] [ "fooxbar" "foo\\B" f <regexp> match-head ] unit-test
-[ t ] [ "foo" "foo\\B" f <regexp> matches? ] unit-test
-[ f ] [ "foo bar" "foo\\B bar" f <regexp> matches? ] unit-test
-[ t ] [ "fooxbar" "foo\\Bxbar" f <regexp> matches? ] unit-test
-[ f ] [ "foo" "foo\\Bbar" f <regexp> matches? ] unit-test
-
-[ t ] [ "s@f" "[a-z.-]@[a-z]" f <regexp> matches? ] unit-test
-[ f ] [ "a" "[a-z.-]@[a-z]" f <regexp> matches? ] unit-test
-[ t ] [ ".o" "\\.[a-z]" f <regexp> matches? ] unit-test
-
-! Bug in parsing word
-[ t ] [
- "a"
- R' a'
- matches?
-] unit-test
+++ /dev/null
-USING: arrays combinators kernel lists math math.parser
-namespaces parser lexer parser-combinators
-parser-combinators.simple promises quotations sequences strings
-math.order assocs prettyprint.backend prettyprint.custom memoize
-ascii unicode.categories combinators.short-circuit
-accessors make io ;
-IN: parser-combinators.regexp
-
-<PRIVATE
-
-SYMBOL: ignore-case?
-
-: char=-quot ( ch -- quot )
- ignore-case? get
- [ ch>upper [ swap ch>upper = ] ] [ [ = ] ] if
- curry ;
-
-: char-between?-quot ( ch1 ch2 -- quot )
- ignore-case? get
- [ [ ch>upper ] bi@ [ [ ch>upper ] 2dip between? ] ]
- [ [ between? ] ]
- if 2curry ;
-
-: <@literal ( parser obj -- action ) [ nip ] curry <@ ;
-
-: <@delay ( parser quot -- action ) [ curry ] curry <@ ;
-
-PRIVATE>
-
-: ascii? ( n -- ? )
- 0 HEX: 7f between? ;
-
-: octal-digit? ( n -- ? )
- CHAR: 0 CHAR: 7 between? ;
-
-: decimal-digit? ( n -- ? )
- CHAR: 0 CHAR: 9 between? ;
-
-: hex-digit? ( n -- ? )
- dup decimal-digit?
- over CHAR: a CHAR: f between? or
- swap CHAR: A CHAR: F between? or ;
-
-: control-char? ( n -- ? )
- dup 0 HEX: 1f between?
- swap HEX: 7f = or ;
-
-: punct? ( n -- ? )
- "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" member? ;
-
-: c-identifier-char? ( ch -- ? )
- dup alpha? swap CHAR: _ = or ;
-
-: java-blank? ( n -- ? )
- {
- CHAR: \s
- CHAR: \t CHAR: \n CHAR: \r
- HEX: c HEX: 7 HEX: 1b
- } member? ;
-
-: java-printable? ( n -- ? )
- dup alpha? swap punct? or ;
-
-: 'ordinary-char' ( -- parser )
- [ "\\^*+?|(){}[$" member? not ] satisfy
- [ char=-quot ] <@ ;
-
-: 'octal-digit' ( -- parser ) [ octal-digit? ] satisfy ;
-
-: 'octal' ( -- parser )
- "0" token 'octal-digit' 1 3 from-m-to-n &>
- [ oct> ] <@ ;
-
-: 'hex-digit' ( -- parser ) [ hex-digit? ] satisfy ;
-
-: 'hex' ( -- parser )
- "x" token 'hex-digit' 2 exactly-n &>
- "u" token 'hex-digit' 6 exactly-n &> <|>
- [ hex> ] <@ ;
-
-: satisfy-tokens ( assoc -- parser )
- [ [ token ] dip <@literal ] { } assoc>map <or-parser> ;
-
-: 'simple-escape-char' ( -- parser )
- {
- { "\\" CHAR: \\ }
- { "t" CHAR: \t }
- { "n" CHAR: \n }
- { "r" CHAR: \r }
- { "f" HEX: c }
- { "a" HEX: 7 }
- { "e" HEX: 1b }
- } [ char=-quot ] assoc-map satisfy-tokens ;
-
-: 'predefined-char-class' ( -- parser )
- {
- { "d" [ digit? ] }
- { "D" [ digit? not ] }
- { "s" [ java-blank? ] }
- { "S" [ java-blank? not ] }
- { "w" [ c-identifier-char? ] }
- { "W" [ c-identifier-char? not ] }
- } satisfy-tokens ;
-
-: 'posix-character-class' ( -- parser )
- {
- { "Lower" [ letter? ] }
- { "Upper" [ LETTER? ] }
- { "ASCII" [ ascii? ] }
- { "Alpha" [ Letter? ] }
- { "Digit" [ digit? ] }
- { "Alnum" [ alpha? ] }
- { "Punct" [ punct? ] }
- { "Graph" [ java-printable? ] }
- { "Print" [ java-printable? ] }
- { "Blank" [ " \t" member? ] }
- { "Cntrl" [ control-char? ] }
- { "XDigit" [ hex-digit? ] }
- { "Space" [ java-blank? ] }
- } satisfy-tokens "p{" "}" surrounded-by ;
-
-: 'simple-escape' ( -- parser )
- 'octal'
- 'hex' <|>
- "c" token [ LETTER? ] satisfy &> <|>
- any-char-parser <|>
- [ char=-quot ] <@ ;
-
-: 'escape' ( -- parser )
- "\\" token
- 'simple-escape-char'
- 'predefined-char-class' <|>
- 'posix-character-class' <|>
- 'simple-escape' <|> &> ;
-
-: 'any-char' ( -- parser )
- "." token [ drop t ] <@literal ;
-
-: 'char' ( -- parser )
- 'any-char' 'escape' 'ordinary-char' <|> <|> [ satisfy ] <@ ;
-
-DEFER: 'regexp'
-
-TUPLE: group-result str ;
-
-C: <group-result> group-result
-
-: 'non-capturing-group' ( -- parser )
- "?:" token 'regexp' &> ;
-
-: 'positive-lookahead-group' ( -- parser )
- "?=" token 'regexp' &> [ ensure ] <@ ;
-
-: 'negative-lookahead-group' ( -- parser )
- "?!" token 'regexp' &> [ ensure-not ] <@ ;
-
-: 'simple-group' ( -- parser )
- 'regexp' [ [ <group-result> ] <@ ] <@ ;
-
-: 'group' ( -- parser )
- 'non-capturing-group'
- 'positive-lookahead-group'
- 'negative-lookahead-group'
- 'simple-group' <|> <|> <|>
- "(" ")" surrounded-by ;
-
-: 'range' ( -- parser )
- [ CHAR: ] = not ] satisfy "-" token <&
- [ CHAR: ] = not ] satisfy <&>
- [ first2 char-between?-quot ] <@ ;
-
-: 'character-class-term' ( -- parser )
- 'range'
- 'escape' <|>
- [ "\\]" member? not ] satisfy [ char=-quot ] <@ <|> ;
-
-: 'positive-character-class' ( -- parser )
- "]" token [ CHAR: ] = ] <@literal 'character-class-term' <*> <&:>
- 'character-class-term' <+> <|>
- [ [ 1|| ] curry ] <@ ;
-
-: 'negative-character-class' ( -- parser )
- "^" token 'positive-character-class' &>
- [ [ not ] append ] <@ ;
-
-: 'character-class' ( -- parser )
- 'negative-character-class' 'positive-character-class' <|>
- "[" "]" surrounded-by [ satisfy ] <@ ;
-
-: 'escaped-seq' ( -- parser )
- any-char-parser <*>
- [ ignore-case? get <token-parser> ] <@
- "\\Q" "\\E" surrounded-by ;
-
-: 'break' ( quot -- parser )
- satisfy ensure epsilon just <|> ;
-
-: 'break-escape' ( -- parser )
- "$" token [ "\r\n" member? ] 'break' <@literal
- "\\b" token [ blank? ] 'break' <@literal <|>
- "\\B" token [ blank? not ] 'break' <@literal <|>
- "\\z" token epsilon just <@literal <|> ;
-
-: 'simple' ( -- parser )
- 'escaped-seq'
- 'break-escape' <|>
- 'group' <|>
- 'character-class' <|>
- 'char' <|> ;
-
-: 'exactly-n' ( -- parser )
- 'integer' [ exactly-n ] <@delay ;
-
-: 'at-least-n' ( -- parser )
- 'integer' "," token <& [ at-least-n ] <@delay ;
-
-: 'at-most-n' ( -- parser )
- "," token 'integer' &> [ at-most-n ] <@delay ;
-
-: 'from-m-to-n' ( -- parser )
- 'integer' "," token <& 'integer' <&> [ first2 from-m-to-n ] <@delay ;
-
-: 'greedy-interval' ( -- parser )
- 'exactly-n' 'at-least-n' <|> 'at-most-n' <|> 'from-m-to-n' <|> ;
-
-: 'interval' ( -- parser )
- 'greedy-interval'
- 'greedy-interval' "?" token <& [ "reluctant {}" print ] <@ <|>
- 'greedy-interval' "+" token <& [ "possessive {}" print ] <@ <|>
- "{" "}" surrounded-by ;
-
-: 'repetition' ( -- parser )
- ! Posessive
- "*+" token [ <!*> ] <@literal
- "++" token [ <!+> ] <@literal <|>
- "?+" token [ <!?> ] <@literal <|>
- ! Reluctant
- "*?" token [ <(*)> ] <@literal <|>
- "+?" token [ <(+)> ] <@literal <|>
- "??" token [ <(?)> ] <@literal <|>
- ! Greedy
- "*" token [ <*> ] <@literal <|>
- "+" token [ <+> ] <@literal <|>
- "?" token [ <?> ] <@literal <|> ;
-
-: 'dummy' ( -- parser )
- epsilon [ ] <@literal ;
-
-MEMO: 'term' ( -- parser )
- 'simple'
- 'repetition' 'interval' 'dummy' <|> <|> <&> [ first2 call ] <@
- <!+> [ <and-parser> ] <@ ;
-
-LAZY: 'regexp' ( -- parser )
- 'term' "|" token nonempty-list-of [ <or-parser> ] <@ ;
-! "^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@
-! &> [ "caret" print ] <@ <|>
-! 'term' "|" token nonempty-list-of [ <or-parser> ] <@
-! "$" token <& [ "dollar" print ] <@ <|>
-! "^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@ &>
-! "$" token [ "caret dollar" print ] <@ <& <|> ;
-
-TUPLE: regexp source parser ignore-case? ;
-
-: <regexp> ( string ignore-case? -- regexp )
- [
- ignore-case? [
- dup 'regexp' just parse-1
- ] with-variable
- ] keep regexp boa ;
-
-: do-ignore-case ( string regexp -- string regexp )
- dup ignore-case?>> [ [ >upper ] dip ] when ;
-
-: matches? ( string regexp -- ? )
- do-ignore-case parser>> just parse nil? not ;
-
-: match-head ( string regexp -- end )
- do-ignore-case parser>> parse dup nil?
- [ drop f ] [ car unparsed>> from>> ] if ;
-
-! Literal syntax for regexps
-: parse-options ( string -- ? )
- #! Lame
- {
- { "" [ f ] }
- { "i" [ t ] }
- } case ;
-
-: parse-regexp ( accum end -- accum )
- lexer get dup skip-blank
- [ [ index-from dup 1+ swap ] 2keep swapd subseq swap ] change-lexer-column
- lexer get dup still-parsing-line?
- [ (parse-token) parse-options ] [ drop f ] if
- <regexp> parsed ;
-
-: R! CHAR: ! parse-regexp ; parsing
-: R" CHAR: " parse-regexp ; parsing
-: R# CHAR: # parse-regexp ; parsing
-: R' CHAR: ' parse-regexp ; parsing
-: R( CHAR: ) parse-regexp ; parsing
-: R/ CHAR: / parse-regexp ; parsing
-: R@ CHAR: @ parse-regexp ; parsing
-: R[ CHAR: ] parse-regexp ; parsing
-: R` CHAR: ` parse-regexp ; parsing
-: R{ CHAR: } parse-regexp ; parsing
-: R| CHAR: | parse-regexp ; parsing
-
-: find-regexp-syntax ( string -- prefix suffix )
- {
- { "R/ " "/" }
- { "R! " "!" }
- { "R\" " "\"" }
- { "R# " "#" }
- { "R' " "'" }
- { "R( " ")" }
- { "R@ " "@" }
- { "R[ " "]" }
- { "R` " "`" }
- { "R{ " "}" }
- { "R| " "|" }
- } swap [ subseq? not nip ] curry assoc-find drop ;
-
-M: regexp pprint*
- [
- dup source>>
- dup find-regexp-syntax swap % swap % %
- dup ignore-case?>> [ "i" % ] when
- ] "" make
- swap present-text ;
+++ /dev/null
-Regular expressions
+++ /dev/null
-parsing
-text
--- /dev/null
+Sam Anklesaria
\ No newline at end of file
--- /dev/null
+USING: peg.ebnf help.syntax help.markup strings ;
+IN: peg-lexer
+
+HELP: ON-BNF:
+{ $syntax "ON-BNF: word ... ;ON-BNF" }
+{ $description "Creates a parsing word using a parser for lexer control, adding the resulting ast to the stack. Parser syntax is as in " { $link POSTPONE: EBNF: } } ;
+
+HELP: create-bnf
+{ $values { "name" string } { "parser" parser } }
+{ $description "Runtime equivalent of " { $link POSTPONE: ON-BNF: } " also useful with manually constructed parsers." } ;
+
+HELP: factor
+{ $values { "input" string } { "ast" "a sequence of tokens" } }
+{ $description "Tokenizer that acts like the standard Factor lexer, separating tokens by whitespace." } ;
\ No newline at end of file
--- /dev/null
+USING: tools.test peg-lexer.test-parsers ;
+IN: peg-lexer.tests
+
+{ V{ "1234" "-end" } } [
+ test1 1234-end
+] unit-test
+
+{ V{ 1234 53 } } [
+ test2 12345
+] unit-test
+
+{ V{ "heavy" "duty" "testing" } } [
+ test3 heavy duty testing
+] unit-test
\ No newline at end of file
--- /dev/null
+USING: hashtables assocs sequences locals math accessors multiline delegate strings
+delegate.protocols kernel peg peg.ebnf lexer namespaces combinators parser words ;
+IN: peg-lexer
+
+TUPLE: lex-hash hash ;
+CONSULT: assoc-protocol lex-hash hash>> ;
+: <lex-hash> ( a -- lex-hash ) lex-hash boa ;
+
+: pos-or-0 ( neg? -- pos/0 ) dup 0 < [ drop 0 ] when ;
+
+:: prepare-pos ( v i -- c l )
+ [let | n [ i v head-slice ] |
+ v CHAR: \n n last-index -1 or 1+ -
+ n [ CHAR: \n = ] count 1+ ] ;
+
+: store-pos ( v a -- ) input swap at prepare-pos
+ lexer get [ (>>line) ] keep (>>column) ;
+
+M: lex-hash set-at swap {
+ { pos [ store-pos ] }
+ [ swap hash>> set-at ] } case ;
+
+:: at-pos ( t l c -- p ) t l head-slice [ length ] map sum l 1- + c + ;
+
+M: lex-hash at* swap {
+ { input [ drop lexer get text>> "\n" join t ] }
+ { pos [ drop lexer get [ text>> ] [ line>> 1- ] [ column>> 1+ ] tri at-pos t ] }
+ [ swap hash>> at* ] } case ;
+
+: with-global-lexer ( quot -- result )
+ [ f lrstack set
+ V{ } clone error-stack set H{ } clone \ heads set
+ H{ } clone \ packrat set ] f make-assoc <lex-hash>
+ swap bind ; inline
+
+: parse* ( parser -- ast ) compile
+ [ execute [ error-stack get first throw ] unless* ] with-global-lexer
+ ast>> ;
+
+: create-bnf ( name parser -- ) reset-tokenizer [ lexer get skip-blank parse* parsed ] curry
+ define word make-parsing ;
+
+: ON-BNF: CREATE-WORD reset-tokenizer ";ON-BNF" parse-multiline-string parse-ebnf
+ main swap at create-bnf ; parsing
+
+! Tokenizer like standard factor lexer
+EBNF: factor
+space = " " | "\n" | "\t"
+spaces = space* => [[ drop ignore ]]
+chunk = (!(space) .)+ => [[ >string ]]
+expr = spaces chunk
+;EBNF
\ No newline at end of file
--- /dev/null
+Use peg to write parsing words
--- /dev/null
+reflection
\ No newline at end of file
--- /dev/null
+USING: peg-lexer math.parser strings ;
+IN: peg-lexer.test-parsers
+
+ON-BNF: test1
+ num = [1-4]* => [[ >string ]]
+ expr = num ( "-end" | "-done" )
+;ON-BNF
+
+ON-BNF: test2
+ num = [1-4]* => [[ >string string>number ]]
+ expr= num [5-9]
+;ON-BNF
+
+ON-BNF: test3
+ tokenizer = <foreign factor>
+ expr= "heavy" "duty" "testing"
+;ON-BNF
\ No newline at end of file
--- /dev/null
+Doug Coleman
--- /dev/null
+! Copyright (C) 2009 Doug Coleman.
+! See http://factorcode.org/license.txt for BSD license.
+USING: assocs help.markup help.syntax kernel urls alarms calendar ;
+IN: site-watcher
+
+HELP: run-site-watcher
+{ $description "Starts the site-watcher on the assoc stored in " { $link sites } "." } ;
+
+HELP: running-site-watcher
+{ $var-description "A symbol storing the alarm of a running site-watcher if started with the " { $link run-site-watcher } " word. To prevent multiple site-watchers from running, this variable is checked before allowing another site-watcher to start." } ;
+
+HELP: site-watcher-from
+{ $var-description "The email address from which site-watcher sends emails." } ;
+
+HELP: sites
+{ $var-description "A symbol storing an assoc of URLs, data about a site, and who to notify if a site goes down." } ;
+
+HELP: watch-site
+{ $values
+ { "emails" "a string containing an email address, or an array of such" }
+ { "url" url }
+}
+{ $description "Adds a new site to the watch assoc stored in " { $link sites } ", or adds email addresses to an already watched site." } ;
+
+HELP: watch-sites
+{ $values
+ { "assoc" assoc }
+ { "alarm" alarm }
+}
+{ $description "Runs the site-watcher on the input assoc and returns the alarm that times the site check loop. This alarm may be turned off with " { $link cancel-alarm } ", thus stopping the site-watcher." } ;
+
+HELP: site-watcher-frequency
+{ $var-description "A " { $link duration } " specifying how long to wait between checking sites." } ;
+
+HELP: unwatch-site
+{ $values
+ { "emails" "a string containing an email, or an array of such" }
+ { "url" url }
+}
+{ $description "Removes an email address from being notified when a site goes down. If this email was the last one watching the site, removes the site as well." } ;
+
+HELP: delete-site
+{ $values
+ { "url" url }
+}
+{ $description "Removes a watched site from the " { $link sites } " assoc." } ;
+
+ARTICLE: "site-watcher" "Site watcher"
+"The " { $vocab-link "site-watcher" } " vocabulary monitors websites and sends email when a site goes down or comes up." $nl
+"To monitor a site:"
+{ $subsection watch-site }
+"To stop email addresses from being notified if a site's status changes:"
+{ $subsection unwatch-site }
+"To stop monitoring a site for all email addresses:"
+{ $subsection delete-site }
+"To run site-watcher using the sites variable:"
+{ $subsection run-site-watcher }
+;
+
+ABOUT: "site-watcher"
--- /dev/null
+! Copyright (C) 2009 Doug Coleman.
+! See http://factorcode.org/license.txt for BSD license.
+USING: accessors alarms assocs calendar combinators
+continuations fry http.client io.streams.string kernel init
+namespaces prettyprint smtp arrays sequences math math.parser
+strings sets ;
+IN: site-watcher
+
+! Global assoc mapping url -> watching tuple; initialized to an empty hashtable below.
+SYMBOL: sites
+
+! Sender address for report emails (send-report falls back to a default when unset).
+SYMBOL: site-watcher-from
+
+sites [ H{ } clone ] initialize
+
+! State for one watched site: notification emails, the url, when it was last
+! seen up, the current up/down flag, whether a report email is pending, and
+! the last error caught while checking.
+TUPLE: watching emails url last-up up? send-email? error ;
+
+<PRIVATE
+
+: ?1array ( array/object -- array )
+ dup array? [ 1array ] unless ; inline
+
+: <watching> ( emails url -- watching )
+ watching new
+ swap >>url
+ swap ?1array >>emails
+ now >>last-up
+ t >>up? ;
+
+ERROR: not-watching-site url status ;
+
+! Record the new up/down state; when it differs from the previous state,
+! set send-email? so that report-sites mails out a notification.
+: set-site-flags ( watching new-up? -- watching )
+ [ over up?>> = [ t >>send-email? ] unless ] keep >>up? ;
+
+: site-bad ( watching error -- )
+ >>error f set-site-flags drop ;
+
+: site-good ( watching -- )
+ f >>error
+ t set-site-flags
+ now >>last-up drop ;
+
+! assoc maps url -> watching; HTTP GET each url, recording success or the
+! caught error on the corresponding watching tuple.
+: check-sites ( assoc -- )
+ [
+ swap '[ _ http-get 2drop site-good ] [ site-bad ] recover
+ ] assoc-each ;
+
+! Fill in the email body with the downtime from last-up to now,
+! rendered as "H hours, M minutes".
+: site-up-email ( email watching -- email )
+ last-up>> now swap time- duration>minutes 60 /mod
+ [ >integer number>string ] bi@
+ [ " hours, " append ] [ " minutes" append ] bi* append
+ "Site was down for (at least): " prepend >>body ;
+
+: ?unparse ( string/object -- string )
+ dup string? [ unparse ] unless ; inline
+
+: site-down-email ( email watching -- email )
+ error>> ?unparse >>body ;
+
+! Build and send one report email for a site whose state changed,
+! clearing send-email? in the process.
+: send-report ( watching -- )
+ [ <email> ] dip
+ {
+ [ emails>> >>to ]
+ [ drop site-watcher-from get "factor.site.watcher@gmail.com" or >>from ]
+ [ dup up?>> [ site-up-email ] [ site-down-email ] if ]
+ [ [ url>> ] [ up?>> "up" "down" ? ] bi " is " glue >>subject ]
+ [ f >>send-email? drop ]
+ } cleave send-email ;
+
+: report-sites ( assoc -- )
+ [ nip send-email?>> ] assoc-filter
+ [ nip send-report ] assoc-each ;
+
+PRIVATE>
+
+SYMBOL: site-watcher-frequency
+site-watcher-frequency [ 5 minutes ] initialize
+
+! Check and report on all sites every site-watcher-frequency;
+! returns the recurring alarm so callers can cancel it.
+: watch-sites ( assoc -- alarm )
+ '[
+ _ [ check-sites ] [ report-sites ] bi
+ ] site-watcher-frequency get every ;
+
+: watch-site ( emails url -- )
+ sites get ?at [
+ [ [ ?1array ] dip append prune ] change-emails drop
+ ] [
+ <watching> dup url>> sites get set-at
+ ] if ;
+
+: delete-site ( url -- )
+ sites get delete-at ;
+
+! Remove the given addresses from a site's notification list; delete the
+! site entirely when no addresses remain (or when it was never watched).
+: unwatch-site ( emails url -- )
+ [ ?1array ] dip
+ sites get ?at [
+ [ diff ] change-emails dup emails>> empty? [
+ url>> delete-site
+ ] [
+ drop
+ ] if
+ ] [
+ nip delete-site
+ ] if ;
+
+SYMBOL: running-site-watcher
+
+! Idempotent: starts the background alarm only if one is not already recorded.
+: run-site-watcher ( -- )
+ running-site-watcher get-global [
+ sites get-global watch-sites running-site-watcher set-global
+ ] unless ;
+
+[ f running-site-watcher set-global ] "site-watcher" add-init-hook
+
+MAIN: run-site-watcher
-! Copyright (C) 2007, 2008 Slava Pestov.
+! Copyright (C) 2007, 2009 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: arrays hashtables help.markup help.stylesheet io
io.styles kernel math models namespaces sequences ui ui.gadgets
ui.gadgets.books ui.gadgets.panes ui.gestures ui.pens.gradient
-parser accessors colors ;
+parser accessors colors fry ;
IN: slides
CONSTANT: stylesheet
} set-gestures
: slides-window ( slides -- )
- [ <slides> "Slides" open-window ] with-ui ;
+ '[ _ <slides> "Slides" open-window ] with-ui ;
--- /dev/null
+Alex Chapman
+Daniel Ehrenberg
--- /dev/null
+Alex Chapman
+Daniel Ehrenberg
--- /dev/null
+USING: help.syntax help.markup assocs ;
+IN: trees.avl
+
+HELP: AVL{
+{ $syntax "AVL{ { key value }... }" }
+{ $values { "key" "a key" } { "value" "a value" } }
+{ $description "Literal syntax for an AVL tree." } ;
+
+HELP: <avl>
+{ $values { "tree" avl } }
+{ $description "Creates an empty AVL tree." } ;
+
+HELP: >avl
+{ $values { "assoc" assoc } { "avl" avl } }
+{ $description "Converts any " { $link assoc } " into an AVL tree." } ;
+
+HELP: avl
+{ $class-description "This is the class for AVL trees. These conform to the assoc protocol and have efficient (logarithmic time) storage and retrieval operations." } ;
+
+ARTICLE: "trees.avl" "AVL trees"
+"This is a library for AVL trees, with logarithmic time storage and retrieval operations. These trees conform to the assoc protocol."
+{ $subsection avl }
+{ $subsection <avl> }
+{ $subsection >avl }
+{ $subsection POSTPONE: AVL{ } ;
+
+ABOUT: "trees.avl"
--- /dev/null
+USING: kernel tools.test trees trees.avl math random sequences
+assocs accessors ;
+IN: trees.avl.tests
+
+! Rotation unit tests: hand-built avl-node literals (slot order:
+! key value left right balance) are rotated directly, then the resulting
+! keys and balances are inspected.
+! NOTE(review): the inner literal below has one slot fewer than the parallel
+! test that follows (the 1 lands in the right slot, balance defaults to f) --
+! confirm whether an f is missing before the 1.
+[ "key1" 0 "key2" 0 ] [
+ T{ avl-node f "key1" f f T{ avl-node f "key2" f f 1 } 2 }
+ [ single-rotate ] go-left
+ [ left>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>>
+] unit-test
+
+[ "key1" 0 "key2" 0 ] [
+ T{ avl-node f "key1" f f T{ avl-node f "key2" f f f 1 } 2 }
+ [ select-rotate ] go-left
+ [ left>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>>
+] unit-test
+
+[ "key1" 0 "key2" 0 ] [
+ T{ avl-node f "key1" f T{ avl-node f "key2" f f f -1 } f -2 }
+ [ single-rotate ] go-right
+ [ right>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>>
+] unit-test
+
+[ "key1" 0 "key2" 0 ] [
+ T{ avl-node f "key1" f T{ avl-node f "key2" f f f -1 } f -2 }
+ [ select-rotate ] go-right
+ [ right>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>>
+] unit-test
+
+! Double-rotation (zig-zag) cases: three-node chains with each possible
+! grandchild balance, rotated from both sides.
+[ "key1" -1 "key2" 0 "key3" 0 ]
+[ T{ avl-node f "key1" f f
+ T{ avl-node f "key2" f
+ T{ avl-node f "key3" f f f 1 } f -1 } 2 }
+ [ double-rotate ] go-left
+ [ left>> dup key>> swap balance>> ] keep
+ [ right>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+[ "key1" 0 "key2" 0 "key3" 0 ]
+[ T{ avl-node f "key1" f f
+ T{ avl-node f "key2" f
+ T{ avl-node f "key3" f f f 0 } f -1 } 2 }
+ [ double-rotate ] go-left
+ [ left>> dup key>> swap balance>> ] keep
+ [ right>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+[ "key1" 0 "key2" 1 "key3" 0 ]
+[ T{ avl-node f "key1" f f
+ T{ avl-node f "key2" f
+ T{ avl-node f "key3" f f f -1 } f -1 } 2 }
+ [ double-rotate ] go-left
+ [ left>> dup key>> swap balance>> ] keep
+ [ right>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+
+[ "key1" 1 "key2" 0 "key3" 0 ]
+[ T{ avl-node f "key1" f
+ T{ avl-node f "key2" f f
+ T{ avl-node f "key3" f f f -1 } 1 } f -2 }
+ [ double-rotate ] go-right
+ [ right>> dup key>> swap balance>> ] keep
+ [ left>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+[ "key1" 0 "key2" 0 "key3" 0 ]
+[ T{ avl-node f "key1" f
+ T{ avl-node f "key2" f f
+ T{ avl-node f "key3" f f f 0 } 1 } f -2 }
+ [ double-rotate ] go-right
+ [ right>> dup key>> swap balance>> ] keep
+ [ left>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+[ "key1" 0 "key2" -1 "key3" 0 ]
+[ T{ avl-node f "key1" f
+ T{ avl-node f "key2" f f
+ T{ avl-node f "key3" f f f 1 } 1 } f -2 }
+ [ double-rotate ] go-right
+ [ right>> dup key>> swap balance>> ] keep
+ [ left>> dup key>> swap balance>> ] keep
+ dup key>> swap balance>> ] unit-test
+
+! Inserting 7, 8, 9 forces a rebalance that leaves 8 at the root.
+[ "eight" ] [
+ <avl> "seven" 7 pick set-at
+ "eight" 8 pick set-at "nine" 9 pick set-at
+ root>> value>>
+] unit-test
+
+[ "another eight" ] [ ! ERROR!
+ <avl> "seven" 7 pick set-at
+ "another eight" 8 pick set-at 8 swap at
+] unit-test
+
+! Literal syntax with duplicate keys: later pairs overwrite earlier ones.
+: test-tree ( -- tree )
+ AVL{
+ { 7 "seven" }
+ { 9 "nine" }
+ { 4 "four" }
+ { 4 "replaced four" }
+ { 7 "replaced seven" }
+ } clone ;
+
+! test set-at, at, at*
+[ t ] [ test-tree avl? ] unit-test
+[ "seven" ] [ <avl> "seven" 7 pick set-at 7 swap at ] unit-test
+[ "seven" t ] [ <avl> "seven" 7 pick set-at 7 swap at* ] unit-test
+[ f f ] [ <avl> "seven" 7 pick set-at 8 swap at* ] unit-test
+[ "seven" ] [ <avl> "seven" 7 pick set-at 7 swap at ] unit-test
+[ "replacement" ] [ <avl> "seven" 7 pick set-at "replacement" 7 pick set-at 7 swap at ] unit-test
+[ "nine" ] [ test-tree 9 swap at ] unit-test
+[ "replaced four" ] [ test-tree 4 swap at ] unit-test
+[ "replaced seven" ] [ test-tree 7 swap at ] unit-test
+
+! test delete-at--all errors!
+[ f ] [ test-tree 9 over delete-at 9 swap at ] unit-test
+[ "replaced seven" ] [ test-tree 9 over delete-at 7 swap at ] unit-test
+[ "nine" ] [ test-tree 7 over delete-at 4 over delete-at 9 swap at ] unit-test
--- /dev/null
+! Copyright (C) 2007 Alex Chapman
+! See http://factorcode.org/license.txt for BSD license.
+USING: combinators kernel generic math math.functions
+math.parser namespaces io sequences trees
+assocs parser accessors math.order prettyprint.custom ;
+IN: trees.avl
+
+! AVL (self-balancing) tree built on the trees vocab. Each node carries a
+! balance slot; a |balance| of 2 triggers a rotation below, so settled nodes
+! stay in -1..1. The direction-agnostic node-link / with-side words from
+! trees let one set of rotation words serve both directions.
+TUPLE: avl < tree ;
+
+: <avl> ( -- tree )
+ avl new-tree ;
+
+TUPLE: avl-node < node balance ;
+
+: <avl-node> ( key value -- node )
+ avl-node new-node
+ 0 >>balance ;
+
+: increase-balance ( node amount -- )
+ swap [ + ] change-balance drop ;
+
+! Single rotation: the node's +link child (the side opposite current-side)
+! is promoted to subtree root. Balances are not touched here -- callers fix them.
+: rotate ( node -- node )
+ dup node+link dup node-link pick set-node+link
+ tuck set-node-link ;
+
+! Zero the balances of the node and its +link child, then rotate.
+: single-rotate ( node -- node )
+ 0 over (>>balance) 0 over node+link
+ (>>balance) rotate ;
+
+! Helper for double-rotate: maps the grandchild's balance onto the balances
+! the two rotated nodes will receive.
+: pick-balances ( a node -- balance balance )
+ balance>> {
+ { [ dup zero? ] [ 2drop 0 0 ] }
+ { [ over = ] [ neg 0 ] }
+ [ 0 swap ]
+ } cond ;
+
+! Double (zig-zag) rotation: rotate the +link child the other way first,
+! then rotate this node, fixing up all three balances via pick-balances.
+: double-rotate ( node -- node )
+ [
+ node+link [
+ node-link current-side get neg
+ over pick-balances rot 0 swap (>>balance)
+ ] keep (>>balance)
+ ] keep swap >>balance
+ dup node+link [ rotate ] with-other-side
+ over set-node+link rotate ;
+
+! Choose the double rotation when the +link child leans toward current-side
+! (the zig-zag case), otherwise the single rotation.
+: select-rotate ( node -- node )
+ dup node+link balance>> current-side get =
+ [ double-rotate ] [ single-rotate ] if ;
+
+! After an insert: balance 0 means this subtree's height is unchanged;
+! |balance| = 2 means rebalance (result not taller); otherwise it grew.
+: balance-insert ( node -- node taller? )
+ dup balance>> {
+ { [ dup zero? ] [ drop f ] }
+ { [ dup abs 2 = ]
+ [ sgn neg [ select-rotate ] with-side f ] }
+ { [ drop t ] [ t ] } ! balance is -1 or 1, tree is taller
+ } cond ;
+
+DEFER: avl-set
+
+! Insert under the child on the key's side; if that subtree grew,
+! bump this node's balance and rebalance.
+: avl-insert ( value key node -- node taller? )
+ 2dup key>> before? left right ? [
+ [ node-link avl-set ] keep swap
+ [ tuck set-node-link ] dip
+ [ dup current-side get increase-balance balance-insert ]
+ [ f ] if
+ ] with-side ;
+
+! Equal key: overwrite key and value in place, no height change.
+: (avl-set) ( value key node -- node taller? )
+ 2dup key>> = [
+ -rot pick (>>key) over (>>value) f
+ ] [ avl-insert ] if ;
+
+! An f (empty) subtree becomes a fresh leaf, which makes the subtree taller.
+: avl-set ( value key node -- node taller? )
+ [ (avl-set) ] [ swap <avl-node> t ] if* ;
+
+M: avl set-at ( value key node -- node )
+ [ avl-set drop ] change-root drop ;
+
+! Deletion analogue of select-rotate; a zero-balance +link child is a
+! special case where the subtree does not get shorter.
+: delete-select-rotate ( node -- node shorter? )
+ dup node+link balance>> zero? [
+ current-side get neg over (>>balance)
+ current-side get over node+link (>>balance) rotate f
+ ] [
+ select-rotate t
+ ] if ;
+
+: rebalance-delete ( node -- node shorter? )
+ dup balance>> {
+ { [ dup zero? ] [ drop t ] }
+ { [ dup abs 2 = ] [ sgn neg [ delete-select-rotate ] with-side ] }
+ { [ drop t ] [ f ] } ! balance is -1 or 1, tree is not shorter
+ } cond ;
+
+! Adjust this node's balance after a deletion on current-side and report
+! whether the subtree shrank.
+: balance-delete ( node -- node shorter? )
+ current-side get over balance>> {
+ { [ dup zero? ] [ drop neg over (>>balance) f ] }
+ { [ dupd = ] [ drop 0 >>balance t ] }
+ [ dupd neg increase-balance rebalance-delete ]
+ } cond ;
+
+! Walk to the extremity of this subtree, copy its key/value into the node
+! being deleted, and unlink it, rebalancing on the way back up.
+: avl-replace-with-extremity ( to-replace node -- node shorter? )
+ dup node-link [
+ swapd avl-replace-with-extremity [ over set-node-link ] dip
+ [ balance-delete ] [ f ] if
+ ] [
+ [ copy-node-contents drop ] keep node+link t
+ ] if* ;
+
+: replace-with-a-child ( node -- node shorter? )
+ #! assumes that node is not a leaf, otherwise will recurse forever
+ dup node-link [
+ dupd [ avl-replace-with-extremity ] with-other-side
+ [ over set-node-link ] dip [ balance-delete ] [ f ] if
+ ] [
+ [ replace-with-a-child ] with-other-side
+ ] if* ;
+
+: avl-delete-node ( node -- node shorter? )
+ #! delete this node, returning its replacement, and whether this subtree is
+ #! shorter as a result
+ dup leaf? [
+ drop f t
+ ] [
+ left [ replace-with-a-child ] with-side
+ ] if ;
+
+GENERIC: avl-delete ( key node -- node shorter? deleted? )
+
+! Deleting from an empty subtree: nothing found, nothing changed.
+M: f avl-delete ( key f -- f f f ) nip f f ;
+
+: (avl-delete) ( key node -- node shorter? deleted? )
+ tuck node-link avl-delete [
+ [ over set-node-link ] dip [ balance-delete ] [ f ] if
+ ] dip ;
+
+M: avl-node avl-delete ( key node -- node shorter? deleted? )
+ 2dup key>> key-side dup zero? [
+ drop nip avl-delete-node t
+ ] [
+ [ (avl-delete) ] with-side
+ ] if ;
+
+M: avl delete-at ( key node -- )
+ [ avl-delete 2drop ] change-root drop ;
+
+M: avl new-assoc 2drop <avl> ;
+
+! T{ avl f f 0 } is an empty exemplar (root f, count 0) for assoc-clone-like.
+: >avl ( assoc -- avl )
+ T{ avl f f 0 } assoc-clone-like ;
+
+M: avl assoc-like
+ drop dup avl? [ >avl ] unless ;
+
+! AVL{ { key value } ... } literal syntax; printing mirrors it below.
+: AVL{
+ \ } [ >avl ] parse-literal ; parsing
+
+M: avl pprint-delims drop \ AVL{ \ } ;
--- /dev/null
+Balanced AVL trees
--- /dev/null
+collections
--- /dev/null
+Mackenzie Straight
+Daniel Ehrenberg
--- /dev/null
+USING: help.syntax help.markup assocs ;
+IN: trees.splay
+
+HELP: SPLAY{
+{ $syntax "SPLAY{ { key value }... }" }
+{ $values { "key" "a key" } { "value" "a value" } }
+{ $description "Literal syntax for a splay tree." } ;
+
+HELP: <splay>
+{ $values { "tree" splay } }
+{ $description "Creates an empty splay tree." } ;
+
+HELP: >splay
+{ $values { "assoc" assoc } { "tree" splay } }
+{ $description "Converts any " { $link assoc } " into a splay tree." } ;
+
+HELP: splay
+{ $class-description "This is the class for splay trees. Splay trees have amortized average-case logarithmic time storage and retrieval operations, and better complexity on more skewed lookup distributions, though in bad situations they can degrade to linear time, resembling a linked list. These conform to the assoc protocol." } ;
+
+ARTICLE: "trees.splay" "Splay trees"
+"This is a library for splay trees. Splay trees have amortized average-case logarithmic time storage and retrieval operations, and better complexity on more skewed lookup distributions, though in bad situations they can degrade to linear time, resembling a linked list. These trees conform to the assoc protocol."
+{ $subsection splay }
+{ $subsection <splay> }
+{ $subsection >splay }
+{ $subsection POSTPONE: SPLAY{ } ;
+
+ABOUT: "trees.splay"
--- /dev/null
+! Copyright (c) 2005 Mackenzie Straight.
+! See http://factorcode.org/license.txt for BSD license.
+USING: kernel tools.test trees.splay math namespaces assocs
+sequences random sets make grouping ;
+IN: trees.splay.tests
+
+! Perform 100 random lookups (at) to exercise splaying; results are ignored.
+: randomize-numeric-splay-tree ( splay-tree -- )
+ 100 [ drop 100 random swap at drop ] with each ;
+
+! Build a splay tree whose keys are 0..n-1 (conjoin stores dummy values).
+: make-numeric-splay-tree ( n -- splay-tree )
+ <splay> [ [ conjoin ] curry each ] keep ;
+
+! After random splaying, an in-order walk must still yield keys in
+! strictly increasing order.
+[ t ] [
+ 100 make-numeric-splay-tree dup randomize-numeric-splay-tree
+ [ [ drop , ] assoc-each ] { } make [ < ] monotonic?
+] unit-test
+
+[ 10 ] [ 10 make-numeric-splay-tree keys length ] unit-test
+[ 10 ] [ 10 make-numeric-splay-tree values length ] unit-test
+
+[ f ] [ <splay> f 4 pick set-at 4 swap at ] unit-test
+
+! Ensure that f can be a value
+[ t ] [ <splay> f 4 pick set-at 4 swap key? ] unit-test
+
+! >alist yields pairs sorted by key regardless of insertion order.
+[
+{ { 1 "a" } { 2 "b" } { 3 "c" } { 4 "d" } { 5 "e" } { 6 "f" } }
+] [
+{
+ { 4 "d" } { 5 "e" } { 6 "f" }
+ { 1 "a" } { 2 "b" } { 3 "c" }
+} >splay >alist
+] unit-test
--- /dev/null
+! Copyright (c) 2005 Mackenzie Straight.
+! See http://factorcode.org/license.txt for BSD license.
+USING: arrays kernel math namespaces sequences assocs parser
+trees generic math.order accessors prettyprint.custom ;
+IN: trees.splay
+
+! Splay tree: every lookup/insert/delete splays the accessed key to the
+! root. The link-left/link-right/(splay)/assemble words appear to follow
+! the classic top-down splay scheme (two auxiliary trees hung off a header
+! node) -- see Sleator & Tarjan.
+TUPLE: splay < tree ;
+
+: <splay> ( -- tree )
+ \ splay new-tree ;
+
+: rotate-right ( node -- node )
+ dup left>>
+ [ right>> swap (>>left) ] 2keep
+ [ (>>right) ] keep ;
+
+: rotate-left ( node -- node )
+ dup right>>
+ [ left>> swap (>>right) ] 2keep
+ [ (>>left) ] keep ;
+
+! Hang the current node off the right auxiliary tree and descend left.
+: link-right ( left right key node -- left right key node )
+ swap [ [ swap (>>left) ] 2keep
+ nip dup left>> ] dip swap ;
+
+! Hang the current node off the left auxiliary tree and descend right.
+: link-left ( left right key node -- left right key node )
+ swap [ rot [ (>>right) ] 2keep
+ drop dup right>> swapd ] dip swap ;
+
+! Three-way comparisons of the key against a node, its left child's key,
+! and its right child's key respectively.
+: cmp ( key node -- obj node -1/0/1 )
+ 2dup key>> key-side ;
+
+: lcmp ( key node -- obj node -1/0/1 )
+ 2dup left>> key>> key-side ;
+
+: rcmp ( key node -- obj node -1/0/1 )
+ 2dup right>> key>> key-side ;
+
+DEFER: (splay)
+
+: splay-left ( left right key node -- left right key node )
+ dup left>> [
+ lcmp 0 < [ rotate-right ] when
+ dup left>> [ link-right (splay) ] when
+ ] when ;
+
+: splay-right ( left right key node -- left right key node )
+ dup right>> [
+ rcmp 0 > [ rotate-left ] when
+ dup right>> [ link-left (splay) ] when
+ ] when ;
+
+: (splay) ( left right key node -- left right key node )
+ cmp dup 0 <
+ [ drop splay-left ] [ 0 > [ splay-right ] when ] if ;
+
+! Reattach the auxiliary trees collected on the header node around the
+! final root.
+: assemble ( head left right node -- root )
+ [ right>> swap (>>left) ] keep
+ [ left>> swap (>>right) ] keep
+ [ swap left>> swap (>>right) ] 2keep
+ [ swap right>> swap (>>left) ] keep ;
+
+! Splay key within the subtree rooted at node; the cloned T{ node } is the
+! header whose left/right slots collect the auxiliary trees.
+: splay-at ( key node -- node )
+ [ T{ node } clone dup dup ] 2dip
+ (splay) nip assemble ;
+
+: splay ( key tree -- )
+ [ root>> splay-at ] keep (>>root) ;
+
+! Splay key to the root, then split the tree into two subtrees around it.
+: splay-split ( key tree -- node node )
+ 2dup splay root>> cmp 0 < [
+ nip dup left>> swap f over (>>left)
+ ] [
+ nip dup right>> swap f over (>>right) swap
+ ] if ;
+
+! Splay key to the root and report whether the root's key matches.
+: get-splay ( key tree -- node ? )
+ 2dup splay root>> cmp 0 = [
+ nip t
+ ] [
+ 2drop f f
+ ] if ;
+
+: get-largest ( node -- node )
+ dup [ dup right>> [ nip get-largest ] when* ] when ;
+
+! Splay the largest key of the subtree to its root (which then has no
+! right child).
+: splay-largest ( node -- node )
+ dup [ dup get-largest key>> swap splay-at ] when ;
+
+! Join: splay n1's largest key to its root, then attach n2 as the right
+! subtree of that root.
+: splay-join ( n2 n1 -- node )
+ splay-largest [
+ [ (>>right) ] keep
+ ] [
+ drop f
+ ] if* ;
+
+! Splay key to the root; if found, drop the count and join the two
+! remaining subtrees as the new root.
+: remove-splay ( key tree -- )
+ tuck get-splay nip [
+ dup dec-count
+ dup right>> swap left>> splay-join
+ swap (>>root)
+ ] [ drop ] if* ;
+
+! Replace the value when the key is already present; otherwise split
+! around the key and make a fresh node the root, bumping the count.
+: set-splay ( value key tree -- )
+ 2dup get-splay [ 2nip (>>value) ] [
+ drop dup inc-count
+ 2dup splay-split rot
+ [ [ swapd ] dip node boa ] dip (>>root)
+ ] if ;
+
+: new-root ( value key tree -- )
+ 1 >>count
+ [ swap <node> ] dip (>>root) ;
+
+M: splay set-at ( value key tree -- )
+ dup root>> [ set-splay ] [ new-root ] if ;
+
+M: splay at* ( key tree -- value ? )
+ dup root>> [
+ get-splay [ dup [ value>> ] when ] dip
+ ] [
+ 2drop f f
+ ] if ;
+
+M: splay delete-at ( key tree -- )
+ dup root>> [ remove-splay ] [ 2drop ] if ;
+
+M: splay new-assoc
+ 2drop <splay> ;
+
+! T{ splay f f 0 } is an empty exemplar (root f, count 0).
+: >splay ( assoc -- tree )
+ T{ splay f f 0 } assoc-clone-like ;
+
+! SPLAY{ { key value } ... } literal syntax; printing mirrors it below.
+: SPLAY{
+ \ } [ >splay ] parse-literal ; parsing
+
+M: splay assoc-like
+ drop dup splay? [ >splay ] unless ;
+
+M: splay pprint-delims drop \ SPLAY{ \ } ;
--- /dev/null
+Splay trees
--- /dev/null
+collections
+trees
--- /dev/null
+Binary search trees
--- /dev/null
+collections
+trees
--- /dev/null
+USING: help.syntax help.markup assocs ;
+IN: trees
+
+HELP: TREE{
+{ $syntax "TREE{ { key value }... }" }
+{ $values { "key" "a key" } { "value" "a value" } }
+{ $description "Literal syntax for an unbalanced tree." } ;
+
+HELP: <tree>
+{ $values { "tree" tree } }
+{ $description "Creates an empty unbalanced binary tree." } ;
+
+HELP: >tree
+{ $values { "assoc" assoc } { "tree" tree } }
+{ $description "Converts any " { $link assoc } " into an unbalanced binary tree." } ;
+
+HELP: tree
+{ $class-description "This is the class for unbalanced binary search trees. It is not usually intended to be used directly but rather as a basis for other trees." } ;
+
+ARTICLE: "trees" "Binary search trees"
+"This is a library for unbalanced binary search trees. It is not intended to be used directly in most situations but rather as a base class for new trees, because performance can degrade to linear time storage/retrieval by the number of keys. These binary search trees conform to the assoc protocol."
+{ $subsection tree }
+{ $subsection <tree> }
+{ $subsection >tree }
+{ $subsection POSTPONE: TREE{ } ;
+
+ABOUT: "trees"
--- /dev/null
+USING: trees assocs tools.test kernel sequences ;
+IN: trees.tests
+
+! Literal TREE{ } syntax with duplicate keys: later pairs overwrite
+! earlier ones (checked by the "replaced ..." assertions below).
+: test-tree ( -- tree )
+ TREE{
+ { 7 "seven" }
+ { 9 "nine" }
+ { 4 "four" }
+ { 4 "replaced four" }
+ { 7 "replaced seven" }
+ } clone ;
+
+! test set-at, at, at*
+[ "seven" ] [ <tree> "seven" 7 pick set-at 7 swap at ] unit-test
+[ "seven" t ] [ <tree> "seven" 7 pick set-at 7 swap at* ] unit-test
+[ f f ] [ <tree> "seven" 7 pick set-at 8 swap at* ] unit-test
+[ "seven" ] [ <tree> "seven" 7 pick set-at 7 swap at ] unit-test
+[ "replacement" ] [ <tree> "seven" 7 pick set-at "replacement" 7 pick set-at 7 swap at ] unit-test
+[ "replaced four" ] [ test-tree 4 swap at ] unit-test
+[ "nine" ] [ test-tree 9 swap at ] unit-test
+
+! test delete-at
+[ f ] [ test-tree 9 over delete-at 9 swap at ] unit-test
+[ "replaced seven" ] [ test-tree 9 over delete-at 7 swap at ] unit-test
+[ "replaced four" ] [ test-tree 9 over delete-at 4 swap at ] unit-test
+[ "nine" "replaced four" ] [ test-tree 7 over delete-at 9 over at 4 rot at ] unit-test
+[ "nine" ] [ test-tree 7 over delete-at 4 over delete-at 9 swap at ] unit-test
--- /dev/null
+! Copyright (C) 2007 Alex Chapman
+! See http://factorcode.org/license.txt for BSD license.
+USING: kernel generic math sequences arrays io namespaces
+prettyprint.private kernel.private assocs random combinators
+parser math.order accessors deques make prettyprint.custom ;
+IN: trees
+
+! Unbalanced binary search tree; the base class the avl and splay trees in
+! this patch build on. The current-side dynamic variable (left = -1,
+! right = 1) parameterizes the node-link words so traversal and rotation
+! code is written once and works in either direction.
+TUPLE: tree root count ;
+
+: new-tree ( class -- tree )
+ new
+ f >>root
+ 0 >>count ; inline
+
+: <tree> ( -- tree )
+ tree new-tree ;
+
+INSTANCE: tree assoc
+
+TUPLE: node key value left right ;
+
+: new-node ( key value class -- node )
+ new
+ swap >>value
+ swap >>key ;
+
+: <node> ( key value -- node )
+ node new-node ;
+
+SYMBOL: current-side
+
+CONSTANT: left -1
+CONSTANT: right 1
+
+! Three-way comparison mapped onto the side constants:
+! -1 go left, 0 exact match, 1 go right.
+: key-side ( k1 k2 -- n )
+ <=> {
+ { +lt+ [ -1 ] }
+ { +eq+ [ 0 ] }
+ { +gt+ [ 1 ] }
+ } case ;
+
+: go-left? ( -- ? ) current-side get left eq? ;
+
+: inc-count ( tree -- ) [ 1+ ] change-count drop ;
+
+: dec-count ( tree -- ) [ 1- ] change-count drop ;
+
+! ? = f follows current-side; ? = t follows the opposite side.
+: node-link@ ( node ? -- node )
+ go-left? xor [ left>> ] [ right>> ] if ;
+
+: set-node-link@ ( left parent ? -- )
+ go-left? xor [ (>>left) ] [ (>>right) ] if ;
+
+! node-link / set-node-link operate on the current-side child;
+! node+link / set-node+link on the opposite one.
+: node-link ( node -- child ) f node-link@ ;
+
+: set-node-link ( child node -- ) f set-node-link@ ;
+
+: node+link ( node -- child ) t node-link@ ;
+
+: set-node+link ( child node -- ) t set-node-link@ ;
+
+! Run quot with current-side bound to side in a fresh scope.
+: with-side ( side quot -- )
+ [ swap current-side set call ] with-scope ; inline
+
+: with-other-side ( quot -- )
+ current-side get neg swap with-side ; inline
+
+: go-left ( quot -- ) left swap with-side ; inline
+
+: go-right ( quot -- ) right swap with-side ; inline
+
+: leaf? ( node -- ? )
+ [ left>> ] [ right>> ] bi or not ;
+
+: random-side ( -- side )
+ left right 2array random ;
+
+: choose-branch ( key node -- key node-left/right )
+ 2dup key>> key-side [ node-link ] with-side ;
+
+! Recursive lookup; an f subtree yields f f.
+: node-at* ( key node -- value ? )
+ [
+ 2dup key>> = [
+ nip value>> t
+ ] [
+ choose-branch node-at*
+ ] if
+ ] [ drop f f ] if* ;
+
+M: tree at* ( key tree -- value ? )
+ root>> node-at* ;
+
+! Returns the (possibly new) subtree root so callers can relink;
+! an equal key overwrites the value in place.
+: node-set ( value key node -- node )
+ 2dup key>> key-side dup 0 eq? [
+ drop nip swap >>value
+ ] [
+ [
+ [ node-link [ node-set ] [ swap <node> ] if* ] keep
+ [ set-node-link ] keep
+ ] with-side
+ ] if ;
+
+M: tree set-at ( value key tree -- )
+ [ [ node-set ] [ swap <node> ] if* ] change-root drop ;
+
+! BST-invariant check: each node's key lies after its left child's key and
+! before its right child's key, recursively.
+: valid-node? ( node -- ? )
+ [
+ dup dup left>> [ key>> swap key>> before? ] when*
+ [
+ dup dup right>> [ key>> swap key>> after? ] when* ] dip and swap
+ dup left>> valid-node? swap right>> valid-node? and and
+ ] [ t ] if* ;
+
+: valid-tree? ( tree -- ? ) root>> valid-node? ;
+
+! In-order walk (left, node, right) emitting { key value } pairs,
+! so the resulting alist is sorted by key.
+: (node>alist) ( node -- )
+ [
+ [ left>> (node>alist) ]
+ [ [ key>> ] [ value>> ] bi 2array , ]
+ [ right>> (node>alist) ]
+ tri
+ ] when* ;
+
+M: tree >alist [ root>> (node>alist) ] { } make ;
+
+M: tree clear-assoc
+ 0 >>count
+ f >>root drop ;
+
+: copy-node-contents ( new old -- new )
+ [ key>> >>key ]
+ [ value>> >>value ] bi ;
+
+! Deletion
+DEFER: delete-node
+
+: (prune-extremity) ( parent node -- new-extremity )
+ dup node-link [
+ rot drop (prune-extremity)
+ ] [
+ tuck delete-node swap set-node-link
+ ] if* ;
+
+: prune-extremity ( node -- new-extremity )
+ #! remove and return the leftmost or rightmost child of this node.
+ #! assumes at least one child
+ dup node-link (prune-extremity) ;
+
+! Overwrite the node with its node-link child's contents, then delete
+! that child and relink its replacement.
+: replace-with-child ( node -- node )
+ dup node-link copy-node-contents dup node-link delete-node over set-node-link ;
+
+: replace-with-extremity ( node -- node )
+ dup node-link dup node+link [
+ ! predecessor/successor is not the immediate child
+ [ prune-extremity ] with-other-side copy-node-contents
+ ] [
+ ! node-link is the predecessor/successor
+ drop replace-with-child
+ ] if ;
+
+: delete-node-with-two-children ( node -- node )
+ #! randomised to minimise tree unbalancing
+ random-side [ replace-with-extremity ] with-side ;
+
+: delete-node ( node -- node )
+ #! delete this node, returning its replacement
+ dup left>> [
+ dup right>> [
+ delete-node-with-two-children
+ ] [
+ left>> ! left but no right
+ ] if
+ ] [
+ dup right>> [
+ right>> ! right but not left
+ ] [
+ drop f ! no children
+ ] if
+ ] if ;
+
+: delete-bst-node ( key node -- node )
+ 2dup key>> key-side dup 0 eq? [
+ drop nip delete-node
+ ] [
+ [ tuck node-link delete-bst-node over set-node-link ] with-side
+ ] if ;
+
+M: tree delete-at
+ [ delete-bst-node ] change-root drop ;
+
+M: tree new-assoc
+ 2drop <tree> ;
+
+M: tree clone dup assoc-clone-like ;
+
+! T{ tree f f 0 } is an empty exemplar (root f, count 0).
+: >tree ( assoc -- tree )
+ T{ tree f f 0 } assoc-clone-like ;
+
+M: tree assoc-like drop dup tree? [ >tree ] unless ;
+
+! TREE{ { key value } ... } literal syntax; printing mirrors it below.
+: TREE{
+ \ } [ >tree ] parse-literal ; parsing
+
+M: tree assoc-size count>> ;
+M: tree pprint-delims drop \ TREE{ \ } ;
+M: tree >pprint-sequence >alist ;
+M: tree pprint-narrow? drop t ;
--- /dev/null
+USING: accessors ui ui.gadgets ui.gadgets.labels ui.gadgets.buttons ui.gadgets.packs locals sequences io.styles ;
+IN: ui.gadgets.alerts
+! Pops up a window showing string in a label above an "okay" border-button;
+! clicking the button closes the window and then calls quot (close-window
+! is prepended to quot via append).
+:: alert ( quot string -- ) <pile> { 10 10 } >>gap 1 >>align string <label> { "sans-serif" plain 18 } >>font { 200 100 } >>pref-dim add-gadget
+ "okay" [ close-window ] quot append <border-button> add-gadget "" open-window ;
\ No newline at end of file
--- /dev/null
+USING: accessors kernel fry math models ui.gadgets ui.gadgets.books ui.gadgets.buttons ;
+IN: ui.gadgets.book-extras
+! Paging helpers for book gadgets.
+! <book*>: book starting on page 0.
+: <book*> ( pages -- book ) 0 <model> <book> ;
+! |<<: jump back to the first page.
+: |<< ( book -- ) 0 swap set-control-value ;
+: next ( book -- ) model>> [ 1 + ] change-model ;
+: prev ( book -- ) model>> [ 1 - ] change-model ;
+! Wraps quot so a button first resolves its enclosing book (walking parent>>
+! until a book is found) and then runs quot on it.
+! NOTE(review): the colon definition nested inside the fried quotation is
+! highly unusual -- confirm this parses and behaves as intended.
+: (book-t) ( quot -- quot ) '[ : owner ( gadget -- book ) parent>> dup book? [ owner ] unless ; owner @ ] ;
+: <book-btn> ( label quot -- button ) (book-t) <button> ;
+: <book-bevel-btn> ( label quot -- button ) (book-t) <border-button> ;
+: >>> ( label -- button ) [ next ] <book-btn> ;
+: <<< ( label -- button ) [ prev ] <book-btn> ;
\ No newline at end of file
#! On Windows, white is { 253 253 253 } ?
[ 10 /i ] map ;
-: stride ( bitmap -- n ) width>> 3 * ;
-
: bitmap= ( bitmap1 bitmap2 -- ? )
- [
- dup [ [ height>> ] [ stride ] bi * ] [ array>> length ] bi = [
- [ [ array>> ] [ stride 4 align ] bi group ] [ stride ] bi
- '[ _ head twiddle ] map
- ] unless
- ] bi@ = ;
+ [ bitmap>> twiddle ] bi@ = ;
: check-rendering ( gadget -- )
screenshot
[ render-output set-global ]
[
- "resource:extra/ui/render/test/reference.bmp" load-image
+ "vocab:ui/render/test/reference.bmp" load-image
bitmap= "is perfect" "needs work" ?
"Your UI rendering " prepend
message-window
3array <grid>
{ 5 5 } >>gap
COLOR: blue <grid-lines> >>boundary
- add-gadget
- <gadget>
- { 14 14 } >>dim
- COLOR: black <checkmark-paint> >>interior
- COLOR: black <solid> >>boundary
- { 4 4 } <border>
add-gadget ;
: ui-render-test ( -- )
--- /dev/null
+USING: accessors sequences namespaces ui.render opengl fry kernel ;
+IN: ui.utils
+! Dynamic variables holding the current gadget's width and height.
+SYMBOLS: width height ;
+! Unpack the gadget's dim pair into the width/height variables.
+: store-dim ( gadget -- ) dim>> [ first width set ] [ second height set ] bi ;
+! Run quot in a fresh scope with width/height set from gadget.
+: with-dim ( gadget quot -- ) '[ _ store-dim @ ] with-scope ; inline
+! Like with-dim, but also translates by the current rendering origin first.
+: with-w/h ( gadget quot -- ) '[ origin get _ with-translation ] with-dim ; inline
\ No newline at end of file
TUPLE: irclog-app < dispatcher ;
-: irc-link ( -- string )
+: irc-link ( channel -- string )
gmt -7 hours convert-timezone >date<
[ unparse 2 tail ] 2dip
- "http://bespin.org/~nef/logs/concatenative/%02s.%02d.%02d"
+ "http://bespin.org/~nef/logs/%s/%02s.%02d.%02d"
sprintf ;
: <display-irclog-action> ( -- action )
<action>
- [ irc-link <redirect> ] >>display ;
+ [ "concatenative" irc-link <redirect> ] >>display ;
: <irclog-app> ( -- dispatcher )
irclog-app new-dispatcher
! LINKS, ETC
! ! !
-: pastebin-url ( -- url )
- URL" $pastebin/list" ;
+CONSTANT: pastebin-url URL" $pastebin/"
: paste-url ( id -- url )
"$pastebin/paste" >url swap "id" set-query-param ;
"id" value <paste> delete-tuples
"id" value f <annotation> delete-tuples
] with-transaction
- URL" $pastebin/list" <redirect>
+ pastebin-url <redirect>
] >>submit
<protected>
--- /dev/null
+Slava Pestov
\ No newline at end of file
--- /dev/null
+<?xml version='1.0' ?>
+
+<t:chloe xmlns:t="http://factorcode.org/chloe/1.0">
+
+<html>
+ <head>
+ <title>SiteWatcher</title>
+ </head>
+ <body>
+ <h1>SiteWatcher</h1>
+ <h2>It tells you if your web site goes down.</h2>
+ <table>
+ <t:bind-each t:name="sites">
+ <tr>
+ <td> <t:label t:name="url" /> </td>
+ <td> <t:button t:action="$site-watcher-app/remove" t:for="url">Remove</t:button> </td>
+ </tr>
+ </t:bind-each>
+ </table>
+ <p>
+ <t:button t:action="$site-watcher-app/check">Check now</t:button>
+ </p>
+ <hr />
+ <h3>Add a new site</h3>
+ <t:form t:action="$site-watcher-app/add">
+ <table>
+ <tr>
+ <th>URL:</th>
+ <td> <t:field t:name="url" t:size="80" /> </td>
+ </tr>
+ <tr>
+ <th>E-mail:</th>
+ <td> <t:field t:name="email" t:size="80" /> </td>
+ </tr>
+ </table>
+ <p> <button type="submit">Done</button> </p>
+ </t:form>
+ </body>
+</html>
+
+</t:chloe>
--- /dev/null
+! Copyright (C) 2009 Slava Pestov.
+! See http://factorcode.org/license.txt for BSD license.
+USING: accessors furnace.actions furnace.alloy furnace.redirection
+html.forms http.server http.server.dispatchers namespaces site-watcher
+site-watcher.private kernel urls validators db.sqlite assocs ;
+IN: webapps.site-watcher
+
+! Web front end for the site-watcher vocab: list, add, remove and check
+! watched sites.
+TUPLE: site-watcher-app < dispatcher ;
+
+CONSTANT: site-list-url URL" $site-watcher-app/"
+
+! Front page: renders the site-list template over the current watching tuples.
+: <site-list-action> ( -- action )
+ <page-action>
+ { site-watcher-app "site-list" } >>template
+ [
+ begin-form
+ sites get values "sites" set-value
+ ] >>init ;
+
+! Validates url/email, registers the site, and redirects back to the list.
+: <add-site-action> ( -- action )
+ <action>
+ [
+ { { "url" [ v-url ] } { "email" [ v-email ] } } validate-params
+ ] >>validate
+ [
+ "email" value "url" value watch-site
+ site-list-url <redirect>
+ ] >>submit ;
+
+: <remove-site-action> ( -- action )
+ <action>
+ [
+ { { "url" [ v-url ] } } validate-params
+ ] >>validate
+ [
+ "url" value delete-site
+ site-list-url <redirect>
+ ] >>submit ;
+
+! Runs an immediate check/report cycle instead of waiting for the alarm.
+: <check-sites-action> ( -- action )
+ <action>
+ [
+ sites get [ check-sites ] [ report-sites ] bi
+ site-list-url <redirect>
+ ] >>submit ;
+
+: <site-watcher-app> ( -- dispatcher )
+ site-watcher-app new-dispatcher
+ <site-list-action> "" add-responder
+ <add-site-action> "add" add-responder
+ <remove-site-action> "remove" add-responder
+ <check-sites-action> "check" add-responder ;
+
+! NOTE(review): installs itself as the global main responder at load time
+! against a hard-coded test.db -- confirm this is intended outside development.
+<site-watcher-app> "resource:test.db" <sqlite-db> <alloy> main-responder set-global
\ No newline at end of file
<t:title><t:label t:name="title" /></t:title>
<div class="description">
- <t:farkup t:name="parsed" t:parsed="true" />
+ <t:farkup t:name="content" />
</div>
<p>
: <article> ( title -- article ) article new swap >>title ;
-TUPLE: revision id title author date content parsed description ;
+TUPLE: revision id title author date content description ;
revision "REVISIONS" {
{ "id" "ID" INTEGER +db-assigned-id+ }
{ "author" "AUTHOR" { VARCHAR 256 } +not-null+ } ! uid
{ "date" "DATE" TIMESTAMP +not-null+ }
{ "content" "CONTENT" TEXT +not-null+ }
- { "parsed" "PARSED" FACTOR-BLOB +not-null+ } ! Farkup AST
{ "description" "DESCRIPTION" TEXT }
} define-persistent
: <revision> ( id -- revision )
revision new swap >>id ;
-: compute-html ( revision -- )
- dup content>> parse-farkup >>parsed drop ;
-
: validate-title ( -- )
{ { "title" [ v-one-line ] } } validate-params ;
[ title>> ] [ id>> ] bi article boa insert-tuple ;
: add-revision ( revision -- )
- [ compute-html ]
[ insert-tuple ]
[
dup title>> <article> select-tuple
[ amend-article ] [ add-article ] if*
]
- tri ;
+ bi ;
: <edit-article-action> ( -- action )
<page-action>
(modify-syntax-entry ?\r " " table)
(modify-syntax-entry ?\ " " table)
(modify-syntax-entry ?\n " " table)
- (modify-syntax-entry ?\( "()" table)
- (modify-syntax-entry ?\) ")(" table)
table))
(defconst fuel-syntax--syntactic-keywords
- `(;; CHARs:
- ("\\(CHAR:\\|POSTPONE:\\|\\\\\\) \\(.\\)\\( \\|$\\)" (2 "w"))
- ;; Comments:
+ `(;; Comments
("\\_<\\(#?!\\) .*\\(\n\\|$\\)" (1 "<") (2 ">"))
("\\_<\\(#?!\\)\\(\n\\|$\\)" (1 "<") (2 ">"))
- ;; Strings
+ ;; Strings and chars
+ ("CHAR: \\(\"\\) [^\\\"]*?\\(\"\\)\\([^\\\"]\\|\\\\.\\)*?\\(\"\\)"
+ (1 "w") (2 "\"") (4 "\""))
+ ("\\(CHAR:\\|POSTPONE:\\|\\\\\\) \\(.\\)\\( \\|$\\)" (2 "w"))
("\\( \\|^\\)\\(DLL\\|P\\|SBUF\\)\\(\"\\)\\([^\n\r\f\\\"]\\|\\\\.\\)*?\\(\"\\)"
(3 "\"") (5 "\""))
- ("\\(\"\\)\\([^\n\r\f\\\"]\\|\\\\.\\)*?\\(\"\\)" (1 "\"") (3 "\""))
+ ("\\_<\\(\"\\)\\([^\n\r\f\\\"]\\|\\\\.\\)*?\\(\"\\)" (1 "\"") (3 "\""))
("\\_<<\\(\"\\)\\_>" (1 "<b"))
("\\_<\\(\"\\)>\\_>" (1 ">b"))
;; Multiline constructs
;; Parenthesis:
("\\_<\\((\\)\\_>" (1 "()"))
("\\_<\\()\\)\\_>" (1 ")("))
+ ("\\_<(\\((\\)\\_>" (1 "()"))
+ ("\\_<\\()\\))\\_>" (1 ")("))
;; Quotations:
("\\_<'\\(\\[\\)\\_>" (1 "(]")) ; fried
("\\_<\\(\\[\\)\\_>" (1 "(]"))
+++ /dev/null
-Alex Chapman
-Daniel Ehrenberg
+++ /dev/null
-Alex Chapman
-Daniel Ehrenberg
+++ /dev/null
-USING: help.syntax help.markup assocs ;
-IN: trees.avl
-
-HELP: AVL{
-{ $syntax "AVL{ { key value }... }" }
-{ $values { "key" "a key" } { "value" "a value" } }
-{ $description "Literal syntax for an AVL tree." } ;
-
-HELP: <avl>
-{ $values { "tree" avl } }
-{ $description "Creates an empty AVL tree" } ;
-
-HELP: >avl
-{ $values { "assoc" assoc } { "avl" avl } }
-{ $description "Converts any " { $link assoc } " into an AVL tree." } ;
-
-HELP: avl
-{ $class-description "This is the class for AVL trees. These conform to the assoc protocol and have efficient (logarithmic time) storage and retrieval operations." } ;
-
-ARTICLE: { "avl" "intro" } "AVL trees"
-"This is a library for AVL trees, with logarithmic time storage and retrieval operations. These trees conform to the assoc protocol."
-{ $subsection avl }
-{ $subsection <avl> }
-{ $subsection >avl }
-{ $subsection POSTPONE: AVL{ } ;
-
-ABOUT: { "avl" "intro" }
+++ /dev/null
-USING: kernel tools.test trees trees.avl math random sequences assocs ;
-IN: trees.avl.tests
-
-[ "key1" 0 "key2" 0 ] [
- T{ avl-node f "key1" f f T{ avl-node f "key2" f f 1 } 2 }
- [ single-rotate ] go-left
- [ node-left dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance
-] unit-test
-
-[ "key1" 0 "key2" 0 ] [
- T{ avl-node f "key1" f f T{ avl-node f "key2" f f f 1 } 2 }
- [ select-rotate ] go-left
- [ node-left dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance
-] unit-test
-
-[ "key1" 0 "key2" 0 ] [
- T{ avl-node f "key1" f T{ avl-node f "key2" f f f -1 } f -2 }
- [ single-rotate ] go-right
- [ node-right dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance
-] unit-test
-
-[ "key1" 0 "key2" 0 ] [
- T{ avl-node f "key1" f T{ avl-node f "key2" f f f -1 } f -2 }
- [ select-rotate ] go-right
- [ node-right dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance
-] unit-test
-
-[ "key1" -1 "key2" 0 "key3" 0 ]
-[ T{ avl-node f "key1" f f
- T{ avl-node f "key2" f
- T{ avl-node f "key3" f f f 1 } f -1 } 2 }
- [ double-rotate ] go-left
- [ node-left dup node-key swap avl-node-balance ] keep
- [ node-right dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-[ "key1" 0 "key2" 0 "key3" 0 ]
-[ T{ avl-node f "key1" f f
- T{ avl-node f "key2" f
- T{ avl-node f "key3" f f f 0 } f -1 } 2 }
- [ double-rotate ] go-left
- [ node-left dup node-key swap avl-node-balance ] keep
- [ node-right dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-[ "key1" 0 "key2" 1 "key3" 0 ]
-[ T{ avl-node f "key1" f f
- T{ avl-node f "key2" f
- T{ avl-node f "key3" f f f -1 } f -1 } 2 }
- [ double-rotate ] go-left
- [ node-left dup node-key swap avl-node-balance ] keep
- [ node-right dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-
-[ "key1" 1 "key2" 0 "key3" 0 ]
-[ T{ avl-node f "key1" f
- T{ avl-node f "key2" f f
- T{ avl-node f "key3" f f f -1 } 1 } f -2 }
- [ double-rotate ] go-right
- [ node-right dup node-key swap avl-node-balance ] keep
- [ node-left dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-[ "key1" 0 "key2" 0 "key3" 0 ]
-[ T{ avl-node f "key1" f
- T{ avl-node f "key2" f f
- T{ avl-node f "key3" f f f 0 } 1 } f -2 }
- [ double-rotate ] go-right
- [ node-right dup node-key swap avl-node-balance ] keep
- [ node-left dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-[ "key1" 0 "key2" -1 "key3" 0 ]
-[ T{ avl-node f "key1" f
- T{ avl-node f "key2" f f
- T{ avl-node f "key3" f f f 1 } 1 } f -2 }
- [ double-rotate ] go-right
- [ node-right dup node-key swap avl-node-balance ] keep
- [ node-left dup node-key swap avl-node-balance ] keep
- dup node-key swap avl-node-balance ] unit-test
-
-[ "eight" ] [
- <avl> "seven" 7 pick set-at
- "eight" 8 pick set-at "nine" 9 pick set-at
- tree-root node-value
-] unit-test
-
-[ "another eight" ] [ ! ERROR!
- <avl> "seven" 7 pick set-at
- "another eight" 8 pick set-at 8 swap at
-] unit-test
-
-: test-tree ( -- tree )
- AVL{
- { 7 "seven" }
- { 9 "nine" }
- { 4 "four" }
- { 4 "replaced four" }
- { 7 "replaced seven" }
- } clone ;
-
-! test set-at, at, at*
-[ t ] [ test-tree avl? ] unit-test
-[ "seven" ] [ <avl> "seven" 7 pick set-at 7 swap at ] unit-test
-[ "seven" t ] [ <avl> "seven" 7 pick set-at 7 swap at* ] unit-test
-[ f f ] [ <avl> "seven" 7 pick set-at 8 swap at* ] unit-test
-[ "seven" ] [ <avl> "seven" 7 pick set-at 7 swap at ] unit-test
-[ "replacement" ] [ <avl> "seven" 7 pick set-at "replacement" 7 pick set-at 7 swap at ] unit-test
-[ "nine" ] [ test-tree 9 swap at ] unit-test
-[ "replaced four" ] [ test-tree 4 swap at ] unit-test
-[ "replaced seven" ] [ test-tree 7 swap at ] unit-test
-
-! test delete-at--all errors!
-[ f ] [ test-tree 9 over delete-at 9 swap at ] unit-test
-[ "replaced seven" ] [ test-tree 9 over delete-at 7 swap at ] unit-test
-[ "nine" ] [ test-tree 7 over delete-at 4 over delete-at 9 swap at ] unit-test
+++ /dev/null
-! Copyright (C) 2007 Alex Chapman
-! See http://factorcode.org/license.txt for BSD license.
-USING: combinators kernel generic math math.functions
-math.parser namespaces io prettyprint.backend sequences trees
-assocs parser accessors math.order ;
-IN: trees.avl
-
-TUPLE: avl < tree ;
-
-: <avl> ( -- tree )
- avl new-tree ;
-
-TUPLE: avl-node < node balance ;
-
-: <avl-node> ( key value -- node )
- avl-node new-node
- 0 >>balance ;
-
-: increase-balance ( node amount -- )
- swap [ + ] change-balance drop ;
-
-: rotate ( node -- node )
- dup node+link dup node-link pick set-node+link
- tuck set-node-link ;
-
-: single-rotate ( node -- node )
- 0 over (>>balance) 0 over node+link
- (>>balance) rotate ;
-
-: pick-balances ( a node -- balance balance )
- balance>> {
- { [ dup zero? ] [ 2drop 0 0 ] }
- { [ over = ] [ neg 0 ] }
- [ 0 swap ]
- } cond ;
-
-: double-rotate ( node -- node )
- [
- node+link [
- node-link current-side get neg
- over pick-balances rot 0 swap (>>balance)
- ] keep (>>balance)
- ] keep swap >>balance
- dup node+link [ rotate ] with-other-side
- over set-node+link rotate ;
-
-: select-rotate ( node -- node )
- dup node+link balance>> current-side get =
- [ double-rotate ] [ single-rotate ] if ;
-
-: balance-insert ( node -- node taller? )
- dup avl-node-balance {
- { [ dup zero? ] [ drop f ] }
- { [ dup abs 2 = ]
- [ sgn neg [ select-rotate ] with-side f ] }
- { [ drop t ] [ t ] } ! balance is -1 or 1, tree is taller
- } cond ;
-
-DEFER: avl-set
-
-: avl-insert ( value key node -- node taller? )
- 2dup node-key before? left right ? [
- [ node-link avl-set ] keep swap
- >r tuck set-node-link r>
- [ dup current-side get increase-balance balance-insert ]
- [ f ] if
- ] with-side ;
-
-: (avl-set) ( value key node -- node taller? )
- 2dup node-key = [
- -rot pick set-node-key over set-node-value f
- ] [ avl-insert ] if ;
-
-: avl-set ( value key node -- node taller? )
- [ (avl-set) ] [ swap <avl-node> t ] if* ;
-
-M: avl set-at ( value key node -- node )
- [ avl-set drop ] change-root drop ;
-
-: delete-select-rotate ( node -- node shorter? )
- dup node+link avl-node-balance zero? [
- current-side get neg over set-avl-node-balance
- current-side get over node+link set-avl-node-balance rotate f
- ] [
- select-rotate t
- ] if ;
-
-: rebalance-delete ( node -- node shorter? )
- dup avl-node-balance {
- { [ dup zero? ] [ drop t ] }
- { [ dup abs 2 = ] [ sgn neg [ delete-select-rotate ] with-side ] }
- { [ drop t ] [ f ] } ! balance is -1 or 1, tree is not shorter
- } cond ;
-
-: balance-delete ( node -- node shorter? )
- current-side get over balance>> {
- { [ dup zero? ] [ drop neg over set-avl-node-balance f ] }
- { [ dupd = ] [ drop 0 >>balance t ] }
- [ dupd neg increase-balance rebalance-delete ]
- } cond ;
-
-: avl-replace-with-extremity ( to-replace node -- node shorter? )
- dup node-link [
- swapd avl-replace-with-extremity >r over set-node-link r>
- [ balance-delete ] [ f ] if
- ] [
- tuck copy-node-contents node+link t
- ] if* ;
-
-: replace-with-a-child ( node -- node shorter? )
- #! assumes that node is not a leaf, otherwise will recurse forever
- dup node-link [
- dupd [ avl-replace-with-extremity ] with-other-side
- >r over set-node-link r> [ balance-delete ] [ f ] if
- ] [
- [ replace-with-a-child ] with-other-side
- ] if* ;
-
-: avl-delete-node ( node -- node shorter? )
- #! delete this node, returning its replacement, and whether this subtree is
- #! shorter as a result
- dup leaf? [
- drop f t
- ] [
- left [ replace-with-a-child ] with-side
- ] if ;
-
-GENERIC: avl-delete ( key node -- node shorter? deleted? )
-
-M: f avl-delete ( key f -- f f f ) nip f f ;
-
-: (avl-delete) ( key node -- node shorter? deleted? )
- tuck node-link avl-delete >r >r over set-node-link r>
- [ balance-delete r> ] [ f r> ] if ;
-
-M: avl-node avl-delete ( key node -- node shorter? deleted? )
- 2dup node-key key-side dup zero? [
- drop nip avl-delete-node t
- ] [
- [ (avl-delete) ] with-side
- ] if ;
-
-M: avl delete-at ( key node -- )
- [ avl-delete 2drop ] change-root drop ;
-
-M: avl new-assoc 2drop <avl> ;
-
-: >avl ( assoc -- avl )
- T{ avl f f 0 } assoc-clone-like ;
-
-M: avl assoc-like
- drop dup avl? [ >avl ] unless ;
-
-: AVL{
- \ } [ >avl ] parse-literal ; parsing
-
-M: avl pprint-delims drop \ AVL{ \ } ;
+++ /dev/null
-Balanced AVL trees
+++ /dev/null
-collections
+++ /dev/null
-Mackenzie Straight
-Daniel Ehrenberg
+++ /dev/null
-USING: help.syntax help.markup assocs ;
-IN: trees.splay
-
-HELP: SPLAY{
-{ $syntax "SPLAY{ { key value }... }" }
-{ $values { "key" "a key" } { "value" "a value" } }
-{ $description "Literal syntax for an splay tree." } ;
-
-HELP: <splay>
-{ $values { "tree" splay } }
-{ $description "Creates an empty splay tree" } ;
-
-HELP: >splay
-{ $values { "assoc" assoc } { "tree" splay } }
-{ $description "Converts any " { $link assoc } " into an splay tree." } ;
-
-HELP: splay
-{ $class-description "This is the class for splay trees. Splay trees have amortized average-case logarithmic time storage and retrieval operations, and better complexity on more skewed lookup distributions, though in bad situations they can degrade to linear time, resembling a linked list. These conform to the assoc protocol." } ;
-
-ARTICLE: { "splay" "intro" } "Splay trees"
-"This is a library for splay trees. Splay trees have amortized average-case logarithmic time storage and retrieval operations, and better complexity on more skewed lookup distributions, though in bad situations they can degrade to linear time, resembling a linked list. These trees conform to the assoc protocol."
-{ $subsection splay }
-{ $subsection <splay> }
-{ $subsection >splay }
-{ $subsection POSTPONE: SPLAY{ } ;
-
-ABOUT: { "splay" "intro" }
+++ /dev/null
-! Copyright (c) 2005 Mackenzie Straight.
-! See http://factorcode.org/license.txt for BSD license.
-USING: kernel tools.test trees.splay math namespaces assocs
-sequences random sets ;
-IN: trees.splay.tests
-
-: randomize-numeric-splay-tree ( splay-tree -- )
- 100 [ drop 100 random swap at drop ] with each ;
-
-: make-numeric-splay-tree ( n -- splay-tree )
- <splay> [ [ conjoin ] curry each ] keep ;
-
-[ t ] [
- 100 make-numeric-splay-tree dup randomize-numeric-splay-tree
- [ [ drop , ] assoc-each ] { } make [ < ] monotonic?
-] unit-test
-
-[ 10 ] [ 10 make-numeric-splay-tree keys length ] unit-test
-[ 10 ] [ 10 make-numeric-splay-tree values length ] unit-test
-
-[ f ] [ <splay> f 4 pick set-at 4 swap at ] unit-test
-
-! Ensure that f can be a value
-[ t ] [ <splay> f 4 pick set-at 4 swap key? ] unit-test
-
-[
-{ { 1 "a" } { 2 "b" } { 3 "c" } { 4 "d" } { 5 "e" } { 6 "f" } }
-] [
-{
- { 4 "d" } { 5 "e" } { 6 "f" }
- { 1 "a" } { 2 "b" } { 3 "c" }
-} >splay >alist
-] unit-test
+++ /dev/null
-! Copyright (c) 2005 Mackenzie Straight.
-! See http://factorcode.org/license.txt for BSD license.
-USING: arrays kernel math namespaces sequences assocs parser
-prettyprint.backend trees generic math.order ;
-IN: trees.splay
-
-TUPLE: splay < tree ;
-
-: <splay> ( -- tree )
- \ splay new-tree ;
-
-: rotate-right ( node -- node )
- dup node-left
- [ node-right swap set-node-left ] 2keep
- [ set-node-right ] keep ;
-
-: rotate-left ( node -- node )
- dup node-right
- [ node-left swap set-node-right ] 2keep
- [ set-node-left ] keep ;
-
-: link-right ( left right key node -- left right key node )
- swap >r [ swap set-node-left ] 2keep
- nip dup node-left r> swap ;
-
-: link-left ( left right key node -- left right key node )
- swap >r rot [ set-node-right ] 2keep
- drop dup node-right swapd r> swap ;
-
-: cmp ( key node -- obj node -1/0/1 )
- 2dup node-key key-side ;
-
-: lcmp ( key node -- obj node -1/0/1 )
- 2dup node-left node-key key-side ;
-
-: rcmp ( key node -- obj node -1/0/1 )
- 2dup node-right node-key key-side ;
-
-DEFER: (splay)
-
-: splay-left ( left right key node -- left right key node )
- dup node-left [
- lcmp 0 < [ rotate-right ] when
- dup node-left [ link-right (splay) ] when
- ] when ;
-
-: splay-right ( left right key node -- left right key node )
- dup node-right [
- rcmp 0 > [ rotate-left ] when
- dup node-right [ link-left (splay) ] when
- ] when ;
-
-: (splay) ( left right key node -- left right key node )
- cmp dup 0 <
- [ drop splay-left ] [ 0 > [ splay-right ] when ] if ;
-
-: assemble ( head left right node -- root )
- [ node-right swap set-node-left ] keep
- [ node-left swap set-node-right ] keep
- [ swap node-left swap set-node-right ] 2keep
- [ swap node-right swap set-node-left ] keep ;
-
-: splay-at ( key node -- node )
- >r >r T{ node } clone dup dup r> r>
- (splay) nip assemble ;
-
-: splay ( key tree -- )
- [ tree-root splay-at ] keep set-tree-root ;
-
-: splay-split ( key tree -- node node )
- 2dup splay tree-root cmp 0 < [
- nip dup node-left swap f over set-node-left
- ] [
- nip dup node-right swap f over set-node-right swap
- ] if ;
-
-: get-splay ( key tree -- node ? )
- 2dup splay tree-root cmp 0 = [
- nip t
- ] [
- 2drop f f
- ] if ;
-
-: get-largest ( node -- node )
- dup [ dup node-right [ nip get-largest ] when* ] when ;
-
-: splay-largest ( node -- node )
- dup [ dup get-largest node-key swap splay-at ] when ;
-
-: splay-join ( n2 n1 -- node )
- splay-largest [
- [ set-node-right ] keep
- ] [
- drop f
- ] if* ;
-
-: remove-splay ( key tree -- )
- tuck get-splay nip [
- dup dec-count
- dup node-right swap node-left splay-join
- swap set-tree-root
- ] [ drop ] if* ;
-
-: set-splay ( value key tree -- )
- 2dup get-splay [ 2nip set-node-value ] [
- drop dup inc-count
- 2dup splay-split rot
- >r >r swapd r> node boa r> set-tree-root
- ] if ;
-
-: new-root ( value key tree -- )
- [ 1 swap set-tree-count ] keep
- >r swap <node> r> set-tree-root ;
-
-M: splay set-at ( value key tree -- )
- dup tree-root [ set-splay ] [ new-root ] if ;
-
-M: splay at* ( key tree -- value ? )
- dup tree-root [
- get-splay >r dup [ node-value ] when r>
- ] [
- 2drop f f
- ] if ;
-
-M: splay delete-at ( key tree -- )
- dup tree-root [ remove-splay ] [ 2drop ] if ;
-
-M: splay new-assoc
- 2drop <splay> ;
-
-: >splay ( assoc -- tree )
- T{ splay f f 0 } assoc-clone-like ;
-
-: SPLAY{
- \ } [ >splay ] parse-literal ; parsing
-
-M: splay assoc-like
- drop dup splay? [ >splay ] unless ;
-
-M: splay pprint-delims drop \ SPLAY{ \ } ;
+++ /dev/null
-Splay trees
+++ /dev/null
-collections
-trees
+++ /dev/null
-Binary search trees
+++ /dev/null
-collections
-trees
+++ /dev/null
-USING: help.syntax help.markup assocs ;
-IN: trees
-
-HELP: TREE{
-{ $syntax "TREE{ { key value }... }" }
-{ $values { "key" "a key" } { "value" "a value" } }
-{ $description "Literal syntax for an unbalanced tree." } ;
-
-HELP: <tree>
-{ $values { "tree" tree } }
-{ $description "Creates an empty unbalanced binary tree" } ;
-
-HELP: >tree
-{ $values { "assoc" assoc } { "tree" tree } }
-{ $description "Converts any " { $link assoc } " into an unbalanced binary tree." } ;
-
-HELP: tree
-{ $class-description "This is the class for unbalanced binary search trees. It is not usually intended to be used directly but rather as a basis for other trees." } ;
-
-ARTICLE: { "trees" "intro" } "Binary search trees"
-"This is a library for unbalanced binary search trees. It is not intended to be used directly in most situations but rather as a base class for new trees, because performance can degrade to linear time storage/retrieval by the number of keys. These binary search trees conform to the assoc protocol."
-{ $subsection tree }
-{ $subsection <tree> }
-{ $subsection >tree }
-{ $subsection POSTPONE: TREE{ } ;
-
-IN: trees
-ABOUT: { "trees" "intro" }
+++ /dev/null
-USING: trees assocs tools.test kernel sequences ;
-IN: trees.tests
-
-: test-tree ( -- tree )
- TREE{
- { 7 "seven" }
- { 9 "nine" }
- { 4 "four" }
- { 4 "replaced four" }
- { 7 "replaced seven" }
- } clone ;
-
-! test set-at, at, at*
-[ "seven" ] [ <tree> "seven" 7 pick set-at 7 swap at ] unit-test
-[ "seven" t ] [ <tree> "seven" 7 pick set-at 7 swap at* ] unit-test
-[ f f ] [ <tree> "seven" 7 pick set-at 8 swap at* ] unit-test
-[ "seven" ] [ <tree> "seven" 7 pick set-at 7 swap at ] unit-test
-[ "replacement" ] [ <tree> "seven" 7 pick set-at "replacement" 7 pick set-at 7 swap at ] unit-test
-[ "replaced four" ] [ test-tree 4 swap at ] unit-test
-[ "nine" ] [ test-tree 9 swap at ] unit-test
-
-! test delete-at
-[ f ] [ test-tree 9 over delete-at 9 swap at ] unit-test
-[ "replaced seven" ] [ test-tree 9 over delete-at 7 swap at ] unit-test
-[ "replaced four" ] [ test-tree 9 over delete-at 4 swap at ] unit-test
-[ "nine" "replaced four" ] [ test-tree 7 over delete-at 9 over at 4 rot at ] unit-test
-[ "nine" ] [ test-tree 7 over delete-at 4 over delete-at 9 swap at ] unit-test
-
+++ /dev/null
-! Copyright (C) 2007 Alex Chapman
-! See http://factorcode.org/license.txt for BSD license.
-USING: kernel generic math sequences arrays io namespaces
-prettyprint.private kernel.private assocs random combinators
-parser prettyprint.backend math.order accessors ;
-IN: trees
-
-TUPLE: tree root count ;
-
-: new-tree ( class -- tree )
- new
- f >>root
- 0 >>count ; inline
-
-: <tree> ( -- tree )
- tree new-tree ;
-
-INSTANCE: tree assoc
-
-TUPLE: node key value left right ;
-
-: new-node ( key value class -- node )
- new swap >>value swap >>key ;
-
-: <node> ( key value -- node )
- node new-node ;
-
-SYMBOL: current-side
-
-: left ( -- symbol ) -1 ; inline
-: right ( -- symbol ) 1 ; inline
-
-: key-side ( k1 k2 -- n )
- <=> {
- { +lt+ [ -1 ] }
- { +eq+ [ 0 ] }
- { +gt+ [ 1 ] }
- } case ;
-
-: go-left? ( -- ? ) current-side get left eq? ;
-
-: inc-count ( tree -- ) [ 1+ ] change-count drop ;
-
-: dec-count ( tree -- ) [ 1- ] change-count drop ;
-
-: node-link@ ( node ? -- node )
- go-left? xor [ left>> ] [ right>> ] if ;
-: set-node-link@ ( left parent ? -- )
- go-left? xor [ set-node-left ] [ set-node-right ] if ;
-
-: node-link ( node -- child ) f node-link@ ;
-: set-node-link ( child node -- ) f set-node-link@ ;
-: node+link ( node -- child ) t node-link@ ;
-: set-node+link ( child node -- ) t set-node-link@ ;
-
-: with-side ( side quot -- ) [ swap current-side set call ] with-scope ; inline
-: with-other-side ( quot -- )
- current-side get neg swap with-side ; inline
-: go-left ( quot -- ) left swap with-side ; inline
-: go-right ( quot -- ) right swap with-side ; inline
-
-: leaf? ( node -- ? )
- [ left>> ] [ right>> ] bi or not ;
-
-: random-side ( -- side ) left right 2array random ;
-
-: choose-branch ( key node -- key node-left/right )
- 2dup node-key key-side [ node-link ] with-side ;
-
-: node-at* ( key node -- value ? )
- [
- 2dup node-key = [
- nip node-value t
- ] [
- choose-branch node-at*
- ] if
- ] [ drop f f ] if* ;
-
-M: tree at* ( key tree -- value ? )
- root>> node-at* ;
-
-: node-set ( value key node -- node )
- 2dup key>> key-side dup 0 eq? [
- drop nip swap >>value
- ] [
- [
- [ node-link [ node-set ] [ swap <node> ] if* ] keep
- [ set-node-link ] keep
- ] with-side
- ] if ;
-
-M: tree set-at ( value key tree -- )
- [ [ node-set ] [ swap <node> ] if* ] change-root drop ;
-
-: valid-node? ( node -- ? )
- [
- dup dup left>> [ node-key swap node-key before? ] when* >r
- dup dup right>> [ node-key swap node-key after? ] when* r> and swap
- dup left>> valid-node? swap right>> valid-node? and and
- ] [ t ] if* ;
-
-: valid-tree? ( tree -- ? ) root>> valid-node? ;
-
-: (node>alist) ( node -- )
- [
- [ left>> (node>alist) ]
- [ [ node-key ] [ node-value ] bi 2array , ]
- [ right>> (node>alist) ]
- tri
- ] when* ;
-
-M: tree >alist [ root>> (node>alist) ] { } make ;
-
-M: tree clear-assoc
- 0 >>count
- f >>root drop ;
-
-: copy-node-contents ( new old -- )
- dup node-key pick set-node-key node-value swap set-node-value ;
-
-! Deletion
-DEFER: delete-node
-
-: (prune-extremity) ( parent node -- new-extremity )
- dup node-link [
- rot drop (prune-extremity)
- ] [
- tuck delete-node swap set-node-link
- ] if* ;
-
-: prune-extremity ( node -- new-extremity )
- #! remove and return the leftmost or rightmost child of this node.
- #! assumes at least one child
- dup node-link (prune-extremity) ;
-
-: replace-with-child ( node -- node )
- dup dup node-link copy-node-contents dup node-link delete-node over set-node-link ;
-
-: replace-with-extremity ( node -- node )
- dup node-link dup node+link [
- ! predecessor/successor is not the immediate child
- [ prune-extremity ] with-other-side dupd copy-node-contents
- ] [
- ! node-link is the predecessor/successor
- drop replace-with-child
- ] if ;
-
-: delete-node-with-two-children ( node -- node )
- #! randomised to minimise tree unbalancing
- random-side [ replace-with-extremity ] with-side ;
-
-: delete-node ( node -- node )
- #! delete this node, returning its replacement
- dup left>> [
- dup right>> [
- delete-node-with-two-children
- ] [
- left>> ! left but no right
- ] if
- ] [
- dup right>> [
- right>> ! right but not left
- ] [
- drop f ! no children
- ] if
- ] if ;
-
-: delete-bst-node ( key node -- node )
- 2dup node-key key-side dup 0 eq? [
- drop nip delete-node
- ] [
- [ tuck node-link delete-bst-node over set-node-link ] with-side
- ] if ;
-
-M: tree delete-at
- [ delete-bst-node ] change-root drop ;
-
-M: tree new-assoc
- 2drop <tree> ;
-
-M: tree clone dup assoc-clone-like ;
-
-: >tree ( assoc -- tree )
- T{ tree f f 0 } assoc-clone-like ;
-
-M: tree assoc-like drop dup tree? [ >tree ] unless ;
-
-: TREE{
- \ } [ >tree ] parse-literal ; parsing
-
-M: tree pprint-delims drop \ TREE{ \ } ;
-M: tree assoc-size count>> ;
-M: tree >pprint-sequence >alist ;
-M: tree pprint-narrow? drop t ;