[Groonga-commit] groonga/groonga at 1733445 [master] doc: add examples for tokenize command

Back to archive index

Kouhei Sutou null+****@clear*****
Tue May 28 12:06:16 JST 2013


Kouhei Sutou	2013-05-28 12:06:16 +0900 (Tue, 28 May 2013)

  New Revision: 17334457b4f19bd6aa9e4180acaa1b229b63a66f
  https://github.com/groonga/groonga/commit/17334457b4f19bd6aa9e4180acaa1b229b63a66f

  Message:
    doc: add examples for tokenize command
    
        % make update-examples

  Added files:
    doc/source/example/reference/commands/tokenize/flags_enable_tokenized_delimiter.log
    doc/source/example/reference/commands/tokenize/normalizer_none.log
    doc/source/example/reference/commands/tokenize/normalizer_use.log
    doc/source/example/reference/commands/tokenize/normalizer_use_with_split_symbol_alpha.log
    doc/source/example/reference/commands/tokenize/simple_example.log
    doc/source/example/reference/commands/tokenize/string_include_spaces.log
    doc/source/example/reference/commands/tokenize/tokenizer_token_trigram.log

  Added: doc/source/example/reference/commands/tokenize/flags_enable_tokenized_delimiter.log (+24 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/flags_enable_tokenized_delimiter.log    2013-05-28 12:06:16 +0900 (09896a3)
@@ -0,0 +1,24 @@
+Execution example::
+
+  tokenize TokenDelimit "Full￾text Sea￾crch" NormalizerAuto ENABLE_TOKENIZED_DELIMITER
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "full"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "text sea"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "crch"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/normalizer_none.log (+72 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/normalizer_none.log    2013-05-28 12:06:16 +0900 (5da5a61)
@@ -0,0 +1,72 @@
+Execution example::
+
+  tokenize TokenBigram "Fulltext Search"
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "Fu"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "ul"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "ll"
+  #     }, 
+  #     {
+  #       "position": 3, 
+  #       "value": "lt"
+  #     }, 
+  #     {
+  #       "position": 4, 
+  #       "value": "te"
+  #     }, 
+  #     {
+  #       "position": 5, 
+  #       "value": "ex"
+  #     }, 
+  #     {
+  #       "position": 6, 
+  #       "value": "xt"
+  #     }, 
+  #     {
+  #       "position": 7, 
+  #       "value": "t "
+  #     }, 
+  #     {
+  #       "position": 8, 
+  #       "value": " S"
+  #     }, 
+  #     {
+  #       "position": 9, 
+  #       "value": "Se"
+  #     }, 
+  #     {
+  #       "position": 10, 
+  #       "value": "ea"
+  #     }, 
+  #     {
+  #       "position": 11, 
+  #       "value": "ar"
+  #     }, 
+  #     {
+  #       "position": 12, 
+  #       "value": "rc"
+  #     }, 
+  #     {
+  #       "position": 13, 
+  #       "value": "ch"
+  #     }, 
+  #     {
+  #       "position": 14, 
+  #       "value": "h"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/normalizer_use.log (+20 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/normalizer_use.log    2013-05-28 12:06:16 +0900 (2d2ca20)
@@ -0,0 +1,20 @@
+Execution example::
+
+  tokenize TokenBigram "Fulltext Search" NormalizerAuto
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "fulltext"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "search"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/normalizer_use_with_split_symbol_alpha.log (+68 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/normalizer_use_with_split_symbol_alpha.log    2013-05-28 12:06:16 +0900 (be0ed48)
@@ -0,0 +1,68 @@
+Execution example::
+
+  tokenize TokenBigramSplitSymbolAlpha "Fulltext Search" NormalizerAuto
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "fu"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "ul"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "ll"
+  #     }, 
+  #     {
+  #       "position": 3, 
+  #       "value": "lt"
+  #     }, 
+  #     {
+  #       "position": 4, 
+  #       "value": "te"
+  #     }, 
+  #     {
+  #       "position": 5, 
+  #       "value": "ex"
+  #     }, 
+  #     {
+  #       "position": 6, 
+  #       "value": "xt"
+  #     }, 
+  #     {
+  #       "position": 7, 
+  #       "value": "t"
+  #     }, 
+  #     {
+  #       "position": 8, 
+  #       "value": "se"
+  #     }, 
+  #     {
+  #       "position": 9, 
+  #       "value": "ea"
+  #     }, 
+  #     {
+  #       "position": 10, 
+  #       "value": "ar"
+  #     }, 
+  #     {
+  #       "position": 11, 
+  #       "value": "rc"
+  #     }, 
+  #     {
+  #       "position": 12, 
+  #       "value": "ch"
+  #     }, 
+  #     {
+  #       "position": 13, 
+  #       "value": "h"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/simple_example.log (+72 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/simple_example.log    2013-05-28 12:06:16 +0900 (5da5a61)
@@ -0,0 +1,72 @@
+Execution example::
+
+  tokenize TokenBigram "Fulltext Search"
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "Fu"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "ul"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "ll"
+  #     }, 
+  #     {
+  #       "position": 3, 
+  #       "value": "lt"
+  #     }, 
+  #     {
+  #       "position": 4, 
+  #       "value": "te"
+  #     }, 
+  #     {
+  #       "position": 5, 
+  #       "value": "ex"
+  #     }, 
+  #     {
+  #       "position": 6, 
+  #       "value": "xt"
+  #     }, 
+  #     {
+  #       "position": 7, 
+  #       "value": "t "
+  #     }, 
+  #     {
+  #       "position": 8, 
+  #       "value": " S"
+  #     }, 
+  #     {
+  #       "position": 9, 
+  #       "value": "Se"
+  #     }, 
+  #     {
+  #       "position": 10, 
+  #       "value": "ea"
+  #     }, 
+  #     {
+  #       "position": 11, 
+  #       "value": "ar"
+  #     }, 
+  #     {
+  #       "position": 12, 
+  #       "value": "rc"
+  #     }, 
+  #     {
+  #       "position": 13, 
+  #       "value": "ch"
+  #     }, 
+  #     {
+  #       "position": 14, 
+  #       "value": "h"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/string_include_spaces.log (+172 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/string_include_spaces.log    2013-05-28 12:06:16 +0900 (55e5e0b)
@@ -0,0 +1,172 @@
+Execution example::
+
+  tokenize TokenBigram "Groonga is a fast fulltext earch engine!"
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "Gr"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "ro"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "oo"
+  #     }, 
+  #     {
+  #       "position": 3, 
+  #       "value": "on"
+  #     }, 
+  #     {
+  #       "position": 4, 
+  #       "value": "ng"
+  #     }, 
+  #     {
+  #       "position": 5, 
+  #       "value": "ga"
+  #     }, 
+  #     {
+  #       "position": 6, 
+  #       "value": "a "
+  #     }, 
+  #     {
+  #       "position": 7, 
+  #       "value": " i"
+  #     }, 
+  #     {
+  #       "position": 8, 
+  #       "value": "is"
+  #     }, 
+  #     {
+  #       "position": 9, 
+  #       "value": "s "
+  #     }, 
+  #     {
+  #       "position": 10, 
+  #       "value": " a"
+  #     }, 
+  #     {
+  #       "position": 11, 
+  #       "value": "a "
+  #     }, 
+  #     {
+  #       "position": 12, 
+  #       "value": " f"
+  #     }, 
+  #     {
+  #       "position": 13, 
+  #       "value": "fa"
+  #     }, 
+  #     {
+  #       "position": 14, 
+  #       "value": "as"
+  #     }, 
+  #     {
+  #       "position": 15, 
+  #       "value": "st"
+  #     }, 
+  #     {
+  #       "position": 16, 
+  #       "value": "t "
+  #     }, 
+  #     {
+  #       "position": 17, 
+  #       "value": " f"
+  #     }, 
+  #     {
+  #       "position": 18, 
+  #       "value": "fu"
+  #     }, 
+  #     {
+  #       "position": 19, 
+  #       "value": "ul"
+  #     }, 
+  #     {
+  #       "position": 20, 
+  #       "value": "ll"
+  #     }, 
+  #     {
+  #       "position": 21, 
+  #       "value": "lt"
+  #     }, 
+  #     {
+  #       "position": 22, 
+  #       "value": "te"
+  #     }, 
+  #     {
+  #       "position": 23, 
+  #       "value": "ex"
+  #     }, 
+  #     {
+  #       "position": 24, 
+  #       "value": "xt"
+  #     }, 
+  #     {
+  #       "position": 25, 
+  #       "value": "t "
+  #     }, 
+  #     {
+  #       "position": 26, 
+  #       "value": " e"
+  #     }, 
+  #     {
+  #       "position": 27, 
+  #       "value": "ea"
+  #     }, 
+  #     {
+  #       "position": 28, 
+  #       "value": "ar"
+  #     }, 
+  #     {
+  #       "position": 29, 
+  #       "value": "rc"
+  #     }, 
+  #     {
+  #       "position": 30, 
+  #       "value": "ch"
+  #     }, 
+  #     {
+  #       "position": 31, 
+  #       "value": "h "
+  #     }, 
+  #     {
+  #       "position": 32, 
+  #       "value": " e"
+  #     }, 
+  #     {
+  #       "position": 33, 
+  #       "value": "en"
+  #     }, 
+  #     {
+  #       "position": 34, 
+  #       "value": "ng"
+  #     }, 
+  #     {
+  #       "position": 35, 
+  #       "value": "gi"
+  #     }, 
+  #     {
+  #       "position": 36, 
+  #       "value": "in"
+  #     }, 
+  #     {
+  #       "position": 37, 
+  #       "value": "ne"
+  #     }, 
+  #     {
+  #       "position": 38, 
+  #       "value": "e!"
+  #     }, 
+  #     {
+  #       "position": 39, 
+  #       "value": "!"
+  #     }
+  #   ]
+  # ]

  Added: doc/source/example/reference/commands/tokenize/tokenizer_token_trigram.log (+72 -0) 100644
===================================================================
--- /dev/null
+++ doc/source/example/reference/commands/tokenize/tokenizer_token_trigram.log    2013-05-28 12:06:16 +0900 (2d744e4)
@@ -0,0 +1,72 @@
+Execution example::
+
+  tokenize TokenTrigram "Fulltext Search"
+  # [
+  #   [
+  #     0, 
+  #     1337566253.89858, 
+  #     0.000355720520019531
+  #   ], 
+  #   [
+  #     {
+  #       "position": 0, 
+  #       "value": "Ful"
+  #     }, 
+  #     {
+  #       "position": 1, 
+  #       "value": "ull"
+  #     }, 
+  #     {
+  #       "position": 2, 
+  #       "value": "llt"
+  #     }, 
+  #     {
+  #       "position": 3, 
+  #       "value": "lte"
+  #     }, 
+  #     {
+  #       "position": 4, 
+  #       "value": "tex"
+  #     }, 
+  #     {
+  #       "position": 5, 
+  #       "value": "ext"
+  #     }, 
+  #     {
+  #       "position": 6, 
+  #       "value": "xt "
+  #     }, 
+  #     {
+  #       "position": 7, 
+  #       "value": "t S"
+  #     }, 
+  #     {
+  #       "position": 8, 
+  #       "value": " Se"
+  #     }, 
+  #     {
+  #       "position": 9, 
+  #       "value": "Sea"
+  #     }, 
+  #     {
+  #       "position": 10, 
+  #       "value": "ear"
+  #     }, 
+  #     {
+  #       "position": 11, 
+  #       "value": "arc"
+  #     }, 
+  #     {
+  #       "position": 12, 
+  #       "value": "rch"
+  #     }, 
+  #     {
+  #       "position": 13, 
+  #       "value": "ch"
+  #     }, 
+  #     {
+  #       "position": 14, 
+  #       "value": "h"
+  #     }
+  #   ]
+  # ]
-------------- next part --------------
An HTML attachment was scrubbed...
Download 



More information about the Groonga-commit mailing list
Back to archive index