Grammalecte Check-in [18db5d65f0]

Overview
Comment:[core][cli][server][graphspell][fx] use spellchecker instead of ibdawg
SHA3-256: 18db5d65f059826efb9bcdcfbdeb764561471eb3b69fb11dd4b4b7d301097d5c
User & Date: olr on 2018-02-13 15:44:31
Context
2018-02-13
15:57  [cli] suggest() in spellchecker is a generator   check-in: 966babe645 user: olr tags: cli, multid
15:44  [core][cli][server][graphspell][fx] use spellchecker instead of ibdawg   check-in: 18db5d65f0 user: olr tags: cli, core, fx, graphspell, multid, server
14:26  [core] use spellchecker instead of ibdawg directly   check-in: 62304c0cd5 user: olr tags: core, multid
Changes

Modified gc_core/js/lang_core/gc_engine.js from [dcb651b444] to [454f9a423d].

   331    331               _dOptions = gc_options.getOptions(sContext).gl_shallowCopy();     // duplication necessary, to be able to reset to default
   332    332           }
   333    333           catch (e) {
   334    334               helpers.logerror(e);
   335    335           }
   336    336       },
   337    337   
   338         -    getDictionary: function () {
          338  +    getSpellChecker: function () {
   339    339           return _oSpellChecker;
   340    340       },
   341    341   
   342    342       //// Options
   343    343   
   344    344       setOption: function (sOpt, bVal) {
   345    345           if (_dOptions.has(sOpt)) {
................................................................................
   640    640       exports._rewrite = gc_engine._rewrite;
   641    641       exports.ignoreRule = gc_engine.ignoreRule;
   642    642       exports.resetIgnoreRules = gc_engine.resetIgnoreRules;
   643    643       exports.reactivateRule = gc_engine.reactivateRule;
   644    644       exports.listRules = gc_engine.listRules;
   645    645       exports._getRules = gc_engine._getRules;
   646    646       exports.load = gc_engine.load;
   647         -    exports.getDictionary = gc_engine.getDictionary;
          647  +    exports.getSpellChecker = gc_engine.getSpellChecker;
   648    648       exports.setOption = gc_engine.setOption;
   649    649       exports.setOptions = gc_engine.setOptions;
   650    650       exports.getOptions = gc_engine.getOptions;
   651    651       exports.getDefaultOptions = gc_engine.getDefaultOptions;
   652    652       exports.resetOptions = gc_engine.resetOptions;
   653    653   }

Modified gc_core/py/lang_core/gc_engine.py from [e36972dc16] to [eded256052].

    10     10   
    11     11   from ..graphspell.spellchecker import SpellChecker
    12     12   from ..graphspell.echo import echo
    13     13   from . import gc_options
    14     14   
    15     15   
    16     16   __all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
    17         -            "load", "parse", "getDictionary", \
           17  +            "load", "parse", "getSpellChecker", \
    18     18               "setOption", "setOptions", "getOptions", "getDefaultOptions", "getOptionsLabels", "resetOptions", "displayOptions", \
    19     19               "ignoreRule", "resetIgnoreRules", "reactivateRule", "listRules", "displayRules" ]
    20     20   
    21     21   __version__ = "${version}"
    22     22   
    23     23   
    24     24   lang = "${lang}"
................................................................................
   329    329   
   330    330   
   331    331   def resetOptions ():
   332    332       global _dOptions
   333    333       _dOptions = dict(gc_options.getOptions(_sAppContext))
   334    334   
   335    335   
   336         -def getDictionary ():
          336  +def getSpellChecker ():
   337    337       return _oSpellChecker
   338    338   
   339    339   
   340    340   def _getRules (bParagraph):
   341    341       try:
   342    342           if not bParagraph:
   343    343               return _rules.lSentenceRules
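
As a minimal sketch (not part of this check-in) of how a Python caller now obtains and queries the spellchecker; the import path is an assumption, while load(), getSpellChecker(), isValidToken() and getMorph() all appear in the diffs on this page:

    import grammalecte.fr as gce               # hypothetical import path
    gce.load()
    oSpellChecker = gce.getSpellChecker()      # formerly gce.getDictionary()
    oSpellChecker.isValidToken("maison")       # spelling check, as used by the tokenizer loop
    oSpellChecker.getMorph("maison")           # morphologies, as used by the lexicographer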

Modified gc_lang/fr/modules-js/lexicographe.js from [be510450a4] to [e3263a5103].

   222    222       ['<', "inférieur à"],
   223    223       ['>', "supérieur à"],
   224    224   ]);
   225    225   
   226    226   
   227    227   class Lexicographe {
   228    228   
   229         -    constructor (oDict, oTokenizer, oLocGraph) {
   230         -        this.oDict = oDict;
          229  +    constructor (oSpellChecker, oTokenizer, oLocGraph) {
          230  +        this.oSpellChecker = oSpellChecker;
   231    231           this.oTokenizer = oTokenizer;
   232    232           this.oLocGraph = JSON.parse(oLocGraph);
   233    233   
   234    234           this._zPartDemForm = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(là|ci)$", "i");
   235    235           this._aPartDemExceptList = new Set(["celui", "celle", "ceux", "celles", "de", "jusque", "par", "marie-couche-toi"]);
   236    236           this._zInterroVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-(t-(?:il|elle|on)|je|tu|ils?|elles?|on|[nv]ous)$", "i");
   237    237           this._zImperatifVerb = new RegExp("([a-zA-Zà-ö0-9À-Öø-ÿØ-ßĀ-ʯ]+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|y|en|[mts][’'](?:y|en)|les?|la|[mt]oi|leur|lui)$", "i");
................................................................................
   337    337                               sValue: oToken.sValue,
   338    338                               aLabel: ["forme verbale interrogative"],
   339    339                               aSubElem: [
   340    340                                   { sType: oToken.sType, sValue: m[1],       aLabel: this._getMorph(m[1]) },
   341    341                                   { sType: oToken.sType, sValue: "-" + m[2], aLabel: [this._formatSuffix(m[2].toLowerCase())] }
   342    342                               ]
   343    343                           };
   344         -                    } else if (this.oDict.isValidToken(oToken.sValue)) {
          344  +                    } else if (this.oSpellChecker.isValidToken(oToken.sValue)) {
   345    345                           return {
   346    346                               sType: oToken.sType,
   347    347                               sValue: oToken.sValue,
   348    348                               aLabel: this._getMorph(oToken.sValue)
   349    349                           };
   350    350                       } else {
   351    351                           return {
................................................................................
   360    360               helpers.logerror(e);
   361    361           }
   362    362           return null;
   363    363       }
   364    364   
   365    365       _getMorph (sWord) {
   366    366           let aElem = [];
   367         -        for (let s of this.oDict.getMorph(sWord)) {
          367  +        for (let s of this.oSpellChecker.getMorph(sWord)) {
   368    368               if (s.includes(":")) aElem.push(this._formatTags(s));
   369    369           }
   370    370           if (aElem.length == 0) {
   371    371               aElem.push("mot inconnu du dictionnaire");
   372    372           }
   373    373           return aElem;
   374    374       }

Modified gc_lang/fr/modules/lexicographe.py from [7b36598d08] to [75ede82f17].

   153    153       "t'en": " (te) pronom personnel objet + (en) pronom adverbial",
   154    154       "s'en": " (se) pronom personnel objet + (en) pronom adverbial",
   155    155   }
   156    156   
   157    157   
   158    158   class Lexicographe:
   159    159   
   160         -    def __init__ (self, oDict):
   161         -        self.oDict = oDict
          160  +    def __init__ (self, oSpellChecker):
          161  +        self.oSpellChecker = oSpellChecker
   162    162           self._zElidedPrefix = re.compile("(?i)^([dljmtsncç]|quoiqu|lorsqu|jusqu|puisqu|qu)['’](.+)")
   163    163           self._zCompoundWord = re.compile("(?i)(\\w+)-((?:les?|la)-(?:moi|toi|lui|[nv]ous|leur)|t-(?:il|elle|on)|y|en|[mts][’'](?:y|en)|les?|l[aà]|[mt]oi|leur|lui|je|tu|ils?|elles?|on|[nv]ous)$")
   164    164           self._zTag = re.compile("[:;/][\\w*][^:;/]*")
   165    165   
   166    166       def analyzeWord (self, sWord):
   167    167           try:
   168    168               if not sWord:
................................................................................
   179    179                   sWord = m.group(2)
   180    180                   aMorph.append( "{}’ : {}".format(m.group(1), _dPFX.get(m.group(1).lower(), "[?]")) )
   181    181               # mots composés
   182    182               m2 = self._zCompoundWord.match(sWord)
   183    183               if m2:
   184    184                   sWord = m2.group(1)
   185    185               # Morphologies
   186         -            lMorph = self.oDict.getMorph(sWord)
          186  +            lMorph = self.oSpellChecker.getMorph(sWord)
   187    187               if len(lMorph) > 1:
   188    188                   # sublist
   189    189                   aMorph.append( (sWord, [ self.formatTags(s)  for s in lMorph  if ":" in s ]) )
   190    190               elif len(lMorph) == 1:
   191    191                   aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
   192    192               else:
   193    193                   aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )
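
A minimal sketch of the adjusted constructor, mirroring how grammalecte-cli.py builds the lexicographer further down; import paths and the sample word are illustrative assumptions:

    import grammalecte.fr as gce                     # hypothetical import path
    import grammalecte.fr.lexicographe as lxg        # hypothetical import path
    gce.load()
    oSpellChecker = gce.getSpellChecker()
    oLexGraphe = lxg.Lexicographe(oSpellChecker)     # formerly lxg.Lexicographe(oDict)
    for sMorph in oSpellChecker.getMorph("chanté"):  # illustrative word
        print(oLexGraphe.formatTags(sMorph))         # same pattern as the CLI's "?word" command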

Modified gc_lang/fr/webext/gce_worker.js from [30916bedcd] to [fb2b2e5711].

   133    133       }
   134    134   }
   135    135   
   136    136   
   137    137   
   138    138   let bInitDone = false;
   139    139   
   140         -let oDict = null;
          140  +let oSpellChecker = null;
   141    141   let oTokenizer = null;
   142    142   let oLxg = null;
   143    143   let oTest = null;
   144    144   let oLocution = null;
   145    145   
   146    146   
   147    147   /*
................................................................................
   158    158           if (!bInitDone) {
   159    159               //console.log("[Worker] Loading… Extension path: " + sExtensionPath);
   160    160               conj.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/conj_data.json"));
   161    161               phonet.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/phonet_data.json"));
   162    162               mfsp.init(helpers.loadFile(sExtensionPath + "/grammalecte/fr/mfsp_data.json"));
   163    163               //console.log("[Worker] Modules have been initialized…");
   164    164               gc_engine.load(sContext, sExtensionPath+"grammalecte/graphspell/_dictionaries");
   165         -            oDict = gc_engine.getDictionary();
          165  +            oSpellChecker = gc_engine.getSpellChecker();
   166    166               oTest = new TestGrammarChecking(gc_engine, sExtensionPath+"/grammalecte/fr/tests_data.json");
   167    167               oTokenizer = new Tokenizer("fr");
   168    168   
   169    169               oLocution =  helpers.loadFile(sExtensionPath + "/grammalecte/fr/locutions_data.json");
   170    170   
   171         -            oLxg = new Lexicographe(oDict, oTokenizer, oLocution);
          171  +            oLxg = new Lexicographe(oSpellChecker, oTokenizer, oLocution);
   172    172               if (dOptions !== null) {
   173    173                   gc_engine.setOptions(dOptions);
   174    174               }
   175    175               //tests();
   176    176               bInitDone = true;
   177    177           } else {
   178    178               console.log("[Worker] Already initialized…")
................................................................................
   197    197   }
   198    198   
   199    199   function parseAndSpellcheck (sText, sCountry, bDebug, bContext, dInfo={}) {
   200    200       let i = 0;
   201    201       sText = sText.replace(/­/g, "").normalize("NFC");
   202    202       for (let sParagraph of text.getParagraph(sText)) {
   203    203           let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
   204         -        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
          204  +        let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
   205    205           postMessage(createResponse("parseAndSpellcheck", {sParagraph: sParagraph, iParaNum: i, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, false));
   206    206           i += 1;
   207    207       }
   208    208       postMessage(createResponse("parseAndSpellcheck", null, dInfo, true));
   209    209   }
   210    210   
   211    211   function parseAndSpellcheck1 (sParagraph, sCountry, bDebug, bContext, dInfo={}) {
   212    212       sParagraph = sParagraph.replace(/­/g, "").normalize("NFC");
   213    213       let aGrammErr = gc_engine.parse(sParagraph, sCountry, bDebug, bContext);
   214         -    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oDict);
          214  +    let aSpellErr = oTokenizer.getSpellingErrors(sParagraph, oSpellChecker);
   215    215       postMessage(createResponse("parseAndSpellcheck1", {sParagraph: sParagraph, aGrammErr: aGrammErr, aSpellErr: aSpellErr}, dInfo, true));
   216    216   }
   217    217   
   218    218   function getOptions (dInfo={}) {
   219    219       postMessage(createResponse("getOptions", gc_engine.getOptions(), dInfo, true));
   220    220   }
   221    221   
................................................................................
   288    288       postMessage(createResponse("fullTests", sMsg, dInfo, true));
   289    289   }
   290    290   
   291    291   
   292    292   // Spellchecker
   293    293   
   294    294   function getSpellSuggestions (sWord, dInfo) {
   295         -    if (!oDict) {
          295  +    if (!oSpellChecker) {
   296    296           postMessage(createResponse("getSpellSuggestions", "# Error. Dictionary not loaded.", dInfo, true));
   297    297           return;
   298    298       }
   299         -    let aSugg = oDict.suggest(sWord);
   300         -    postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg}, dInfo, true));
          299  +    let i = 1;
          300  +    for (let aSugg of oSpellChecker.suggest(sWord)) {
          301  +        postMessage(createResponse("getSpellSuggestions", {sWord: sWord, aSugg: aSugg, iSugg: i}, dInfo, true));
          302  +        i += 1;
          303  +    }
   301    304   }
   302    305   
   303    306   
   304    307   // Lexicographer
   305    308   
   306    309   function getListOfTokens (sText, dInfo={}) {
   307    310       try {
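
Note that getSpellSuggestions() now iterates over oSpellChecker.suggest(sWord) and posts one message per batch of suggestions instead of a single list; the follow-up check-in 966babe645 gives suggest() the same generator behaviour on the CLI side. A rough Python analogue of that consumer pattern, assuming suggest() yields lists of suggestions (an assumption at this check-in):

    import grammalecte.fr as gce   # hypothetical import path
    gce.load()
    oSpellChecker = gce.getSpellChecker()
    for i, aSugg in enumerate(oSpellChecker.suggest("bonjoru"), 1):   # illustrative misspelling
        print(i, " | ".join(aSugg))   # one batch of suggestions per yielded list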

Modified grammalecte-cli.py from [07800caa2b] to [93945e25e2].

    40     40       if sys.platform == "win32":
    41     41           # Apparently, the console transforms «’» in «'».
    42     42           # So we reverse it to avoid many useless warnings.
    43     43           sText = sText.replace("'", "’")
    44     44       return sText
    45     45   
    46     46   
    47         -def _getErrors (sText, oTokenizer, oDict, bContext=False, bSpellSugg=False, bDebug=False):
           47  +def _getErrors (sText, oTokenizer, oSpellChecker, bContext=False, bSpellSugg=False, bDebug=False):
    48     48       "returns a tuple: (grammar errors, spelling errors)"
    49     49       aGrammErrs = gce.parse(sText, "FR", bDebug=bDebug, bContext=bContext)
    50     50       aSpellErrs = []
    51     51       for dToken in oTokenizer.genTokens(sText):
    52         -        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
           52  +        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
    53     53               if bSpellSugg:
    54         -                dToken['aSuggestions'] = oDict.suggest(dToken['sValue'])
           54  +                dToken['aSuggestions'] = oSpellChecker.suggest(dToken['sValue'])
    55     55               aSpellErrs.append(dToken)
    56     56       return aGrammErrs, aSpellErrs
    57     57   
    58     58   
    59         -def generateText (sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
    60         -    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bSpellSugg, bDebug)
           59  +def generateText (sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100):
           60  +    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, False, bSpellSugg, bDebug)
    61     61       if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
    62     62           return ""
    63     63       return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
    64     64   
    65     65   
    66         -def generateJSON (iIndex, sText, oTokenizer, oDict, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
    67         -    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, bContext, bSpellSugg, bDebug)
           66  +def generateJSON (iIndex, sText, oTokenizer, oSpellChecker, bContext=False, bDebug=False, bEmptyIfNoErrors=False, bSpellSugg=False, lLineSet=None, bReturnText=False):
           67  +    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oSpellChecker, bContext, bSpellSugg, bDebug)
    68     68       aGrammErrs = list(aGrammErrs)
    69     69       if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
    70     70           return ""
    71     71       if lLineSet:
    72     72           aGrammErrs, aSpellErrs = txt.convertToXY(aGrammErrs, aSpellErrs, lLineSet)
    73     73           return json.dumps({ "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
    74     74       if bReturnText:
................................................................................
   126    126       xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
   127    127       xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
   128    128       xArgs = xParser.parse_args()
   129    129   
   130    130       gce.load()
   131    131       if not xArgs.json:
   132    132           echo("Grammalecte v{}".format(gce.version))
   133         -    oDict = gce.getDictionary()
          133  +    oSpellChecker = gce.getSpellChecker()
   134    134       oTokenizer = tkz.Tokenizer("fr")
   135         -    oLexGraphe = lxg.Lexicographe(oDict)
          135  +    oLexGraphe = lxg.Lexicographe(oSpellChecker)
   136    136       if xArgs.textformatter or xArgs.textformatteronly:
   137    137           oTF = tf.TextFormatter()
   138    138   
   139    139       if xArgs.list_options or xArgs.list_rules:
   140    140           if xArgs.list_options:
   141    141               gce.displayOptions("fr")
   142    142           if xArgs.list_rules:
   143    143               gce.displayRules(None  if xArgs.list_rules == "*"  else xArgs.list_rules)
   144    144           exit()
   145    145   
   146    146       if xArgs.suggest:
   147         -        lSugg = oDict.suggest(xArgs.suggest)
          147  +        lSugg = oSpellChecker.suggest(xArgs.suggest)
   148    148           if xArgs.json:
   149    149               sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
   150    150           else:
   151    151               sText = "Suggestions : " + " | ".join(lSugg)
   152    152           echo(sText)
   153    153           exit()
   154    154   
................................................................................
   177    177               for i, sText in enumerate(readfile(sFile), 1):
   178    178                   if xArgs.textformatter or xArgs.textformatteronly:
   179    179                       sText = oTF.formatText(sText)
   180    180                   if xArgs.textformatteronly:
   181    181                       output(sText, hDst)
   182    182                   else:
   183    183                       if xArgs.json:
   184         -                        sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
          184  +                        sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter)
   185    185                       else:
   186         -                        sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
          186  +                        sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
   187    187                       if sText:
   188    188                           if xArgs.json and bComma:
   189    189                               output(",\n", hDst)
   190    190                           output(sText, hDst)
   191    191                           bComma = True
   192    192                   if hDst:
   193    193                       echo("§ %d\r" % i, end="", flush=True)
   194    194           else:
   195    195               # concaténation des lignes non séparées par une ligne vide
   196    196               for i, lLine in enumerate(readfileAndConcatLines(sFile), 1):
   197    197                   sText, lLineSet = txt.createParagraphWithLines(lLine)
   198    198                   if xArgs.json:
   199         -                    sText = generateJSON(i, sText, oTokenizer, oDict, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
          199  +                    sText = generateJSON(i, sText, oTokenizer, oSpellChecker, bContext=xArgs.context, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, lLineSet=lLineSet)
   200    200                   else:
   201         -                    sText = generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
          201  +                    sText = generateText(sText, oTokenizer, oSpellChecker, bDebug=False, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
   202    202                   if sText:
   203    203                       if xArgs.json and bComma:
   204    204                           output(",\n", hDst)
   205    205                       output(sText, hDst)
   206    206                       bComma = True
   207    207                   if hDst:
   208    208                       echo("§ %d\r" % i, end="", flush=True)
................................................................................
   213    213           sInputText = "\n~==========~ Enter your text [/h /q] ~==========~\n"
   214    214           sText = _getText(sInputText)
   215    215           while True:
   216    216               if sText.startswith("?"):
   217    217                   for sWord in sText[1:].strip().split():
   218    218                       if sWord:
   219    219                           echo("* " + sWord)
   220         -                        for sMorph in oDict.getMorph(sWord):
          220  +                        for sMorph in oSpellChecker.getMorph(sWord):
   221    221                               echo("  {:<32} {}".format(sMorph, oLexGraphe.formatTags(sMorph)))
   222    222               elif sText.startswith("!"):
   223    223                   for sWord in sText[1:].strip().split():
   224    224                       if sWord:
   225         -                        echo(" | ".join(oDict.suggest(sWord)))
   226         -                        #echo(" | ".join(oDict.suggest2(sWord)))
          225  +                        echo(" | ".join(oSpellChecker.suggest(sWord)))
          226  +                        #echo(" | ".join(oSpellChecker.suggest2(sWord)))
   227    227               elif sText.startswith(">"):
   228         -                oDict.drawPath(sText[1:].strip())
          228  +                oSpellChecker.drawPath(sText[1:].strip())
   229    229               elif sText.startswith("="):
   230         -                for sRes in oDict.select(sText[1:].strip()):
          230  +                for sRes in oSpellChecker.select(sText[1:].strip()):
   231    231                       echo(sRes)
   232    232               elif sText.startswith("/+ "):
   233    233                   gce.setOptions({ opt:True  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
   234    234                   echo("done")
   235    235               elif sText.startswith("/- "):
   236    236                   gce.setOptions({ opt:False  for opt in sText[3:].strip().split()  if opt in gce.getOptions() })
   237    237                   echo("done")
................................................................................
   262    262               elif sText.startswith("/rl"):
   263    263                   # reload (todo)
   264    264                   pass
   265    265               else:
   266    266                   for sParagraph in txt.getParagraph(sText):
   267    267                       if xArgs.textformatter:
   268    268                           sText = oTF.formatText(sText)
   269         -                    sRes = generateText(sText, oTokenizer, oDict, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
          269  +                    sRes = generateText(sText, oTokenizer, oSpellChecker, bDebug=xArgs.debug, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width)
   270    270                       if sRes:
   271    271                           echo("\n" + sRes)
   272    272                       else:
   273    273                           echo("\nNo error found.")
   274    274               sText = _getText(sInputText)
   275    275   
   276    276   
   277    277   if __name__ == '__main__':
   278    278       main()

Modified grammalecte-server.py from [6dbdf10c60] to [3253a2b2ff].

   125    125   def genUserId ():
   126    126       i = 0
   127    127       while True:
   128    128           yield str(i)
   129    129           i += 1
   130    130   
   131    131   
   132         -def parseParagraph (iParagraph, sText, oTokenizer, oDict, dOptions, bDebug=False, bEmptyIfNoErrors=False):
          132  +def parseParagraph (iParagraph, sText, oTokenizer, oSpellChecker, dOptions, bDebug=False, bEmptyIfNoErrors=False):
   133    133       aGrammErrs = gce.parse(sText, "FR", bDebug, dOptions)
   134    134       aGrammErrs = list(aGrammErrs)
   135    135       aSpellErrs = []
   136    136       for dToken in oTokenizer.genTokens(sText):
   137         -        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
          137  +        if dToken['sType'] == "WORD" and not oSpellChecker.isValidToken(dToken['sValue']):
   138    138               aSpellErrs.append(dToken)
   139    139       if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
   140    140           return ""
   141    141       return "  " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
   142    142       
   143    143   
   144    144   if __name__ == '__main__':
................................................................................
   147    147       echo("Grammalecte v{}".format(gce.version))
   148    148       dServerOptions = getServerOptions()
   149    149       dGCOptions = getConfigOptions("fr")
   150    150       if dGCOptions:
   151    151           gce.setOptions(dGCOptions)
   152    152       dServerGCOptions = gce.getOptions()
   153    153       echo("Grammar options:\n" + " | ".join([ k + ": " + str(v)  for k, v in sorted(dServerGCOptions.items()) ]))
   154         -    oDict = gce.getDictionary()
          154  +    oSpellChecker = gce.getSpellChecker()
   155    155       oTokenizer = tkz.Tokenizer("fr")
   156    156       oTF = tf.TextFormatter()
   157    157       dUser = {}
   158    158       userGenerator = genUserId()
   159    159   
   160    160       app = Bottle()
   161    161   
................................................................................
   195    195                   dOptions.update(json.loads(request.forms.options))
   196    196               except:
   197    197                   sError = "request options not used"
   198    198           sJSON = '{ "program": "grammalecte-fr", "version": "'+gce.version+'", "lang": "'+gce.lang+'", "error": "'+sError+'", "data" : [\n'
   199    199           for i, sText in enumerate(txt.getParagraph(request.forms.text), 1):
   200    200               if bTF:
   201    201                   sText = oTF.formatText(sText)
   202         -            sText = parseParagraph(i, sText, oTokenizer, oDict, dOptions, bEmptyIfNoErrors=True)
          202  +            sText = parseParagraph(i, sText, oTokenizer, oSpellChecker, dOptions, bEmptyIfNoErrors=True)
   203    203               if sText:
   204    204                   if bComma:
   205    205                       sJSON += ",\n"
   206    206                   sJSON += sText
   207    207                   bComma = True
   208    208           sJSON += "\n]}\n"
   209    209           return sJSON

Modified graphspell-js/tokenizer.js from [d6429837c4] to [c3f0ee8c90].

    84     84                   }
    85     85               }
    86     86               i += nCut;
    87     87               sText = sText.slice(nCut);
    88     88           }
    89     89       }
    90     90   
    91         -    getSpellingErrors (sText, oDict) {
           91  +    getSpellingErrors (sText, oSpellChecker) {
    92     92           let aSpellErr = [];
    93     93           for (let oToken of this.genTokens(sText)) {
    94         -            if (oToken.sType === 'WORD' && !oDict.isValidToken(oToken.sValue)) {
           94  +            if (oToken.sType === 'WORD' && !oSpellChecker.isValidToken(oToken.sValue)) {
    95     95                   aSpellErr.push(oToken);
    96     96               }
    97     97           }
    98     98           return aSpellErr;
    99     99       }
   100    100   }
   101    101   
   102    102   
   103    103   if (typeof(exports) !== 'undefined') {
   104    104       exports.Tokenizer = Tokenizer;
   105    105   }