Grammalecte: Check-in [a85f64f6f8]


Overview
Comment: [graphspell][core][fr] code cleaning (pylint)
Downloads: Tarball | ZIP archive | SQL archive
Timelines: family | ancestors | descendants | both | trunk | fr | core | graphspell
Files: files | file ages | folders
SHA3-256: a85f64f6f87355970851429c257d916ea75897a4fbda83dc2c74b8e876e4d568
User & Date: olr 2019-05-10 20:52:12
Context
2019-05-11
07:17
[fx] gc panel: fix CSS nightmare check-in: a54db46fa5 user: olr tags: fx, trunk
2019-05-10
20:52
[graphspell][core][fr] code cleaning (pylint) check-in: a85f64f6f8 user: olr tags: core, fr, graphspell, trunk
19:44
[graphspell][core][fr] code cleaning (pylint) check-in: a186cf9261 user: olr tags: core, fr, graphspell, trunk
Changes

Changes to gc_core/py/grammar_checker.py.

    53     53           "returns a tuple: (grammar errors, spelling errors)"
    54     54           aGrammErrs = self.gce.parse(sText, "FR", bDebug=bDebug, dOptions=dOptions, bContext=bContext)
    55     55           aSpellErrs = self.oSpellChecker.parseParagraph(sText, bSpellSugg)
    56     56           return aGrammErrs, aSpellErrs
    57     57   
    58     58       def generateText (self, sText, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100, bDebug=False):
    59     59           "[todo]"
    60         -        pass
    61     60   
    62     61       def generateTextAsJSON (self, sText, bContext=False, bEmptyIfNoErrors=False, bSpellSugg=False, bReturnText=False, bDebug=False):
    63     62           "[todo]"
    64         -        pass
    65     63   
    66     64       def generateParagraph (self, sText, dOptions=None, bEmptyIfNoErrors=False, bSpellSugg=False, nWidth=100, bDebug=False):
    67     65           "parse text and return a readable text with underline errors"
    68     66           aGrammErrs, aSpellErrs = self.getParagraphErrors(sText, dOptions, False, bSpellSugg, bDebug)
    69     67           if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
    70     68               return ""
    71     69           return text.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
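
Both removals above are the same pylint fix: a docstring is a complete function body on its own, so a trailing pass is redundant and triggers W0107 (unnecessary-pass). A minimal sketch, with hypothetical names:

    def todoWithPass (sText):
        "[todo]"
        pass                      # pylint W0107: unnecessary-pass

    def todoClean (sText):
        "[todo]"                  # the docstring alone is a complete body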

Changes to gc_lang/fr/modules/conj.py.

   416    416               return "# erreur"
   417    417   
   418    418       def _getPronomSujet (self, sWho, bFem):
   419    419           try:
   420    420               if sWho == ":3s":
   421    421                   if self._sRawInfo[5] == "r":
   422    422                       return "on"
   423         -                elif bFem:
          423  +                if bFem:
   424    424                       return "elle"
   425    425               if sWho == ":3p" and bFem:
   426    426                   return "elles"
   427    427               return _dProSuj[sWho]
   428    428           except:
   429    429               traceback.print_exc()
   430    430               return "# erreur"
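
The elif-to-if change is pylint's R1705 (no-else-return): once a branch ends with return, the following elif can be flattened to a plain if without changing behaviour. A minimal sketch with hypothetical names:

    def getPronoun (bImpersonal, bFem):
        "hypothetical reduction of _getPronomSujet"
        if bImpersonal:
            return "on"
        if bFem:          # was "elif bFem": equivalent, since the branch above returns
            return "elle"
        return "il"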

Changes to gc_lang/fr/modules/conj_data.py.

cannot compute difference between binary files

Changes to gc_lang/fr/modules/conj_generator.py.

    20     20   def getVerbGroupChar (sVerb):
    21     21       "returns the group number of <sVerb> guessing on its ending"
    22     22       sVerb = sVerb.lower()
    23     23       if sVerb.endswith("er"):
    24     24           return "1"
    25     25       if sVerb.endswith("ir"):
    26     26           return "2"
    27         -    if sVerb == "être" or sVerb == "avoir":
           27  +    if sVerb in ("être", "avoir"):
    28     28           return "0"
    29     29       if sVerb.endswith("re"):
    30     30           return "3"
    31     31       return "4"
    32     32   
    33     33   
    34     34   def getConjRules (sVerb, bVarPpas=True, nGroup=2):
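
Repeated equality tests against the same variable are what pylint's R1714 (consider-using-in) points at; a membership test expresses the same condition in one comparison. Illustrative sketch:

    sVerb = "être"
    if sVerb == "être" or sVerb == "avoir":   # pylint R1714: consider-using-in
        print("auxiliary")
    if sVerb in ("être", "avoir"):            # same test as a single membership check
        print("auxiliary")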

Changes to gc_lang/fr/modules/lexicographe.py.

   196    196                   aMorph.append( "{} : {}".format(sWord, self.formatTags(lMorph[0])) )
   197    197               else:
   198    198                   aMorph.append( "{} :  inconnu du dictionnaire".format(sWord) )
   199    199               # suffixe d’un mot composé
   200    200               if m2:
   201    201                   aMorph.append( "-{} : {}".format(m2.group(2), self._formatSuffix(m2.group(2).lower())) )
   202    202               # Verbes
   203         -            aVerb = set([ s[1:s.find("/")]  for s in lMorph  if ":V" in s ])
          203  +            aVerb = { s[1:s.find("/")]  for s in lMorph  if ":V" in s }
   204    204               return (aMorph, aVerb)
   205    205           except:
   206    206               traceback.print_exc()
   207    207               return (["#erreur"], None)
   208    208   
   209    209       def formatTags (self, sTags):
   210    210           "returns string: readable tags"
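
Passing a list comprehension to set() builds a throwaway list first; a set comprehension produces the same set directly and clears pylint's R1718 (consider-using-set-comprehension). A small sketch, assuming morphology strings of the form ">lemma/:tags" (the tag values below are made up):

    lMorph = [">aimer/:V1", ">aimer/:V1:Ip"]
    # old: aVerb = set([ s[1:s.find("/")]  for s in lMorph  if ":V" in s ])
    aVerb = { s[1:s.find("/")]  for s in lMorph  if ":V" in s }
    print(aVerb)    # {'aimer'}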

Changes to gc_lang/fr/modules/locutions_data.py.

cannot compute difference between binary files

Changes to gc_lang/fr/modules/mfsp_data.py.

cannot compute difference between binary files

Changes to gc_lang/fr/modules/phonet_data.py.

cannot compute difference between binary files

Changes to gc_lang/fr/modules/tests.py.

   223    223           end = time.perf_counter()
   224    224           print('{} : {}'.format(label, end - start))
   225    225           if hDst:
   226    226               hDst.write("{:<12.6}".format(end-start))
   227    227   
   228    228   
   229    229   def perf (sVersion, hDst=None):
          230  +    "performance tests"
   230    231       print("\nPerformance tests")
   231    232       gce.load()
   232    233       aErrs = gce.parse("Texte sans importance… utile pour la compilation des règles avant le calcul des perfs.")
   233    234   
   234    235       spHere, spfThisFile = os.path.split(__file__)
   235    236       with open(os.path.join(spHere, "perf.txt"), "r", encoding="utf-8") as hSrc:
   236    237           if hDst:
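
The added line addresses pylint's missing-docstring check for module-level functions; a one-line docstring is enough to satisfy it and to record intent. Sketch with hypothetical bodies:

    def perfWithoutDocstring (sVersion):      # flagged by pylint: missing docstring
        print("testing", sVersion)

    def perf (sVersion):
        "performance tests"                   # one line is enough to satisfy the check
        print("testing", sVersion)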

Changes to gc_lang/fr/modules/textformatter.py.

     1         -#!python3
     2         -
     3      1   """
     4      2   Text formatter
     5      3   """
     6      4   
     7      5   import re
     8      6   
     9      7   
................................................................................
   243    241       ("ma_word", True),
   244    242       ("ma_1letter_lowercase", False),
   245    243       ("ma_1letter_uppercase", False),
   246    244   ]
   247    245   
   248    246   
   249    247   class TextFormatter:
          248  +    "Text Formatter: purge typographic mistakes from text"
   250    249   
   251    250       def __init__ (self):
   252    251           for sOpt, lTup in dReplTable.items():
   253    252               for i, t in enumerate(lTup):
   254    253                   lTup[i] = (re.compile(t[0]), t[1])
   255    254   
   256         -    def formatText (self, sText, **args):
          255  +    def formatText (self, sText):
   257    256           for sOptName, bVal in lOptRepl:
   258    257               if bVal:
   259    258                   for zRgx, sRep in dReplTable[sOptName]:
   260    259                       sText = zRgx.sub(sRep, sText)
   261    260           return sText
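
Besides dropping the shebang and adding a class docstring, formatText loses a catch-all **args parameter that the body never read, so it was removed in the pylint cleanup. A minimal before/after sketch with hypothetical names:

    def formatTextOld (sText, **args):    # args is never read: dead parameter
        return sText.strip()

    def formatTextNew (sText):            # same behaviour, without the dead parameter
        return sText.strip()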

Changes to graphspell/char_player.py.

    39     39   
    40     40   
    41     41   _xTransNumbersToExponent = str.maketrans({
    42     42       "0": "⁰", "1": "¹", "2": "²", "3": "³", "4": "⁴", "5": "⁵", "6": "⁶", "7": "⁷", "8": "⁸", "9": "⁹"
    43     43   })
    44     44   
    45     45   def numbersToExponent (sWord):
           46  +    "convert numeral chars to exponant chars"
    46     47       return sWord.translate(_xTransNumbersToExponent)
    47     48   
    48     49   
    49     50   aVowel = set("aáàâäāeéèêëēiíìîïīoóòôöōuúùûüūyýỳŷÿȳœæAÁÀÂÄĀEÉÈÊËĒIÍÌÎÏĪOÓÒÔÖŌUÚÙÛÜŪYÝỲŶŸȲŒÆ")
    50     51   aConsonant = set("bcçdfghjklmnñpqrstvwxzBCÇDFGHJKLMNÑPQRSTVWXZ")
    51     52   aDouble = set("bcdfjklmnprstzBCDFJKLMNPRSTZ")  # letters that may be used twice successively
    52     53   
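
The new docstring documents numbersToExponent, which maps digit characters through a table built once with str.maketrans and applied with str.translate. A short usage sketch based on the table above:

    _xTransNumbersToExponent = str.maketrans({
        "0": "⁰", "1": "¹", "2": "²", "3": "³", "4": "⁴",
        "5": "⁵", "6": "⁶", "7": "⁷", "8": "⁸", "9": "⁹"
    })

    print("m2".translate(_xTransNumbersToExponent))     # m²
    print("km30".translate(_xTransNumbersToExponent))   # km³⁰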

Changes to graphspell/dawg.py.

   189    189               nCommonPrefix += 1
   190    190   
   191    191           # Check the lUncheckedNodes for redundant nodes, proceeding from last
   192    192           # one down to the common prefix size. Then truncate the list at that point.
   193    193           self._minimize(nCommonPrefix)
   194    194   
   195    195           # add the suffix, starting from the correct node mid-way through the graph
   196         -        if len(self.lUncheckedNodes) == 0:
          196  +        if not self.lUncheckedNodes:
   197    197               oNode = self.oRoot
   198    198           else:
   199    199               oNode = self.lUncheckedNodes[-1][2]
   200    200   
   201    201           iChar = nCommonPrefix
   202    202           for c in aEntry[nCommonPrefix:]:
   203    203               oNextNode = DawgNode()
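
Checking emptiness with len(...) == 0 is what pylint's C1801 (len-as-condition) warns about; an empty list is already falsy, so "not self.lUncheckedNodes" expresses the same test without the call. Illustrative sketch:

    lUncheckedNodes = []
    if len(lUncheckedNodes) == 0:   # pylint C1801: len-as-condition
        print("empty (old style)")
    if not lUncheckedNodes:         # idiomatic emptiness test
        print("empty (new style)")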

Changes to graphspell/spellchecker.py.

     1      1   """
     2      2   Spellchecker.
     3      3   Useful to check several dictionaries at once.
     4      4   
     5      5   To avoid iterating over a pile of dictionaries, it is assumed that 3 are enough:
     6      6   - the main dictionary, bundled with the package
     7         -- the extended dictionary
     8      7   - the community dictionary, added by an organization
     9      8   - the personal dictionary, created by the user for its own convenience
    10      9   """
    11     10   
    12     11   import importlib
    13     12   import traceback
    14     13   
................................................................................
   201    200           lMorph = self.oMainDic.getMorph(sWord)
   202    201           if self.bCommunityDic:
   203    202               lMorph.extend(self.oCommunityDic.getMorph(sWord))
   204    203           if self.bPersonalDic:
   205    204               lMorph.extend(self.oPersonalDic.getMorph(sWord))
   206    205           if self.bStorage:
   207    206               self._dMorphologies[sWord] = lMorph
   208         -            self._dLemmas[sWord] = set([ s[1:s.find("/")]  for s in lMorph ])
          207  +            self._dLemmas[sWord] = { s[1:s.find("/")]  for s in lMorph }
   209    208           return lMorph
   210    209   
   211    210       def getLemma (self, sWord):
   212    211           "retrieves lemmas"
   213    212           if self.bStorage:
   214    213               if sWord not in self._dLemmas:
   215    214                   self.getMorph(sWord)
   216    215               return self._dLemmas[sWord]
   217         -        return set([ s[1:s.find("/")]  for s in self.getMorph(sWord) ])
          216  +        return { s[1:s.find("/")]  for s in self.getMorph(sWord) }
   218    217   
   219    218       def suggest (self, sWord, nSuggLimit=10):
   220    219           "generator: returns 1, 2 or 3 lists of suggestions"
   221    220           if self.dDefaultSugg:
   222    221               if sWord in self.dDefaultSugg:
   223    222                   yield self.dDefaultSugg[sWord].split("|")
   224    223               elif sWord.istitle() and sWord.lower() in self.dDefaultSugg:
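
Along with dropping the extended dictionary from the module docstring, getMorph and getLemma get the same set-comprehension cleanup as lexicographe.py; both derive lemmas by slicing each morphology string between its first character and the first "/". A hedged sketch of that slicing, assuming the ">lemma/:tags" format (lemmasOf is a made-up helper):

    def lemmasOf (lMorph):
        "hypothetical helper mirroring the slicing used in getLemma"
        return { s[1:s.find("/")]  for s in lMorph }

    print(lemmasOf([">pomme/:N:f:s", ">pomme/:N:f:p"]))   # {'pomme'}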

Changes to pylintrc.

   465    465   
   466    466   # A regular expression matching the name of dummy variables (i.e. expectedly
   467    467   # not used).
   468    468   dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)
   469    469   
   470    470   # Argument names that match this expression will be ignored. Default to name
   471    471   # with leading underscore
   472         -ignored-argument-names=_.*|^sSentence|^dTags|^bCondMemo|^sCountry|^nLastToken|^sx?$|^m$|^dTokenPos
          472  +ignored-argument-names=_.*|^sSentence|^dTags|^bCondMemo|^sCountry|^nLastToken|^sx?$|^m$|^dTokenPos|^nTokenOffset|^lToken
   473    473   
   474    474   # Tells whether we should check for unused import in __init__ files.
   475    475   init-import=no
   476    476   
   477    477   # List of qualified module names which can have objects that can redefine
   478    478   # builtins.
   479    479   redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins
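
The regex gains ^nTokenOffset and ^lToken, so pylint's unused-argument check (W0613) stays quiet for parameters with those names, presumably because generated rule functions receive them whether or not each condition uses them. A hedged sketch of the effect, with a hypothetical condition function:

    # With ignored-argument-names matching ^nTokenOffset and ^lToken, pylint does not
    # report the unused parameters below even though only sCountry is read.
    def _g_cond_example (lToken, nTokenOffset, sCountry):
        "hypothetical rule condition: only sCountry is actually used"
        return sCountry == "FR"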