Subversion Repositories navi

Rev

Rev 255 | Rev 258 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | RSS feed

#!/usr/bin/python
# -*- coding: utf-8 -*-
#    This file is part of Tsim Apiak.
#
#    Tsim Apiak is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public Licence as published by
#    the Free Software Foundation, either version 3 of the Licence, or
#    (at your option) any later version.
#
#    In addition to this, you must also comply with clause 4 of the
#    Apache Licence, version 2.0, concerning attribution. Where there
#    is a contradiction between the two licences, the GPL
#    takes preference.
#
#    Tsim Apiak is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with Tsim Apiak.  If not, see <http://www.gnu.org/licenses/>.

import dbconnector
import parsenum
import re

#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns
# Full Na'vi dictionary, fetched once at import time from the Eana Eltu
# database. Each entry is a dict with at least "id", "navi" (surface form),
# "infix" (template containing <1><2>/<3> infix slots), and "type".
wordlist = dbconnector.getnavilist()


# Hardcoded parse results for words the main loop gets wrong (see the
# original trailing comment for the tuple layout: original, root,
# infixes 0-2, prefixes, suffixes, lenited-flag).
BROKENWORDS = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False)) # XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, PREFIXES, suffixes. Things that can take affixes should go in the above list instead.
# Verbal infixes by slot position; each tuple ends with u"" so that "no
# infix present" is always a valid match in the nested loops below.
INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"")
INFIXES2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"")
INFIXES3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"")
# Recognised prefixes, longest first so greedy stripping prefers the
# longer match (e.g. "tsay" before "say"/"ay").
PREFIXES = (u"tsay", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"px", u"a", u"m")
# Adpositions may attach to the end of a noun, so they also feed POSTFIXES.
ADPOSITIONS = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì")
# All recognised suffixes: adpositions plus case endings, ordered
# longest-first for the same greedy-stripping reason as PREFIXES.
POSTFIXES = ADPOSITIONS + (u"tsyìp", u"eyä", u"ìri", u"aru", u"ati", u"ayä", u"ari", u"ay", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"al", u"at", u"ar", u"ey", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r")
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?"
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)"

# Lenition table: (plain onset, lenited onset). Used to undo lenition when
# the surface form does not match the dictionary form directly.
LENIT = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))

def parseword(wordin):
    """Parse the first word of *wordin* (a list of sentence tokens).

    Tries every dictionary entry against the leading token(s), peeling
    off prefixes, suffixes, infixes and undoing lenition until the
    remainder matches the entry's root. Returns a dict with keys:
    "word" (the matched dictionary entry, or a placeholder whose "navi"
    is the token wrapped in square brackets when nothing matched),
    "pref"/"post" (per-token lists of stripped affixes), "inf" (the
    three infix slots) and "len" (True when lenition was undone).
    """
    tempid = 0
    temptype = u""
    # Hardcoded escape hatch: return a canned result for known-broken words.
    for brokenword in BROKENWORDS: # XXX HACK - this is all code to work around bugs that shouldn't exist
        if wordin[0] == brokenword[0]:
            # Look up id/type of the root so the canned entry is complete.
            for word in wordlist:
                if brokenword[1] == word["navi"]:
                    tempid = word["id"]
                    temptype = word["type"]
            return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
    # Default result: unknown word, shown bracketed.
    ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
    # NOTE(review): if wordlist is empty this loop never runs and the
    # references to foundprefs/foundposts/foundins/lenited/foundit below
    # would raise NameError — presumably wordlist is never empty in practice.
    for word in wordlist:
        # NOTE(review): this lowercases the shared dictionary entry in
        # place on every call — a deliberate normalisation, but it mutates
        # module-level state.
        word["navi"] = word["navi"].lower()
        foundit = True
        foundprefs = []
        foundposts = []
        lenited = False
        # Multi-word dictionary entries ("infix" contains spaces) consume
        # one input token per template word.
        splitword = word["infix"].split(u" ")
        foundins = [u"", u"", u""]
        if len(wordin) < len(splitword):
            foundit = False
            continue
        for wor in range(len(splitword)):
            if not foundit:
                break
            foundprefs.append([])
            foundposts.append([])
            center = u""
            if u"<1>" in splitword[wor]:
                # Entry can take infixes: collect every infix (per slot)
                # whose letters appear somewhere in the input token...
                tempin1 = []
                tempin2 = []
                tempin3 = []
                for in1 in INFIXES1:
                    if in1 in wordin[wor]:
                        tempin1.append(in1)
                for in2 in INFIXES2:
                    if in2 in wordin[wor]:
                        tempin2.append(in2)
                for in3 in INFIXES3:
                    if in3 in wordin[wor]:
                        tempin3.append(in3)
                # ...then try every combination, substituting into the
                # template. "lll"/"rrr" collapse handles pseudovowel roots
                # where the infix vowel merges with ll/rr.
                for in1 in tempin1:
                    for in2 in tempin2:
                        for in3 in tempin3:
                            if splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]:
                                center = splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r")
                                foundins = [in1, in2, in3]
                                break
                        if center != u"":
                            break
                    if center != u"":
                        break
            else:
                # No infix slots: plain substring match first.
                if splitword[wor] in wordin[wor]:
                    center = splitword[wor]
                if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX HACK - workaround to fix pay being lenited pxay. Maybe fixable without hardcoding?
                    # Try each lenition rule on the template's onset.
                    for i in LENIT:
                        temp = u""
                        if splitword[wor].startswith(i[0]):
                            temp = i[1] + splitword[wor][len(i[0]):]
                            if temp in wordin[wor]:
                                lenited = True
                                center = temp
                if center == u"":
                    # Contracted pronoun forms: "nga" -> "ng", "po" -> "p"
                    # (e.g. before a vowel-initial ending).
                    if splitword[wor].endswith(u"nga"):
                        temp = splitword[wor][:-3] + u"ng"
                        if temp in wordin[wor]:
                            center = temp
                    if splitword[wor].endswith(u"po"):
                        temp = splitword[wor][:-2] + u"p"
                        if temp in wordin[wor]:
                            center = temp
            if center == u"":
                foundit = False
                break
            # Split the token around the matched root: everything before it
            # must be prefixes, everything after must be suffixes. More
            # than one occurrence of the root makes the split ambiguous.
            temp = wordin[wor].split(center)
            if len(temp) != 2:
                foundit = False
                break
            pref, posf = temp
            # Greedily strip known prefixes from the right end of the
            # prefix string until a full pass removes nothing.
            last = u""
            while last != pref:
                last = pref
                for pre in PREFIXES:
                    if pref != u"":
                        if pref.endswith(pre):
                            if pre in foundprefs[wor]:
                                break
                            foundprefs[wor].append(pre)
                            pref = pref[:-len(pre)]
                            break
            if pref != u"":
                # Leftover unexplained leading material: not this entry.
                foundit = False
                foundprefs = []
                break
            # Same greedy stripping for suffixes, from the left end.
            last = u""
            while last != posf:
                last = posf
                for pos in POSTFIXES:
                    if posf != u"":
                        if posf.startswith(pos):
                            if pos in foundposts[wor]:
                                break
                            if pos != u"ä" or word["navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
                                foundposts[wor].append(pos)
                                posf = posf[len(pos):]
                                break
                            else:
                                break
            if posf != u"":
                foundit = False
                foundposts = []
                break
        if foundit == True:
            foundword = word
            break
    # Report whatever the last attempted entry produced; only "word" is
    # upgraded from the bracketed placeholder when a match was found.
    ret["pref"] = foundprefs
    ret["post"] = foundposts
    ret["inf"] = foundins
    ret["len"] = lenited
    if foundit == True:
        ret["word"] = foundword
    return ret

def parsesent(sent):
    sent = sent.strip().lower().replace(u"’", u"'")
    sent = re.sub(ur"[^\wìä' ]", u"", sent)
    sent = re.sub(ur"\ +", u" ", sent)
    sent = sent.split(u" ")
    ret = []
    left = len(sent)
    while left:
        word = parsenum.parse(sent[len(sent) - left])
        if word == None:
            word = parseword(sent[-left:])
        left -= len(word["word"]["navi"].split(" "))
        ret.append(word)
    return ret