Subversion Repositories navi

Compare Revisions

Ignore whitespace Rev 176 → Rev 301

/tsimapiak/parse.py
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
20,35 → 20,135
# You should have received a copy of the GNU General Public License
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
import tsimapiak.dbconnector as dbconnector
import tsimapiak.parsenum as parsenum
import itertools
import re
import dbconnector
import parsenum
 
#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns
wordlist = dbconnector.getnavilist()
 
infixes1 = (u"awn", u"eyk", u"us", u"äp", u"")
infixes2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"")
infixes3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"")
prefixes = (u"tsay", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"a")
adpositions = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì")
postfixes = adpositions + (u"tsyìp", u"eyä", u"ìri", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r")
prefixes, infixes, postfixes = dbconnector.getaffixlists()
 
# XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes. Things that can take affixes should go in the above list instead.
BROKENWORDS = (
(u"sami", u"si", u"", u"am", u"", (()), (()), False, "si"), # otherwise parses as sa (tsa-lenited) + mi
#(u"to", u"to", u"", u"", u"", (()), (()), False),
#(u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False),
(u"soaiä", u"soaia", u"", u"", u"", (()), [[(u"ä", None)]], False, "soaia"), # does not parse, irregular form
#(u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False),
#(u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False),
(u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False, "kä"), # otherwise parses as kìm (spin) + ä (genitive)
(u"apxay", u"pxay", u"", u"", u"", [[(u"a", "a")]], (()), False, "pxay"), # otherwise parses as apxa + -y (genitive)
#(u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False),
#(u"kawnga", u"kawng", u"", u"", u"", (()), [[(u"a", None)]], False),
#(u"kawng", u"kawng", u"", u"", u"", (()), (()), False),
#(u"ka", u"ka", u"", u"", u"", (()), (()), False),
#(u"uo", u"uo", u"", u"", u"", (()), (()), False),
#(u"sìk", u"sìk", u"", u"", u"", (()), (()), False),
#(u"sim", u"sim", u"", u"", u"", (()), (()), False), # probably not tsim lenited
(u"tse", u"tse", u"", u"", u"", (()), (()), False, "tse"), # otherwise parses as tsa'u abbreviated (special case)
(u"por", u"po", u"", u"", u"", (()), [[("r", None)]], False, "po"), # otherwise parses as lenited pxor which is unlikely
)
 
BANNEDNUMBERS = { # words which must not be parsed by the number parser
"pey" # more likely dictionary word pey than lenited pxey 3
}
 
#INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"")
#INFIXES2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"")
#INFIXES3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"")
#PREFIXES = (u"tsay", u"fray", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"kel", u"lek", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"px", u"a", u"m", u"k")
#ADPOSITIONS = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì")
#POSTFIXES = ADPOSITIONS + (u"tsyìp", u"eyä", u"ìri", u"aru", u"ati", u"ayä", u"ari", u"ay", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"al", u"at", u"ar", u"ey", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r")
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?"
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)"
 
lenit = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))
EXTRAINFIXES = [
{"id": "-1", "navi": "eiy", "orig_navi": "ei", "gloss": "LAUD.", "position": 2},
{"id": "-2", "navi": "eng", "orig_navi": "äng", "gloss": "PEJ.", "position": 2},
]
 
EXTRAPOSTFIXES = [
{"id": "-3", "navi": "eyä", "orig_navi": "yä", "gloss": "GEN."},
{"id": "-4", "navi": "pxì", "orig_navi": "pxì", "gloss": "FRAC."},
]
 
EXTRAADP = (("to", [x["id"] for x in wordlist if x["navi"] == "to"][0]), ("sì", [x["id"] for x in wordlist if x["navi"] == "sì"][0])) # words that act like adpositions but technically aren't
 
LENIT = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))
 
# Let's lenit the prefixes
extraprefixes = []
for prefix in prefixes:
for letter, replacement in LENIT:
if prefix["navi"].startswith(letter):
new_prefix = prefix["navi"].replace(letter, replacement, 1)
if not [x for x in prefixes if x["navi"] == new_prefix]: # always assume a dictionary word over a lenited prefix
extraprefixes.append({"id": prefix["id"], "navi": new_prefix, "gloss": prefix["gloss"] + ".LENTD", "orig_navi": prefix["navi"]})
break
 
prefixes = sorted(prefixes + extraprefixes, key=lambda x: len(x["navi"]), reverse=True)
infixes = sorted(infixes + EXTRAINFIXES, key=lambda x: len(x["navi"]), reverse=True)
postfixes = sorted(postfixes + EXTRAPOSTFIXES, key=lambda x: len(x["navi"]), reverse=True)
 
# Let's lenit the dictionary
extrawords = []
for word in wordlist:
splitword = word["navi"].split(" ")
splitinfix = word["infix"].split(" ")
lenitword = {}
lenitinfix = {}
for i, wor in enumerate(splitword):
for letter, replacement in LENIT:
if wor.startswith(letter):
lenitword[i] = wor.replace(letter, replacement, 1)
lenitinfix[i] = splitinfix[i].replace(letter, replacement, 1)
break
 
s = list(lenitword.keys())
for lenits in itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(1, len(s)+1)):
new_word = ""
new_infix = ""
for i, wor in enumerate(splitword):
if i in lenits:
new_word += lenitword[i]
new_infix += lenitinfix[i]
else:
new_word += wor
new_infix += splitinfix[i]
new_word += " "
new_infix += " "
print(f"Generated lenited {new_word} from {word['navi']}")
new_word = new_word[:-1]
new_infix = new_infix[:-1]
extrawords.append({"id": word["id"], "navi": new_word, "infix": new_infix, "type": word["type"], "lenited": True, "orig_navi": word["navi"]})
 
wordlist = sorted(wordlist + extrawords, key=lambda x: len(x["navi"]) * 2 + (0 if x["lenited"] else 1), reverse=True)
 
# TODO add reef Na'vi
 
def parseword(wordin):
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
tempid = 0
temptype = u""
for brokenword in BROKENWORDS: # XXX HACK - this is all code to work around bugs that shouldn't exist
if wordin[0] == brokenword[0]:
for word in wordlist:
if brokenword[1] == word["navi"]:
tempid = word["id"]
temptype = word["type"]
return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype, "orig_navi": brokenword[8]}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u"", "orig_navi": "[" + wordin[0] + "]"}, "len": False, "pref": [], "post": [], "inf": ["", "", ""]}
for word in wordlist:
word["navi"] = word["navi"].lower()
foundit = True
foundprefs = []
foundposts = []
lenited = False
splitword = word["infix"].split(u" ")
foundins = [u"", u"", u""]
if len(wordin) < len(splitword):
foundit = False
next
continue
for wor in range(len(splitword)):
if not foundit:
break
55,38 → 155,70
foundprefs.append([])
foundposts.append([])
center = u""
pre = []
post = []
if u"<1>" in splitword[wor]:
for in1 in infixes1:
for in2 in infixes2:
for in3 in infixes3:
if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3) in wordin[wor]:
center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3)
if u"<0>" in splitword[wor]:
tempin1 = []
tempin2 = []
tempin3 = []
for in1 in [x["navi"] for x in infixes if x["position"] == 0] + [""]:
if in1 in wordin[wor]:
tempin1.append(in1)
for in2 in [x["navi"] for x in infixes if x["position"] == 1] + [""]:
if in2 in wordin[wor]:
tempin2.append(in2)
for in3 in [x["navi"] for x in infixes if x["position"] == 2] + [""]:
if in3 in wordin[wor]:
tempin3.append(in3)
for in1 in tempin1:
for in2 in tempin2:
for in3 in tempin3:
if splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]:
center = splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r")
foundins = [in1, in2, in3]
break
if center != u"": break
if center != u"": break
if center != u"":
break
if center != u"":
break
else:
if splitword[wor] in wordin[wor]:
center = splitword[wor]
if center == u"":
for i in lenit:
temp = u""
if splitword[wor].startswith(i[0]):
temp = i[1] + splitword[wor][len(i[0]):]
if temp in wordin[wor]:
lenited = True
center = temp
if center == u"":
if splitword[wor].endswith(u"nga"):
temp = splitword[wor][:-3] + u"ng"
temp = splitword[wor][:-3] + u"nge"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"fo"):
temp = splitword[wor][:-2] + u"fe"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"po"):
temp = splitword[wor][:-2] + u"p"
temp = splitword[wor][:-2] + u"pe"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"tsa"):
temp = splitword[wor][:-3] + u"tse"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"fko"):
temp = splitword[wor][:-3] + u"fke"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sa'u"):
temp = splitword[wor][:-4] + u"se"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sa"):
temp = splitword[wor][:-2] + u"se"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sno"):
temp = splitword[wor][:-3] + u"sne"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"ayla"):
temp = splitword[wor][:-3] + u"ayle"
if temp in wordin[wor]:
center = temp
if center == u"":
foundit = False
break
100,49 → 232,57
last = pref
for pre in prefixes:
if pref != u"":
if pref.endswith(pre):
if pre in foundprefs[wor]:
if pref.endswith(pre["navi"]):
if pre["navi"] in foundprefs[wor]:
break
foundprefs[wor].append(pre)
pref = pref[:-len(pre)]
foundprefs[wor].append((pre["navi"], pre["orig_navi"])) # only needed here, to handle lenition
pref = pref[:-len(pre["navi"])]
break
if pref != u"":
foundit = False
foundprefs = []
break
last = u""
while last != posf:
last = posf
for pos in postfixes:
for pos, posid in sorted([(x["navi"], None) for x in postfixes] + [(x["navi"], x["id"]) for x in wordlist if x["type"] == "adp."] + list(EXTRAADP), key=lambda x: len(x[0]), reverse=True):
if posf != u"":
if posf.startswith(pos):
if pos in foundposts[wor]:
if (pos, posid) in foundposts[wor]:
break
foundposts[wor].append(pos)
posf = posf[len(pos):]
break
if pos != u"ä" or word["orig_navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
foundposts[wor].append((pos, posid))
posf = posf[len(pos):]
break
else:
break
if posf != u"":
foundit = False
foundposts = []
break
if foundit == True:
foundword = word
break
ret["pref"] = foundprefs
ret["post"] = foundposts
ret["inf"] = foundins
ret["len"] = lenited
if foundit == True:
ret["pref"] = foundprefs
ret["post"] = foundposts
ret["inf"] = foundins
ret["len"] = word["lenited"]
ret["word"] = foundword
return ret
 
def parsesent(sent):
sent = sent.strip().lower().replace(u"’", u"'")
sent = re.sub(ur"[^\wìä' ]",u"",sent)
sent = re.sub(ur"\ +",u" ",sent)
sent = sent.replace("ù", "u") # Basic support for reef Na'vi
sent = re.sub(r"[^\wìä' ]", u"", sent)
sent = re.sub(r"\ +", u" ", sent)
sent = sent.split(u" ")
ret = []
left = len(sent)
while left:
word = parsenum.parse(sent[len(sent)-left])
word = None
if sent[len(sent) - left] not in BANNEDNUMBERS:
word = parsenum.parse(sent[len(sent) - left])
if word == None:
word = parseword(sent[-left:])
left -= len(word["word"]["navi"].split(" "))
/tsimapiak/parsenum.py
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
20,7 → 20,9
# You should have received a copy of the GNU General Public License
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
num = [u"kew",
from __future__ import print_function
 
NUM = [u"kew",
u"'aw",
u"mune",
u"pxey",
29,7 → 31,16
u"pukap",
u"kinä"]
 
numord = [u"kew",
NUMLEN = [u"hew",
u"aw",
u"mune",
u"pey",
u"sìng",
u"mrr",
u"fukap",
u"hinä"]
 
NUMORD = [u"kew",
u"'aw",
u"mu",
u"pxey",
38,7 → 49,16
u"pu",
u"ki"]
 
rem = [u"aw",
NUMORDLEN = [u"hew",
u"aw",
u"mu",
u"pey",
u"sì",
u"mrr",
u"fu",
u"hi"]
 
REM = [u"aw",
u"mun",
u"pey",
u"sìng",
46,7 → 66,7
u"fu",
u"hin"]
 
remord = [u"aw",
REMORD = [u"aw",
u"mu",
u"pey",
u"sì",
54,7 → 74,7
u"fu",
u"hi"]
 
base = [u"",
BASE = [u"",
u"me",
u"pxe",
u"tsì",
62,82 → 82,209
u"pu",
u"ki"]
 
BASELEN = [u"",
u"me",
u"pe",
u"sì",
u"mrr",
u"fu",
u"hi"]
 
def parse(numin):
if u"mm" in numin:
return None
if (numin[0] == u"a") and (numin[len(numin)-1] == u"a"):
if (numin == u"") or len(numin) == 1 or ((numin[0] == u"a" and numin[1] != "w") and (numin[len(numin) - 1] == u"a")):
return None
prefs = []
posts = []
outoct = 0
outdec = 0
ret = {"word": {"id": 0, "navi": u"", "infix": u"", "type": u""}, "pref": [prefs], "post": [posts], "inf": [u"", u"", u""], "len": False, "dec": outdec, "oct": outoct}
if numin[0] == u"a":
prefs.append(u"a")
frac = False
ret = {"word": {"id": 0, "navi": u"", "orig_navi": "", "infix": u"", "type": u""}, "pref": [prefs], "post": [posts], "inf": [u"", u"", u""], "len": False, "dec": outdec, "oct": outoct}
if numin[0] == u"a" and len(numin) > 1 and numin[1] != "w":
prefs.append((u"a", "a"))
numin = numin[1:]
if numin[len(numin)-1] == u"a":
posts.append(u"a")
elif numin[0:2] == "nì":
prefs.append(("nì", "nì"))
numin = numin[2:]
if numin[len(numin) - 1] == u"a":
posts.append((u"a", None))
numin = numin[:-1]
if numin[-2:] == u"ve":
posts.append(u"ve")
posts.append((u"ve", None))
numin = numin[:-2]
#base numbers
for n in range(len(num)):
if u"ve" in posts:
if numin == numord[n]:
if numin[-3:] == u"pxì":
posts.append((u"pxì", None))
numin = numin[:-3]
 
# Special fractions
if numin in ("mawl", "pan", "fan"):
outoct = 2 if numin == "mawl" else 3
outdec = 2 if numin == "mawl" else 3
ret["word"]["navi"] = "1/" + str(outdec)
ret["word"]["orig_navi"] = "mawl" if numin == "mawl" else "pan"
ret["dec"] = outdec
ret["oct"] = outoct
ret["len"] = True if numin == "fan" else False
return ret
 
#BASE numbers
for n in range(len(NUM)):
if (u"ve", None) in posts:
if numin == NUMORD[n]:
outoct = n
outdec = n
ret["word"]["navi"] = unicode(outdec) + u"."
ret["word"]["navi"] = str(outdec) + u"."
ret["word"]["orig_navi"] = NUMORD[n]
ret["dec"] = outdec
ret["oct"] = outoct
return ret
if numin == NUMORDLEN[n]:
outoct = n
outdec = n
ret["word"]["navi"] = str(outdec) + u"."
ret["word"]["orig_navi"] = NUMORD[n]
ret["dec"] = outdec
ret["oct"] = outoct
ret["len"] = True
return ret
elif ("pxì", None) in posts and n > 3:
if numin == NUMORD[n]:
outoct = n
outdec = n
ret["word"]["navi"] = "1/" + str(outdec)
ret["word"]["orig_navi"] = NUMORD[n]
ret["dec"] = outdec
ret["oct"] = outoct
return ret
if numin == NUMORDLEN[n]:
outoct = n
outdec = n
ret["word"]["navi"] = "1/" + str(outdec)
ret["word"]["orig_navi"] = NUMORD[n]
ret["dec"] = outdec
ret["oct"] = outoct
ret["len"] = True
return ret
else:
if numin == num[n]:
if numin == NUM[n]:
outoct = n
outdec = n
ret["word"]["navi"] = unicode(outdec)
ret["word"]["navi"] = str(outdec)
ret["word"]["orig_navi"] = NUM[n]
ret["dec"] = outdec
ret["oct"] = outoct
return ret
if numin == NUMLEN[n]:
outoct = n
outdec = n
ret["word"]["navi"] = str(outdec)
ret["word"]["orig_navi"] = NUM[n]
ret["dec"] = outdec
ret["oct"] = outoct
ret["len"] = True
return ret
#other numbers
for n in range(len(base)):
if numin.startswith(base[n] + u"zazam"):
outoct += (n+1) * (10**4)
outdec += (n+1) * (8**4)
numin = numin[len(base[n]) + 5:]
for n in range(len(base)):
if numin.startswith(base[n] + u"vozam"):
outoct += (n+1) * (10**3)
outdec += (n+1) * (8**3)
numin = numin[len(base[n]) + 5:]
for n in range(len(base)):
if numin.startswith(base[n] + u"zam"):
outoct += (n+1) * (10**2)
outdec += (n+1) * (8**2)
numin = numin[len(base[n]) + 3:]
for n in range(len(base)):
if numin.startswith(base[n] + u"vol"):
outoct += (n+1) * 10
outdec += (n+1) * 8
numin = numin[len(base[n]) + 3:]
if numin.startswith(base[n] + u"vo"):
outoct += (n+1) * 10
outdec += (n+1) * 8
numin = numin[len(base[n]) + 2:]
for n in range(len(rem)):
if u"ve" in posts:
if numin == remord[n]:
outoct += n + 1
outdec += n + 1
numin = u""
else:
if numin == rem[n]:
outoct += n + 1
outdec += n + 1
numin = u""
notbase = False
orig_navi = ""
for n in range(len(BASE)):
if numin.startswith(BASE[n] + u"vozaza") or (not notbase and numin.startswith(BASELEN[n] + "vozaza")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * (10 ** 5)
outdec += (n + 1) * (8 ** 5)
if numin[len(base) + 6:].startswith(u"mrr") or numin[len(base) + 6:].startswith(u"me") or numin[len(base) + 6:].startswith("mu") or not numin[len(base) + 6:].startswith("m"):
orig_navi += BASE[n] + "vozaza"
numin = numin[len(base) + 6:]
else:
orig_navi += BASE[n] + "vozazam"
numin = numin[len(base) + 7:]
notbase = True
for n in range(len(BASE)):
if numin.startswith(BASE[n] + u"zaza") or (not notbase and numin.startswith(BASELEN[n] + "zaza")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * (10 ** 4)
outdec += (n + 1) * (8 ** 4)
if numin[len(base) + 4:].startswith(u"mrr") or numin[len(base) + 4:].startswith(u"me") or numin[len(base) + 4:].startswith("mu") or not numin[len(base) + 4:].startswith("m"):
orig_navi += BASE[n] + "zaza"
numin = numin[len(base) + 4:]
else:
orig_navi += BASE[n] + "zazam"
numin = numin[len(base) + 5:]
notbase = True
for n in range(len(BASE)):
if numin.startswith(BASE[n] + u"voza") or (not notbase and numin.startswith(BASELEN[n] + "voza")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * (10 ** 3)
outdec += (n + 1) * (8 ** 3)
if numin[len(base) + 4:].startswith(u"mrr") or numin[len(base) + 4:].startswith(u"me") or numin[len(base) + 4:].startswith("mu") or not numin[len(base) + 4:].startswith("m"):
orig_navi += BASE[n] + "voza"
numin = numin[len(base) + 4:]
else:
orig_navi += BASE[n] + "vozam"
numin = numin[len(base) + 5:]
notbase = True
for n in range(len(BASE)):
if numin.startswith(BASE[n] + u"za") or (not notbase and numin.startswith(BASELEN[n] + "za")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * (10 ** 2)
outdec += (n + 1) * (8 ** 2)
if numin[len(base) + 2:].startswith(u"mrr") or numin[len(base) + 2:].startswith(u"me") or numin[len(base) + 2:].startswith("mu") or not numin[len(base) + 2:].startswith("m"):
orig_navi += BASE[n] + "za"
numin = numin[len(base) + 2:]
else:
orig_navi += BASE[n] + "zam"
numin = numin[len(base) + 3:]
notbase = True
for n in range(len(BASE)):
if numin.startswith(BASE[n] + u"vol") or (not notbase and numin.startswith(BASELEN[n] + "vol")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * 10
outdec += (n + 1) * 8
numin = numin[len(base) + 3:]
notbase = True
orig_navi += BASE[n] + "vol"
if numin.startswith(BASE[n] + u"vo") or (not notbase and numin.startswith(BASELEN[n] + "vo")):
base = BASE[n]
if not numin.startswith(BASE[n]):
base = BASELEN[n]
ret["len"] = True
outoct += (n + 1) * 10
outdec += (n + 1) * 8
numin = numin[len(base) + 2:]
notbase = True
orig_navi += BASE[n] + "vo"
if notbase:
for n in range(len(REM)):
if (u"ve", None) in posts or ("pxì", None) in posts:
if numin == REMORD[n]:
orig_navi += numin
outoct += n + 1
outdec += n + 1
numin = u""
else:
if numin == REM[n]:
orig_navi += numin
outoct += n + 1
outdec += n + 1
numin = u""
if numin == u"":
ret["word"]["navi"] = unicode(outdec) if not u"ve" in posts else unicode(outdec) + u"."
ret["word"]["navi"] = ("" if not ("pxì", None) in posts else "1/") + str(outdec) + ("" if not (u"ve", None) in posts else ".")
ret["word"]["orig_navi"] = orig_navi
ret["dec"] = outdec
ret["oct"] = outoct
return ret
145,4 → 292,4
return None
 
if __name__ == "__main__":
print parse(u"mevolawve")
print(parse(u"mevolawve"))
/tsimapiak/translate.py
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
20,45 → 20,55
# You should have received a copy of the GNU General Public License
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
import parse
import dbconnector
import tsimapiak.dbconnector as dbconnector
import tsimapiak.parse as parse
 
infixes1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL."))
infixes2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF."))
infixes3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON."))
prefixes = ((u"tsay", u"those"), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"a", u"ADJ.POST"))
adpositions = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and"))
postfixes = adpositions + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT."))
#BROKENWORDS = [[u"", u"", u"", u"", u"", u"", u""]] #, (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # XXX HACK - words not in EE
#INFIXES1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL."))
#INFIXES2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF."))
#INFIXES3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON."))
#PREFIXES = ((u"tsay", u"those"), (u"fray", u"every-PL."), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"kel", u"ADJD.-not"), (u"lek", u"not-ADJD."), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"px", u"TRI."), (u"a", u"ADJ.POST"), (u"m", u"DU."), (u"k", u"not"))
#ADPOSITIONS = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and"))
#POSTFIXES = ADPOSITIONS + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ari", u"TOP."), (u"ayä", u"GEN."), (u"aru", u"DAT."), (u"ati", u"ACC."), (u"ay", u"GEN."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"al", u"ERG."), (u"at", u"ACC."), (u"ar", u"DAT."), (u"ey", u"GEN."), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT."))
 
def translatesent(sent, lang):
sent = parse.parsesent(sent)
for word in sent:
if word["word"]["id"] != 0:
word["translated"] = dbconnector.translate(word["word"]["id"],lang)
word["translated"] = ""
if word["word"]["id"] > 0:
word["translated"] = dbconnector.translate(word["word"]["id"], lang)
else:
word["translated"] = word["word"]["navi"]
# for brokenword in BROKENWORDS:
# if brokenword[0] == word["word"]["navi"]:
# word["translated"] = brokenword[1]
if word["translated"] == u"":
word["translated"] = word["word"]["navi"]
if word["inf"][0] != u"":
for fix in infixes1:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 0]:
if fix[0] == word["inf"][0]:
word["translated"] += '-' + fix[1]
if word["inf"][1] != u"":
for fix in infixes2:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 1]:
if fix[0] == word["inf"][1]:
word["translated"] += '-' + fix[1]
if word["inf"][2] != u"":
for fix in infixes3:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 2]:
if fix[0] == word["inf"][2]:
word["translated"] += '-' + fix[1]
for temp in word["pref"]: # double array? WTF?
for navf in temp:
for fix in prefixes:
for temp in word["pref"]:
for navf, navf_orig in temp:
for fix in [(x["navi"], x["gloss"]) for x in parse.prefixes]:
if fix[0] == navf:
word["translated"] += '-' + fix[1]
for temp in word["post"]: # double array? WTF?
for navf in temp:
for fix in postfixes:
for temp in word["post"]:
for navf, navfid in temp:
for fix in [(x["navi"], x["gloss"]) for x in parse.postfixes]:
if fix[0] == navf:
word["translated"] += '-' + fix[1]
break
else:
# adpositions and the like
word["translated"] += "-" + dbconnector.translate(navfid, lang)
if word["len"]:
word["translated"] += '-' + 'LENTD'
return sent
/tsimapiak/dbconnector.py
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
21,32 → 21,70
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
 
import tornado.database
import re
import mysql.connector as mysql
 
def getnavilist():
ret = []
current = u""
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi")
for row in db.query("""
db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi")
cur = db.cursor(dictionary=True)
cur.execute("""
SELECT *
FROM `metaWords`
WHERE partOfSpeech <> 'num.' AND partOfSpeech <> "prefix"
ORDER BY CHAR_LENGTH(navi) DESC"""):
if row["infixes"]:
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]})
WHERE partOfSpeech <> 'num.' AND partOfSpeech <> 'prefix' AND partOfSpeech <> 'affix'
ORDER BY CHAR_LENGTH(navi) DESC""")
for row in cur:
navi = row["navi"].replace("+", "").replace("-", "")
if row["infixes"] and row["infixes"] != "NULL": # yeah seriously
ret.append({"id": row["id"], "navi": navi, "orig_navi": navi, "infix": row["infixes"].lower(), "type": row["partOfSpeech"], "lenited": False})
else:
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]})
ret.append({"id": row["id"], "navi": navi, "orig_navi": navi, "infix": navi.lower(), "type": row["partOfSpeech"], "lenited": False})
cur.close()
db.close()
return ret
 
def translate(wid,language):
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi")
for row in db.query("""
def getaffixlists():
    """Read `metaInfixes` and split it into (prefixes, infixes, postfixes).

    The affix kind is encoded in the `navi` column: a leading "-" marks a
    postfix, a trailing "-" or "+" marks a prefix, and anything else with a
    numeric `position` is a true infix.  A row may qualify as both prefix
    and postfix.  Each returned list is sorted by descending affix length.
    """
    prefixes = []
    true_infixes = []
    postfixes = []
    db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi")
    cur = db.cursor(dictionary=True)
    cur.execute("""
    SELECT *
    FROM `metaInfixes`
    ORDER BY CHAR_LENGTH(navi) DESC""")
    for row in cur:
        raw = row["navi"]
        is_endfix = False
        if raw and raw.startswith("-"):
            bare = raw.replace("-", "").lower()
            postfixes.append({"id": row["id"], "navi": bare, "orig_navi": bare, "gloss": row["shorthand"].upper()})
            is_endfix = True
        if raw and raw[-1] in ("-", "+"):
            bare = raw.replace("-", "").replace("+", "").lower()
            prefixes.append({"id": row["id"], "navi": bare, "orig_navi": bare, "gloss": row["shorthand"].upper()})
            is_endfix = True
        if not is_endfix:
            if row["position"] in (None, "NULL"):
                # not actually an affix
                continue
            true_infixes.append({"id": row["id"], "navi": raw.lower(), "orig_navi": raw.lower(), "gloss": row["shorthand"].upper(), "position": int(row["position"])})
    cur.close()
    db.close()

    ret = (prefixes, true_infixes, postfixes)
    for bucket in ret:
        bucket.sort(key=lambda affix: len(affix["navi"]), reverse=True)

    return ret
 
def translate(wid, language):
    """Return the localisation of metaWord `wid` in language `language`.

    Falls back to an explicit error string when `localizedWords` contains no
    row for this (id, languageCode) pair.

    Fixes over the previous revision: the connection used to leak when the
    word was not localised (the early return skipped db.close()), the cursor
    was never closed, and a stray pre-revision query line was interleaved in
    the compare view.
    """
    ret = None
    db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi")
    cur = db.cursor(dictionary=True)
    try:
        # Parameterised query - never interpolate wid/language into the SQL.
        cur.execute("""
        SELECT *
        FROM `localizedWords`
        WHERE id = %s AND languageCode = %s""", (wid, language))
        for row in cur:
            ret = row["localized"]
            break  # only the first matching row is wanted
    finally:
        cur.close()
        db.close()
    if ret is None:  # "is": identity test for the None sentinel
        return u"ERROR: WORD NOT LOCALISED"
    return ret
/tsimapiak/__init__.py
0,0 → 1,0
 
/webapp/templates/parse.html
16,7 → 16,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
39,17 → 39,17
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td rowspan="4">{{ wor["word"]["orig_navi"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[1] for y in x]) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
58,7 → 58,7
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p>
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
/webapp/templates/translate.html
16,7 → 16,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
29,11 → 29,19
<form action="/translate" method="post">
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" />
<select name="lang" id="lang">
<option value="eng" selected="selected">English</option>
<option value="en" selected="selected">English</option>
<option value="hu">Hungarian - Magyar</option>
<option value="de">German - Deutsch</option>
<option value="est">Estonian - Eesti</option>
<option value="ptbr">Brazilian Portuguese - Português do Brasil</option>
<option value="et">Estonian - Eesti</option>
<option value="pt">Brazilian Portuguese - Português do Brasil</option>
<option value="sv">Swedish - Svenska</option>
<option value="nl">Dutch - Nederlands</option>
<option value="es">Spanish - Español</option>
<option value="fr">French - Français</option>
<option value="pl">Polish - Polski</option>
<option value="ru">Russian - Русский</option>
<option value="tr">Turkish - Türkçe</option>
<option value="uk">Ukrainian - Українська</option>
</select>
<input name="btn" type="submit" value="Translate!" />
</form>
47,7 → 55,7
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td rowspan="4">{{ wor["word"]["orig_navi"] }}</td>
<td rowspan="4">{{ wor["translated"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
54,11 → 62,11
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[1] for y in x]) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
67,11 → 75,11
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p>
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
{% if lang != "eng" %}
{% if lang != "en" %}
<script type="text/javascript">
if("{{ lang }}" == "hu"){
document.getElementById("lang").selectedIndex = 1
79,12 → 87,36
if("{{ lang }}" == "de"){
document.getElementById("lang").selectedIndex = 2
}
if("{{ lang }}" == "est"){
if("{{ lang }}" == "et"){
document.getElementById("lang").selectedIndex = 3
}
if("{{ lang }}" == "ptbr"){
if("{{ lang }}" == "pt"){
document.getElementById("lang").selectedIndex = 4
}
if("{{ lang }}" == "sv"){
document.getElementById("lang").selectedIndex = 5
}
if("{{ lang }}" == "nl"){
document.getElementById("lang").selectedIndex = 6
}
if("{{ lang }}" == "es"){
document.getElementById("lang").selectedIndex = 7
}
if("{{ lang }}" == "fr"){
document.getElementById("lang").selectedIndex = 8
}
if("{{ lang }}" == "pl"){
document.getElementById("lang").selectedIndex = 9
}
if("{{ lang }}" == "ru"){
document.getElementById("lang").selectedIndex = 10
}
if("{{ lang }}" == "tr"){
document.getElementById("lang").selectedIndex = 11
}
if("{{ lang }}" == "uk"){
document.getElementById("lang").selectedIndex = 12
}
</script>
{% end %}
{% end %}
/webapp/templates/index.html
16,7 → 16,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
26,6 → 26,7
-->
 
<a href="/number"><b>Number translator</b></a> - this webapp allows you to translate written-out Na'vi numbers into decimal and octal.<br />
<a href="/parse"><b>Parser</b></a> - this webapp can parse Na'vi sentences into the base words, prefixes, infixes and suffixes. It does not translate the words.
<a href="/translate"><b>Translator</b></a> - this webapp translates full Na'vi sentences into human languages, as well as any affixes into the technical abbreviations.
<a href="/parse"><b>Parser</b></a> - this webapp can parse Na'vi sentences into the base words, prefixes, infixes and suffixes. It does not translate the words.<br />
<a href="/translate"><b>Translator</b></a> - this webapp translates full Na'vi sentences into human languages, as well as any affixes into the technical abbreviations.<br />
<a href="svn://tim32.org/navi">SVN repository</a> <a href="http://websvn.tim32.org/listing.php?repname=navi">(websvn)</a>
{% end %}
/webapp/templates/number.html
16,7 → 16,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
/webapp/templates/base.html
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
52,7 → 52,7
<body>
<div id="center">
<h1>Tsim Apiak</h1>
<h2>By Muzer and szabot</h2>
<h2>By Sh4rK and Muzer</h2>
<h2>{% block title %}Title{% end %}</h2>
{% block body %}Body{% end %}
</div>
/webapp/main.py
12,7 → 12,7
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Foobar is distributed in the hope that it will be useful,
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
21,19 → 21,14
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
 
from tsimapiak import parse, parsenum, translate
import http.client as httplib
import os
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.autoreload
 
import os
import re
 
from tsimapiak import parsenum
from tsimapiak import dbconnector
from tsimapiak import parse
from tsimapiak import translate
 
class Index(tornado.web.RequestHandler):
    """Landing page: renders the static index template, no dynamic data."""
    def get(self):
        self.render("templates/index.html")
47,7 → 42,7
num = self.get_argument("num").strip()
except:
self.redirect("/number")
numout = parsenum.parse(num.replace(" ",""))
numout = parsenum.parse(num.replace(" ", ""))
if numout == None:
numoutt = -1
else:
58,18 → 53,10
def get(self):
os.system("/usr/bin/restartnavi")
 
 
class TestDB(tornado.web.RequestHandler):
def get(self):
lis = dbconnector.getnavilist()
text = u"id | navi | infix | partofspeech<br />"
text += u"<br />".join(u" | ".join(unicode(y) for y in x) for x in lis)
self.write(text)
 
class Parse(tornado.web.RequestHandler):
    """Parser webapp page (the POST handler continues past this diff hunk)."""
    def get(self):
        # Empty form on first load: no previous input, no parse output.
        self.render("templates/parse.html", last="", out=None)
 
def post(self):
try:
word = self.get_argument("word")
80,8 → 67,8
 
class Translate(tornado.web.RequestHandler):
    """Translator webapp page (the POST handler continues past this diff hunk)."""
    def get(self):
        # The compare view interleaved the removed line (lang="eng") with its
        # replacement; the current language code is "en".
        self.render("templates/translate.html", last="", out=None, lang="en")
 
def post(self):
try:
word = self.get_argument("word")
90,6 → 77,20
self.redirect("/translate")
out = translate.translatesent(word, lang)
self.render("templates/translate.html", last=word, out=out, lang=lang)
class Errors(tornado.web.RequestHandler):
    """Fallback handler that renders minimal HTML error pages."""

    def get_error_html(self, status_code, **kwargs):
        # Standard reason phrase for the code, e.g. 404 -> "Not Found".
        reason = httplib.responses[status_code]
        banner = "%d: %s" % (status_code, reason)
        # Internal server errors get an extra apology / bug-report note.
        if status_code == 500:
            extra = ("<br /><br />Either we are currently working on the server, "
                     "or you uncovered a bug. Please check back later on. If you "
                     "still get this error, please report this bug to us in the "
                     "forum thread or on IRC.")
        else:
            extra = ""
        return ("<html><title>" + banner + "</title>"
                "<body>" + banner + extra + "</body></html>")
 
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static")
99,13 → 100,12
("/", Index),
("/number", Number),
("/restart", Restart),
("/testdb", TestDB),
("/parse", Parse),
("/translate", Translate)
("/translate", Translate),
("/(\\.well-known/.*)", tornado.web.StaticFileHandler, dict(path=settings["static_path"]))
], **settings)
 
if __name__ == "__main__":
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(1337)
#tornado.autoreload.start()
tornado.ioloop.IOLoop.instance().start()
/ircbot/bot.py
0,0 → 1,86
#! /usr/bin/env python
# This file is part of Tsim Apiak.
#
# Tsim Apiak is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Licence as published by
# the Free Software Foundation, either version 3 of the Licence, or
# (at your option) any later version.
#
# In addition to this, you must also comply with clause 4 of the
# Apache Licence, version 2.0, concerning attribution. Where there
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.#
 
from irc.bot import SingleServerIRCBot
from irc.client import nm_to_n
from tsimapiak import translate
from threading import *
 
class Bot(SingleServerIRCBot):
    """IRC front-end for the translator.

    Answers private messages directly and channel messages prefixed with
    "!".  Recognised command: "tr"/"translate" [-<lang>] <sentence>.
    """

    def __init__(self, channel, nickname, server, port=6667):
        SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
        self.channel = channel

    def pingit(self, c):
        # Self-ping every 30 s to keep the connection from timing out;
        # each ping re-arms the timer.
        c.ping("testing")
        t = Timer(30, self.pingit, [c])
        t.start()

    def on_nicknameinuse(self, c, e):
        # Append underscores until a free nick is found.
        c.nick(c.get_nickname() + "_")

    def on_welcome(self, c, e):
        c.join(self.channel)
        c.privmsg("NiceBot", "asztallab")
        t = Timer(30, self.pingit, [c])
        t.start()

    def on_privmsg(self, c, e):
        self.do_command(e, e.arguments()[0], True)

    def on_pubmsg(self, c, e):
        a = e.arguments()[0]
        # Guard against empty messages: unguarded a[0] raised IndexError.
        if a and a[0] == "!":
            self.do_command(e, a[1:].strip(), False)
        return

    def do_command(self, e, cmd, priv):
        # Private commands are answered to the sender, public ones to the
        # channel.
        if priv:
            nick = nm_to_n(e.source())
        else:
            nick = self.channel
        c = self.connection

        parts = cmd.split(" ")  # split once instead of re-splitting per check
        if parts[0] in ("tr", "translate"):
            lang = "en"
            if len(parts) > 1 and parts[1].startswith("-"):
                # Optional language flag, e.g. "tr -de oel ngati kameie".
                # An unknown flag is swallowed and English is used, same as
                # before.
                if parts[1][1:] in ("hu", "de", "pt", "et", "sv", "nl", "es",
                                    "fr", "pl", "ru", "tr", "uk"):
                    lang = parts[1][1:]
                sent = " ".join(parts[2:])
            else:
                sent = " ".join(parts[1:])
            translated = [word["translated"]
                          for word in translate.translatesent(sent, lang)]
            c.privmsg(nick, nm_to_n(e.source()) + ": " + " | ".join(translated))
 
def main():
    """Connect the bot to the Learn Na'vi IRC server and run forever."""
    # The commented-out line is the alternative/test network.
    #bot = Bot("#tim32", "TsimApiak", "irc.tim32.org", 6667)
    bot = Bot("#na'vi", "TsimApiak", "ikranakel.learnnavi.org", 6667)
    bot.start()

if __name__ == "__main__":
    main()
Property changes:
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: ircbot/README.txt
===================================================================
--- ircbot/README.txt (nonexistent)
+++ ircbot/README.txt (revision 301)
@@ -0,0 +1,3 @@
+This is an IRC bot for TsimApiak.
+
+To use it you have to put the tsimapiak directory inside this dir, and run bot.py.
Index: discordbot/README.txt
===================================================================
--- discordbot/README.txt (nonexistent)
+++ discordbot/README.txt (revision 301)
@@ -0,0 +1,3 @@
+This is a Discord bot for TsimApiak.
+
+To use it you have to put the tsimapiak directory inside this dir, and run bot.py.
Index: discordbot/bot.py
===================================================================
--- discordbot/bot.py (nonexistent)
+++ discordbot/bot.py (revision 301)
@@ -0,0 +1,60 @@
+#! /usr/bin/env python
+# This file is part of Tsim Apiak.
+#
+# Tsim Apiak is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public Licence as published by
+# the Free Software Foundation, either version 3 of the Licence, or
+# (at your option) any later version.
+#
+# In addition to this, you must also comply with clause 4 of the
+# Apache Licence, version 2.0, concerning attribution. Where there
+# is a contradiction between the two licences, the GPL
+# takes preference.
+#
+# Tsim Apiak is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Tsim Apiak. If not, see .#
+
+import os
+
+import discord
+import dotenv
+from typing import Optional
+
+from tsimapiak import translate
+
+dotenv.load_dotenv(dotenv.find_dotenv())
+
+intents = discord.Intents.default()
+client = discord.Client(intents=intents)
+tree = discord.app_commands.CommandTree(client)
+
+@client.event
+async def on_ready():
+    # Fires once the gateway connection is up; sync slash commands.
+    print(f"{client.user} Connected to discord")
+    await tree.sync()
+
+@tree.command(
+    name="translate",
+    description="Translate (gloss) Na'vi to English"
+)
+async def on_translate(interaction: discord.Interaction, argument: str, language: Optional[str]):
+    # Slash command: gloss `argument`; `language` defaults to English ("en").
+    if not language:
+        language = "en"
+    translated = []
+    for word in translate.translatesent(argument, language):
+        translated.append(word["translated"])
+
+    # Echo the original sentence followed by the word-by-word gloss.
+    await interaction.response.send_message(argument + "\n" + " | ".join(translated))
+
+def main():
+    # Token comes from the environment (.env loaded above); never hard-coded.
+    TOKEN = os.getenv('DISCORD_TOKEN')
+
+    client.run(TOKEN)
+
+if __name__ == "__main__":
+    main()
/discordbot/bot.py
Property changes:
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: cliapp/tsimapiakcli.py
===================================================================
--- cliapp/tsimapiakcli.py (nonexistent)
+++ cliapp/tsimapiakcli.py (revision 301)
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# This file is part of Tsim Apiak.
+#
+# Tsim Apiak is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public Licence as published by
+# the Free Software Foundation, either version 3 of the Licence, or
+# (at your option) any later version.
+#
+# In addition to this, you must also comply with clause 4 of the
+# Apache Licence, version 2.0, concerning attribution. Where there
+# is a contradiction between the two licences, the GPL
+# takes preference.
+#
+# Tsim Apiak is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Tsim Apiak. If not, see .
+
+from tsimapiak import translate
+import sys
+
+for line in sys.stdin:
+ try:
+ line = line.decode("utf-8")
+ except:
+ line = line.decode("iso-8859-1")
+ translated = []
+ for word in translate.translatesent(line, "en"):
+ translated.append(word["translated"])
+ translated = " | ".join(translated)
+ print translated
+
/cliapp/tsimapiakcli.py
Property changes:
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: cliapp/README
===================================================================
--- cliapp/README (nonexistent)
+++ cliapp/README (revision 301)
@@ -0,0 +1 @@
+This app is only for use on the server.
Index: LICENCE
===================================================================
--- LICENCE (revision 176)
+++ LICENCE (revision 301)
@@ -8,7 +8,7 @@
is a contradiction between the two licences, the GPL
takes preference.
- Foobar is distributed in the hope that it will be useful,
+ Tsim Apiak is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.