Subversion Repositories: navi

Compare Revisions: Rev 283 → Rev 298

/tsimapiak/dbconnector.py
33,10 → 33,11
WHERE partOfSpeech <> 'num.' AND partOfSpeech <> 'prefix' AND partOfSpeech <> 'affix'
ORDER BY CHAR_LENGTH(navi) DESC""")
for row in cur:
if row["infixes"]:
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]})
navi = row["navi"].replace("+", "").replace("-", "")
if row["infixes"] and row["infixes"] != "NULL": # yeah seriously
ret.append({"id": row["id"], "navi": navi, "orig_navi": navi, "infix": row["infixes"].lower(), "type": row["partOfSpeech"], "lenited": False})
else:
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]})
ret.append({"id": row["id"], "navi": navi, "orig_navi": navi, "infix": navi.lower(), "type": row["partOfSpeech"], "lenited": False})
cur.close()
db.close()
return ret
50,17 → 51,26
FROM `metaInfixes`
ORDER BY CHAR_LENGTH(navi) DESC""")
for row in cur:
endfix = False
if row["navi"] and row["navi"][0] == "-":
ret[2].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper()})
elif row["navi"] and row["navi"][-1] in ("-", "+"):
ret[0].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper()})
else:
if not row["position"]:
navi = row["navi"].replace("-", "").lower()
ret[2].append({"id": row["id"], "navi": navi, "orig_navi": navi, "gloss": row["shorthand"].upper()})
endfix = True
if row["navi"] and row["navi"][-1] in ("-", "+"):
navi = row["navi"].replace("-", "").replace("+", "").lower()
ret[0].append({"id": row["id"], "navi": navi, "orig_navi": navi, "gloss": row["shorthand"].upper()})
endfix = True
if not endfix:
if not row["position"] or row["position"] == "NULL":
# not actually an affix
continue
ret[1].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper(), "position": int(row["position"])})
ret[1].append({"id": row["id"], "navi": row["navi"].lower(), "orig_navi": row["navi"].lower(), "gloss": row["shorthand"].upper(), "position": int(row["position"])})
cur.close()
db.close()
 
for subret in ret:
subret.sort(key=lambda x: len(x["navi"]), reverse=True)
 
return ret
 
def translate(wid, language):
/tsimapiak/parse.py
22,6 → 22,7
 
import tsimapiak.dbconnector as dbconnector
import tsimapiak.parsenum as parsenum
import itertools
import re
 
#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns
31,21 → 32,22
 
# XXX HACK - These are words that are either not in Eana Eltu or that get parsed wrongly for whatever reason. The latter should be removed from this list once the parser gets more sophisticated; the former should also have an entry in the equivalent array in the translator! Anything that can take affixes belongs in the main wordlist above instead (see the examples). Field order: original word, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes, lenited flag, root form to display.
BROKENWORDS = (
(u"sami", u"si", u"", u"am", u"", (()), (()), False),
(u"to", u"to", u"", u"", u"", (()), (()), False),
(u"sami", u"si", u"", u"am", u"", (()), (()), False, "si"), # otherwise parses as sa (tsa-lenited) + mi
#(u"to", u"to", u"", u"", u"", (()), (()), False),
#(u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False),
(u"soaiä", u"soaia", u"", u"", u"", (()), [[u"ä"]], False),
(u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False),
(u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False),
(u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False),
(u"apxay", u"pxay", u"", u"", u"", [[u"a"]], (()), False),
(u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False), # TODO remember why on earth this is needed; how is awng interpreted as awnga?
(u"kawnga", u"kawng", u"", u"", u"", (()), [[u"a"]], False),
(u"kawng", u"kawng", u"", u"", u"", (()), (()), False),
(u"ka", u"ka", u"", u"", u"", (()), (()), False),
(u"uo", u"uo", u"", u"", u"", (()), (()), False),
(u"sìk", u"sìk", u"", u"", u"", (()), (()), False),
(u"sim", u"sim", u"", u"", u"", (()), (()), False) # probably not tsim lenited
(u"soaiä", u"soaia", u"", u"", u"", (()), [[(u"ä", None)]], False, "soaia"), # does not parse, irregular form
#(u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False),
#(u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False),
(u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False, "kä"), # otherwise parses as kìm (spin) + ä (genitive)
(u"apxay", u"pxay", u"", u"", u"", [[(u"a", "a")]], (()), False, "pxay"), # otherwise parses as apxa + -y (genitive)
#(u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False),
#(u"kawnga", u"kawng", u"", u"", u"", (()), [[(u"a", None)]], False),
#(u"kawng", u"kawng", u"", u"", u"", (()), (()), False),
#(u"ka", u"ka", u"", u"", u"", (()), (()), False),
#(u"uo", u"uo", u"", u"", u"", (()), (()), False),
#(u"sìk", u"sìk", u"", u"", u"", (()), (()), False),
#(u"sim", u"sim", u"", u"", u"", (()), (()), False), # probably not tsim lenited
(u"tse", u"tse", u"", u"", u"", (()), (()), False, "tse"), # otherwise parses as tsa'u abbreviated (special case)
)
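# How parseword() below consumes one of these entries (a reading of the code,
# not documented elsewhere): brokenword[0] is compared against the input word;
# brokenword[1] is looked up in wordlist to recover its id and type; [2]-[4]
# fill the three infix slots, [5]/[6] the prefix/postfix lists, [7] the
# lenited flag, and [8] the root form shown to the user.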
 
#INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"")
57,10 → 59,67
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?"
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)"
 
EXTRAADP = ("to", "sì") # words that act like adpositions but technically aren't
EXTRAINFIXES = [
{"id": "-1", "navi": "eiy", "orig_navi": "ei", "gloss": "LAUD.", "position": 2},
{"id": "-2", "navi": "eng", "orig_navi": "äng", "gloss": "PEJ.", "position": 2},
]
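# Surface variants of the mood infixes: before a following i, ‹ei› shows up
# as ‹eiy› and ‹äng› as ‹eng› (as in seiyi, sengi from si), so these
# spellings are mapped back to their canonical infixes.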
 
EXTRAPOSTFIXES = [
{"id": "-3", "navi": "eyä", "orig_navi": "yä", "gloss": "GEN."},
]
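# Genitive -yä: pronoun-final a/o shifts to e before it (nga → ngeyä,
# po → peyä), so a trailing -eyä is recognised as the same GEN ending.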
 
EXTRAADP = (("to", [x["id"] for x in wordlist if x["navi"] == "to"][0]), ("sì", [x["id"] for x in wordlist if x["navi"] == "sì"][0])) # words that act like adpositions but technically aren't
 
LENIT = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))
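# Na'vi lenition pairs, digraphs first so they match before their
# single-letter prefixes; e.g. the plural ay+ turns kelku into ayhelku (k → h).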
 
# Let's lenite the prefixes
extraprefixes = []
for prefix in prefixes:
for letter, replacement in LENIT:
if prefix["navi"].startswith(letter):
new_prefix = prefix["navi"].replace(letter, replacement, 1)
if not [x for x in prefixes if x["navi"] == new_prefix]: # always assume a dictionary word over a lenited prefix
extraprefixes.append({"id": prefix["id"], "navi": new_prefix, "gloss": prefix["gloss"] + ".LENTD", "orig_navi": prefix["navi"]})
break
 
prefixes = sorted(prefixes + extraprefixes, key=lambda x: len(x["navi"]), reverse=True)
infixes = sorted(infixes + EXTRAINFIXES, key=lambda x: len(x["navi"]), reverse=True)
postfixes = sorted(postfixes + EXTRAPOSTFIXES, key=lambda x: len(x["navi"]), reverse=True)
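# Longest form first: the greedy affix matchers in parseword() must try
# longer affixes before any affix that is a substring of them.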
 
# Let's lenite the dictionary
extrawords = []
for word in wordlist:
splitword = word["navi"].split(" ")
splitinfix = word["infix"].split(" ")
lenitword = {}
lenitinfix = {}
for i, wor in enumerate(splitword):
for letter, replacement in LENIT:
if wor.startswith(letter):
lenitword[i] = wor.replace(letter, replacement, 1)
lenitinfix[i] = splitinfix[i].replace(letter, replacement, 1)
break
 
s = list(lenitword.keys())
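# Generate every non-empty subset of the lenitable positions (the classic
# itertools powerset recipe), so a multi-word entry gets an extra wordlist
# entry for each combination of lenited and unlenited parts.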
for lenits in itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(1, len(s)+1)):
new_word = ""
new_infix = ""
for i, wor in enumerate(splitword):
if i in lenits:
new_word += lenitword[i]
new_infix += lenitinfix[i]
else:
new_word += wor
new_infix += splitinfix[i]
new_word += " "
new_infix += " "
print(f"Generated lenited {new_word} from {word['navi']}")
new_word = new_word[:-1]
new_infix = new_infix[:-1]
extrawords.append({"id": word["id"], "navi": new_word, "infix": new_infix, "type": word["type"], "lenited": True, "orig_navi": word["navi"]})
 
wordlist = sorted(wordlist + extrawords, key=lambda x: len(x["navi"]) * 2 + (0 if x["lenited"] else 1), reverse=True)
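# Sort key: word length dominates (hence the ×2); at equal length the
# unlenited form gets +1 and so sorts first, making real dictionary words
# win over lenited look-alikes.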
 
def parseword(wordin):
tempid = 0
temptype = u""
70,14 → 129,13
if brokenword[1] == word["navi"]:
tempid = word["id"]
temptype = word["type"]
return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype, "orig_navi": brokenword[8]}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u"", "orig_navi": "[" + wordin[0] + "]"}, "len": False}
for word in wordlist:
word["navi"] = word["navi"].lower()
foundit = True
foundprefs = []
foundposts = []
lenited = False
splitword = word["infix"].split(u" ")
foundins = [u"", u"", u""]
if len(wordin) < len(splitword):
89,7 → 147,7
foundprefs.append([])
foundposts.append([])
center = u""
if u"<1>" in splitword[wor]:
if u"<0>" in splitword[wor]:
tempin1 = []
tempin2 = []
tempin3 = []
105,8 → 163,8
for in1 in tempin1:
for in2 in tempin2:
for in3 in tempin3:
if splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]:
center = splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r")
if splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]:
center = splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r")
foundins = [in1, in2, in3]
break
if center != u"":
116,31 → 174,43
else:
if splitword[wor] in wordin[wor]:
center = splitword[wor]
if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX HACK - workaround to fix pay being lenited pxay. Maybe fixable without hardcoding?
for i in LENIT:
temp = u""
if splitword[wor].startswith(i[0]):
temp = i[1] + splitword[wor][len(i[0]):]
if temp in wordin[wor]:
lenited = True
center = temp
if center == u"":
if splitword[wor].endswith(u"nga"):
temp = splitword[wor][:-3] + u"ng"
temp = splitword[wor][:-3] + u"nge"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"fo"):
temp = splitword[wor][:-2] + u"f"
temp = splitword[wor][:-2] + u"fe"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"po"):
temp = splitword[wor][:-2] + u"p"
temp = splitword[wor][:-2] + u"pe"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"tsa"):
temp = splitword[wor][:-3] + u"ts"
temp = splitword[wor][:-3] + u"tse"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"fko"):
temp = splitword[wor][:-3] + u"fke"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sa'u"):
temp = splitword[wor][:-4] + u"se"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sa"):
temp = splitword[wor][:-2] + u"se"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"sno"):
temp = splitword[wor][:-3] + u"sne"
if temp in wordin[wor]:
center = temp
if splitword[wor].endswith(u"ayla"):
temp = splitword[wor][:-3] + u"ayle"
if temp in wordin[wor]:
center = temp
if center == u"":
foundit = False
break
152,13 → 222,13
last = u""
while last != pref:
last = pref
for pre in [x["navi"] for x in prefixes]:
for pre in prefixes:
if pref != u"":
if pref.endswith(pre):
if pre in foundprefs[wor]:
if pref.endswith(pre["navi"]):
if pre["navi"] in foundprefs[wor]:
break
foundprefs[wor].append(pre)
pref = pref[:-len(pre)]
foundprefs[wor].append((pre["navi"], pre["orig_navi"])) # only needed here, to handle lenition
pref = pref[:-len(pre["navi"])]
break
if pref != u"":
foundit = False
167,13 → 237,13
last = u""
while last != posf:
last = posf
for pos in [x["navi"] for x in postfixes] + [x["navi"] for x in wordlist if x["type"] == "adp."] + list(EXTRAADP):
for pos, posid in sorted([(x["navi"], None) for x in postfixes] + [(x["navi"], x["id"]) for x in wordlist if x["type"] == "adp."] + list(EXTRAADP), key=lambda x: len(x[0]), reverse=True):
if posf != u"":
if posf.startswith(pos):
if pos in foundposts[wor]:
if (pos, posid) in foundposts[wor]:
break
if pos != u"ä" or word["navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
foundposts[wor].append(pos)
if pos != u"ä" or word["orig_navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
foundposts[wor].append((pos, posid))
posf = posf[len(pos):]
break
else:
188,8 → 258,8
ret["pref"] = foundprefs
ret["post"] = foundposts
ret["inf"] = foundins
ret["len"] = lenited
if foundit:
ret["len"] = word["lenited"]
ret["word"] = foundword
return ret
 
/tsimapiak/parsenum.py
75,18 → 75,18
outdec = 0
ret = {"word": {"id": 0, "navi": u"", "infix": u"", "type": u""}, "pref": [prefs], "post": [posts], "inf": [u"", u"", u""], "len": False, "dec": outdec, "oct": outoct}
if numin[0] == u"a":
prefs.append(u"a")
prefs.append((u"a", "a"))
numin = numin[1:]
if numin[len(numin) - 1] == u"a":
posts.append(u"a")
posts.append((u"a", None))
numin = numin[:-1]
if numin[-2:] == u"ve":
posts.append(u"ve")
posts.append((u"ve", None))
numin = numin[:-2]
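# Note the tuple shapes, matching what parse.py now produces: prefix entries
# are (form, original form) pairs, postfix entries are (form, word id) pairs
# with a None id for plain suffixes like -ve.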
 
#BASE numbers
for n in range(len(NUM)):
if u"ve" in posts:
if (u"ve", None) in posts:
if numin == NUMORD[n]:
outoct = n
outdec = n
153,7 → 153,7
notbase = True
if notbase:
for n in range(len(REM)):
if u"ve" in posts:
if (u"ve", None) in posts:
if numin == REMORD[n]:
outoct += n + 1
outdec += n + 1
164,7 → 164,7
outdec += n + 1
numin = u""
if numin == u"":
ret["word"]["navi"] = str(outdec) if not u"ve" in posts else str(outdec) + u"."
ret["word"]["navi"] = str(outdec) if not (u"ve", None) in posts else str(outdec) + u"."
ret["dec"] = outdec
ret["oct"] = outoct
return ret
/tsimapiak/translate.py
31,8 → 31,6
#ADPOSITIONS = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and"))
#POSTFIXES = ADPOSITIONS + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ari", u"TOP."), (u"ayä", u"GEN."), (u"aru", u"DAT."), (u"ati", u"ACC."), (u"ay", u"GEN."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"al", u"ERG."), (u"at", u"ACC."), (u"ar", u"DAT."), (u"ey", u"GEN."), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT."))
 
prefixes, infixes, postfixes = dbconnector.getaffixlists()
 
def translatesent(sent, lang):
sent = parse.parsesent(sent)
for word in sent:
44,33 → 42,33
# if brokenword[0] == word["word"]["navi"]:
# word["translated"] = brokenword[1]
if word["translated"] == u"":
word["translated"] = word["word"]["navi"]
word["translated"] = word["word"]["orig_navi"]
if word["inf"][0] != u"":
for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 0]:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 0]:
if fix[0] == word["inf"][0]:
word["translated"] += '-' + fix[1]
if word["inf"][1] != u"":
for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 1]:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 1]:
if fix[0] == word["inf"][1]:
word["translated"] += '-' + fix[1]
if word["inf"][2] != u"":
for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 2]:
for fix in [(x["navi"], x["gloss"]) for x in parse.infixes if x["position"] == 2]:
if fix[0] == word["inf"][2]:
word["translated"] += '-' + fix[1]
for temp in word["pref"]:
for navf in temp:
for fix in [(x["navi"], x["gloss"]) for x in prefixes]:
for navf, navf_orig in temp:
for fix in [(x["navi"], x["gloss"]) for x in parse.prefixes]:
if fix[0] == navf:
word["translated"] += '-' + fix[1]
for temp in word["post"]:
for navf in temp:
for fix in [(x["navi"], x["gloss"]) for x in postfixes]:
for navf, navfid in temp:
for fix in [(x["navi"], x["gloss"]) for x in parse.postfixes]:
if fix[0] == navf:
word["translated"] += '-' + fix[1]
break
else:
# adpositions and the like
word["translated"] += "-" + dbconnector.translate(navf, lang)
word["translated"] += "-" + dbconnector.translate(navfid, lang)
if word["len"]:
word["translated"] += '-' + 'LENTD'
return sent
/webapp/templates/parse.html
39,17 → 39,17
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td rowspan="4">{{ wor["word"]["orig_navi"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[1] for y in x]) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
58,7 → 58,7
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
/webapp/templates/translate.html
29,13 → 29,19
<form action="/translate" method="post">
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" />
<select name="lang" id="lang">
<option value="eng" selected="selected">English</option>
<option value="en" selected="selected">English</option>
<option value="hu">Hungarian - Magyar</option>
<option value="de">German - Deutsch</option>
<option value="est">Estonian - Eesti</option>
<option value="ptbr">Brazilian Portuguese - Português do Brasil</option>
<option value="et">Estonian - Eesti</option>
<option value="pt">Brazilian Portuguese - Português do Brasil</option>
<option value="sv">Swedish - Svenska</option>
<option value="nl">Dutch - Nederlands</option>
<option value="es">Spanish - Español</option>
<option value="fr">French - Français</option>
<option value="pl">Polish - Polski</option>
<option value="ru">Russian - Русский</option>
<option value="tr">Turkish - Türkçe</option>
<option value="uk">Ukrainian - Українська</option>
</select>
<input name="btn" type="submit" value="Translate!" />
</form>
49,7 → 55,7
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td rowspan="4">{{ wor["word"]["orig_navi"] }}</td>
<td rowspan="4">{{ wor["translated"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
56,11 → 62,11
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[1] for y in x]) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
69,11 → 75,11
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
{% if lang != "eng" %}
{% if lang != "en" %}
<script type="text/javascript">
if("{{ lang }}" == "hu"){
document.getElementById("lang").selectedIndex = 1
81,10 → 87,10
if("{{ lang }}" == "de"){
document.getElementById("lang").selectedIndex = 2
}
if("{{ lang }}" == "est"){
if("{{ lang }}" == "et"){
document.getElementById("lang").selectedIndex = 3
}
if("{{ lang }}" == "ptbr"){
if("{{ lang }}" == "pt"){
document.getElementById("lang").selectedIndex = 4
}
if("{{ lang }}" == "sv"){
93,6 → 99,24
if("{{ lang }}" == "nl"){
document.getElementById("lang").selectedIndex = 6
}
if("{{ lang }}" == "es"){
document.getElementById("lang").selectedIndex = 7
}
if("{{ lang }}" == "fr"){
document.getElementById("lang").selectedIndex = 8
}
if("{{ lang }}" == "pl"){
document.getElementById("lang").selectedIndex = 9
}
if("{{ lang }}" == "ru"){
document.getElementById("lang").selectedIndex = 10
}
if("{{ lang }}" == "tr"){
document.getElementById("lang").selectedIndex = 11
}
if("{{ lang }}" == "uk"){
document.getElementById("lang").selectedIndex = 12
}
</script>
{% end %}
{% end %}
/webapp/main.py
67,7 → 67,7
 
class Translate(tornado.web.RequestHandler):
def get(self):
self.render("templates/translate.html", last="", out=None, lang="eng")
self.render("templates/translate.html", last="", out=None, lang="en")
 
def post(self):
try:
101,7 → 101,8
("/number", Number),
("/restart", Restart),
("/parse", Parse),
("/translate", Translate)
("/translate", Translate),
("/(\\.well-known/.*)", tornado.web.StaticFileHandler, dict(path=settings["static_path"]))
], **settings)
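# The /.well-known handler serves files straight from the static path,
# presumably for ACME (Let's Encrypt) HTTP-01 challenges and similar.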
 
if __name__ == "__main__":
/ircbot/bot.py
64,9 → 64,9
c = self.connection
 
if (cmd.split(" ")[0] == "tr") or (cmd.split(" ")[0] == "translate"):
lang = "eng"
lang = "en"
if len(cmd.split(" ")) > 1 and cmd.split(" ")[1].startswith("-"):
if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est", "sv", "nl"):
if cmd.split(" ")[1][1:] in ("hu", "de", "pt", "et", "sv", "nl", "es", "fr", "pl", "ru", "tr", "uk"):
lang = cmd.split(" ")[1][1:]
sent = " ".join(cmd.split(" ")[2:])
else:
/discordbot/README.txt
0,0 → 1,3
This is a Discord bot for TsimApiak.
 
To use it, put the tsimapiak directory inside this directory, then run bot.py.
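bot.py reads the bot token from the environment: it calls python-dotenv at startup, so a .env file next to bot.py containing a line like DISCORD_TOKEN=<your bot token> is enough.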
/discordbot/bot.py
0,0 → 1,60
#! /usr/bin/env python
# This file is part of Tsim Apiak.
#
# Tsim Apiak is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Licence as published by
# the Free Software Foundation, either version 3 of the Licence, or
# (at your option) any later version.
#
# In addition to this, you must also comply with clause 4 of the
# Apache Licence, version 2.0, concerning attribution. Where there
# is a contradiction between the two licences, the GPL
# takes preference.
#
# Tsim Apiak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>.
 
import os
 
import discord
import dotenv
from typing import Optional
 
from tsimapiak import translate
 
dotenv.load_dotenv(dotenv.find_dotenv())
 
intents = discord.Intents.default()
client = discord.Client(intents=intents)
tree = discord.app_commands.CommandTree(client)
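# The CommandTree holds the bot's slash commands; on_ready pushes them to
# Discord with tree.sync().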
 
@client.event
async def on_ready():
print(f"{client.user} Connected to discord")
await tree.sync()
 
@tree.command(
name="translate",
description="Translate (gloss) Na'vi to English"
)
async def on_translate(interaction: discord.Interaction, argument: str, language: Optional[str]):
if not language:
language = "en"
translated = []
for word in translate.translatesent(argument, language):
translated.append(word["translated"])
 
await interaction.response.send_message(argument + "\n" + " | ".join(translated))
 
def main():
TOKEN = os.getenv('DISCORD_TOKEN')
 
client.run(TOKEN)
 
if __name__ == "__main__":
main()
Property changes:
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Index: cliapp/tsimapiakcli.py
===================================================================
--- cliapp/tsimapiakcli.py (revision 283)
+++ cliapp/tsimapiakcli.py (revision 298)
@@ -28,7 +28,7 @@
except:
line = line.decode("iso-8859-1")
translated = []
- for word in translate.translatesent(line, "eng"):
+ for word in translate.translatesent(line, "en"):
translated.append(word["translated"])
translated = " | ".join(translated)
print translated