/tsimapiak/dbconnector.py |
---|
21,30 → 21,60 |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
-import tornado.database |
+import mysql.connector as mysql |
def getnavilist(): |
ret = [] |
-db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi") |
-for row in db.query(""" |
+db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi") |
+cur = db.cursor(dictionary=True) |
+cur.execute(""" |
SELECT * |
FROM `metaWords` |
WHERE partOfSpeech <> 'num.' AND partOfSpeech <> 'prefix' AND partOfSpeech <> 'affix' |
-ORDER BY CHAR_LENGTH(navi) DESC"""): |
-if row["infixes"]: |
-ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]}) |
+ORDER BY CHAR_LENGTH(navi) DESC""") |
+for row in cur: |
+if row["infixes"] and row["infixes"] != "NULL": # yeah seriously |
+ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["infixes"].lower(), "type": row["partOfSpeech"]}) |
else: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]}) |
ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["navi"].lower(), "type": row["partOfSpeech"]}) |
cur.close() |
db.close() |
return ret |
def getaffixlists(): |
ret = ([], [], []) |
db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi") |
cur = db.cursor(dictionary=True) |
cur.execute(""" |
SELECT * |
FROM `metaInfixes` |
ORDER BY CHAR_LENGTH(navi) DESC""") |
for row in cur: |
endfix = False |
if row["navi"] and row["navi"][0] == "-": |
ret[2].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper()}) |
endfix = True |
if row["navi"] and row["navi"][-1] in ("-", "+"): |
ret[0].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper()}) |
endfix = True |
if not endfix: |
if not row["position"] or row["position"] == "NULL": |
# not actually an affix |
continue |
ret[1].append({"id": row["id"], "navi": row["navi"].replace("-", ""), "gloss": row["shorthand"].upper(), "position": int(row["position"])}) |
cur.close() |
db.close() |
return ret |
def translate(wid, language): |
ret = None |
-db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi") |
-for row in db.query(""" |
+db = mysql.connect(host="127.0.0.1", db="navi", user="navi", passwd="navi") |
+cur = db.cursor(dictionary=True) |
+cur.execute(""" |
SELECT * |
FROM `localizedWords` |
-WHERE id = %s AND languageCode = %s""", wid, language): |
+WHERE id = %s AND languageCode = %s""", (wid, language)) |
+for row in cur: |
ret = row["localized"] |
break |
if ret == None: |
/tsimapiak/parse.py |
---|
20,25 → 20,45 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
-import dbconnector |
-import parsenum |
+import tsimapiak.dbconnector as dbconnector |
+import tsimapiak.parsenum as parsenum |
import re |
#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns |
wordlist = dbconnector.getnavilist() |
prefixes, infixes, postfixes = dbconnector.getaffixlists() |
BROKENWORDS = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False), (u"soaiä", u"soaia", u"", u"", u"", (()), [[u"ä"]], False), (u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False), (u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False), (u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False), (u"apxay", u"pxay", u"", u"", u"", [[u"a"]], (()), False), (u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False), (u"ka", u"ka", u"", u"", u"", (()), (()), False), (u"uo", u"uo", u"", u"", u"", (()), (()), False), (u"sim", u"sim", u"", u"", u"", (()), (()), False)) # probably not tsim lenited |
# XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes. Things that can take affixes should go in the above list instead. |
INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"") |
INFIXES2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"") |
INFIXES3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"") |
PREFIXES = (u"tsay", u"fray", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"kel", u"lek", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"px", u"a", u"m", u"k") |
ADPOSITIONS = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì") |
POSTFIXES = ADPOSITIONS + (u"tsyìp", u"eyä", u"ìri", u"aru", u"ati", u"ayä", u"ari", u"ay", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"al", u"at", u"ar", u"ey", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r") |
BROKENWORDS = ( |
(u"sami", u"si", u"", u"am", u"", (()), (()), False), |
(u"to", u"to", u"", u"", u"", (()), (()), False), |
#(u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False), |
(u"soaiä", u"soaia", u"", u"", u"", (()), [[(u"ä", None)]], False), |
(u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False), |
(u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False), |
(u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False), |
(u"apxay", u"pxay", u"", u"", u"", [[u"a"]], (()), False), |
(u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False), |
(u"kawnga", u"kawng", u"", u"", u"", (()), [[(u"a", None)]], False), |
(u"kawng", u"kawng", u"", u"", u"", (()), (()), False), |
(u"ka", u"ka", u"", u"", u"", (()), (()), False), |
(u"uo", u"uo", u"", u"", u"", (()), (()), False), |
(u"sìk", u"sìk", u"", u"", u"", (()), (()), False), |
(u"sim", u"sim", u"", u"", u"", (()), (()), False) # probably not tsim lenited |
) |
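# Example reading of one BROKENWORDS entry, using the field order documented in the comment above |
# (original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes, plus what appears to be a lenition flag): |
#   (u"sami", u"si", u"", u"am", u"", (()), (()), False) |
# reads as: surface form "sami" is the root "si" carrying the position-1 infix <am> (glossed PAST. in the translator), with no prefixes or suffixes and no lenition. |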
#INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"") |
#INFIXES2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"") |
#INFIXES3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"") |
#PREFIXES = (u"tsay", u"fray", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"kel", u"lek", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"px", u"a", u"m", u"k") |
#ADPOSITIONS = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì") |
#POSTFIXES = ADPOSITIONS + (u"tsyìp", u"eyä", u"ìri", u"aru", u"ati", u"ayä", u"ari", u"ay", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"al", u"at", u"ar", u"ey", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r") |
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?" |
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)" |
EXTRAADP = (("to", [x["id"] for x in wordlist if x["navi"] == "to"][0]), ("sì", [x["id"] for x in wordlist if x["navi"] == "sì"][0])) # words that act like adpositions but technically aren't |
LENIT = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u"")) |
def parseword(wordin): |
69,24 → 89,24 |
foundprefs.append([]) |
foundposts.append([]) |
center = u"" |
if u"<1>" in splitword[wor]: |
if u"<0>" in splitword[wor]: |
tempin1 = [] |
tempin2 = [] |
tempin3 = [] |
-for in1 in INFIXES1: |
+for in1 in [x["navi"] for x in infixes if x["position"] == 0] + [""]: |
if in1 in wordin[wor]: |
tempin1.append(in1) |
-for in2 in INFIXES2: |
+for in2 in [x["navi"] for x in infixes if x["position"] == 1] + [""]: |
if in2 in wordin[wor]: |
tempin2.append(in2) |
-for in3 in INFIXES3: |
+for in3 in [x["navi"] for x in infixes if x["position"] == 2] + [""]: |
if in3 in wordin[wor]: |
tempin3.append(in3) |
for in1 in tempin1: |
for in2 in tempin2: |
for in3 in tempin3: |
if splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]: |
center = splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") |
if splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]: |
center = splitword[wor].replace(u"<0><1>", in1 + in2).replace(u"<2>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") |
foundins = [in1, in2, in3] |
break |
if center != u"": |
132,7 → 152,7 |
last = u"" |
while last != pref: |
last = pref |
-for pre in PREFIXES: |
+for pre in [x["navi"] for x in prefixes]: |
if pref != u"": |
if pref.endswith(pre): |
if pre in foundprefs[wor]: |
147,13 → 167,13 |
last = u"" |
while last != posf: |
last = posf |
-for pos in POSTFIXES: |
+for pos, posid in [(x["navi"], None) for x in postfixes] + [(x["navi"], x["id"]) for x in wordlist if x["type"] == "adp."] + list(EXTRAADP): |
if posf != u"": |
if posf.startswith(pos): |
-if pos in foundposts[wor]: |
+if (pos, posid) in foundposts[wor]: |
break |
if pos != u"ä" or word["navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE! |
-foundposts[wor].append(pos) |
+foundposts[wor].append((pos, posid)) |
posf = posf[len(pos):] |
break |
else: |
175,8 → 195,8 |
def parsesent(sent): |
sent = sent.strip().lower().replace(u"’", u"'") |
sent = re.sub(ur"[^\wìä' ]", u"", sent) |
sent = re.sub(ur"\ +", u" ", sent) |
sent = re.sub(r"[^\wìä' ]", u"", sent) |
sent = re.sub(r"\ +", u" ", sent) |
sent = sent.split(u" ") |
ret = [] |
left = len(sent) |
/tsimapiak/parsenum.py |
---|
20,6 → 20,8 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
+from __future__ import print_function |
NUM = [u"kew", |
u"'aw", |
u"mune", |
76,19 → 78,19 |
prefs.append(u"a") |
numin = numin[1:] |
if numin[len(numin) - 1] == u"a": |
posts.append(u"a") |
posts.append((u"a", None)) |
numin = numin[:-1] |
if numin[-2:] == u"ve": |
posts.append(u"ve") |
posts.append((u"ve", None)) |
numin = numin[:-2] |
#BASE numbers |
for n in range(len(NUM)): |
if u"ve" in posts: |
if (u"ve", None) in posts: |
if numin == NUMORD[n]: |
outoct = n |
outdec = n |
ret["word"]["navi"] = unicode(outdec) + u"." |
ret["word"]["navi"] = str(outdec) + u"." |
ret["dec"] = outdec |
ret["oct"] = outoct |
return ret |
96,7 → 98,7 |
if numin == NUM[n]: |
outoct = n |
outdec = n |
ret["word"]["navi"] = unicode(outdec) |
ret["word"]["navi"] = str(outdec) |
ret["dec"] = outdec |
ret["oct"] = outoct |
return ret |
151,7 → 153,7 |
notbase = True |
if notbase: |
for n in range(len(REM)): |
if u"ve" in posts: |
if (u"ve", None) in posts: |
if numin == REMORD[n]: |
outoct += n + 1 |
outdec += n + 1 |
162,7 → 164,7 |
outdec += n + 1 |
numin = u"" |
if numin == u"": |
ret["word"]["navi"] = unicode(outdec) if not u"ve" in posts else unicode(outdec) + u"." |
ret["word"]["navi"] = str(outdec) if not (u"ve", None) in posts else str(outdec) + u"." |
ret["dec"] = outdec |
ret["oct"] = outoct |
return ret |
170,4 → 172,4 |
return None |
if __name__ == "__main__": |
print parse(u"mevolawve") |
print(parse(u"mevolawve")) |
/tsimapiak/translate.py |
---|
20,17 → 20,19 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
-import dbconnector |
-import parse |
+import tsimapiak.dbconnector as dbconnector |
+import tsimapiak.parse as parse |
#BROKENWORDS = [[u"", u"", u"", u"", u"", u"", u""]] #, (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # XXX HACK - words not in EE |
INFIXES1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL.")) |
INFIXES2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF.")) |
INFIXES3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON.")) |
PREFIXES = ((u"tsay", u"those"), (u"fray", u"every-PL."), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"kel", u"ADJD.-not"), (u"lek", u"not-ADJD."), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"px", u"TRI."), (u"a", u"ADJ.POST"), (u"m", u"DU."), (u"k", u"not")) |
ADPOSITIONS = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and")) |
POSTFIXES = ADPOSITIONS + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ari", u"TOP."), (u"ayä", u"GEN."), (u"aru", u"DAT."), (u"ati", u"ACC."), (u"ay", u"GEN."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"al", u"ERG."), (u"at", u"ACC."), (u"ar", u"DAT."), (u"ey", u"GEN."), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT.")) |
#INFIXES1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL.")) |
#INFIXES2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF.")) |
#INFIXES3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON.")) |
#PREFIXES = ((u"tsay", u"those"), (u"fray", u"every-PL."), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"kel", u"ADJD.-not"), (u"lek", u"not-ADJD."), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"px", u"TRI."), (u"a", u"ADJ.POST"), (u"m", u"DU."), (u"k", u"not")) |
#ADPOSITIONS = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and")) |
#POSTFIXES = ADPOSITIONS + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ari", u"TOP."), (u"ayä", u"GEN."), (u"aru", u"DAT."), (u"ati", u"ACC."), (u"ay", u"GEN."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"al", u"ERG."), (u"at", u"ACC."), (u"ar", u"DAT."), (u"ey", u"GEN."), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT.")) |
prefixes, infixes, postfixes = dbconnector.getaffixlists() |
def translatesent(sent, lang): |
sent = parse.parsesent(sent) |
for word in sent: |
44,27 → 46,31 |
if word["translated"] == u"": |
word["translated"] = word["word"]["navi"] |
if word["inf"][0] != u"": |
-for fix in INFIXES1: |
+for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 0]: |
if fix[0] == word["inf"][0]: |
word["translated"] += '-' + fix[1] |
if word["inf"][1] != u"": |
-for fix in INFIXES2: |
+for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 1]: |
if fix[0] == word["inf"][1]: |
word["translated"] += '-' + fix[1] |
if word["inf"][2] != u"": |
-for fix in INFIXES3: |
+for fix in [(x["navi"], x["gloss"]) for x in infixes if x["position"] == 2]: |
if fix[0] == word["inf"][2]: |
word["translated"] += '-' + fix[1] |
for temp in word["pref"]: |
for navf in temp: |
-for fix in PREFIXES: |
+for fix in [(x["navi"], x["gloss"]) for x in prefixes]: |
if fix[0] == navf: |
word["translated"] += '-' + fix[1] |
for temp in word["post"]: |
-for navf in temp: |
-for fix in POSTFIXES: |
+for navf, navfid in temp: |
+for fix in [(x["navi"], x["gloss"]) for x in postfixes]: |
if fix[0] == navf: |
word["translated"] += '-' + fix[1] |
+break |
+else: |
+# adpositions and the like |
+word["translated"] += "-" + dbconnector.translate(navfid, lang) |
if word["len"]: |
word["translated"] += '-' + 'LENTD' |
return sent |
/discordbot/README.txt |
---|
0,0 → 1,3 |
This is a Discord bot for TsimApiak. |
To use it, put the tsimapiak directory inside this directory and run bot.py. |
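For orientation, here is a minimal sketch of the layout and import path the README describes. The `.env` file and `DISCORD_TOKEN` name follow the `dotenv`/`os.getenv` calls in bot.py below; the rest of the layout is an assumption, not something this changeset mandates. Note that the tsimapiak modules query the local `navi` MySQL database at import time (parse.py calls dbconnector.getnavilist() at module level), so that database must be reachable as well.

```python
# Sketch only: expected directory layout for the Discord bot (assumption based on
# this README and the "from tsimapiak import translate" line in bot.py below).
#
#   discordbot/
#       bot.py
#       .env          # contains DISCORD_TOKEN=<bot token>, read via dotenv in bot.py
#       tsimapiak/    # copied here from the repository root
#           dbconnector.py, parse.py, parsenum.py, translate.py, ...
#
# Importing translate pulls in parse and dbconnector, which query the local "navi"
# MySQL database, so that has to be running before bot.py will start.
from tsimapiak import translate

# Quick smoke test with a placeholder word; the bot does the same per sentence.
print([w["translated"] for w in translate.translatesent(u"kaltxì", "en")])
```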
/discordbot/bot.py |
---|
0,0 → 1,60 |
#! /usr/bin/env python |
# This file is part of Tsim Apiak. |
# |
# Tsim Apiak is free software: you can redistribute it and/or modify |
# it under the terms of the GNU General Public Licence as published by |
# the Free Software Foundation, either version 3 of the Licence, or |
# (at your option) any later version. |
# |
# In addition to this, you must also comply with clause 4 of the |
# Apache Licence, version 2.0, concerning attribution. Where there |
# is a contradiction between the two licences, the GPL |
# takes preference. |
# |
# Tsim Apiak is distributed in the hope that it will be useful, |
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# GNU General Public License for more details. |
# |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
import os |
import discord |
import dotenv |
from typing import Optional |
from tsimapiak import translate |
dotenv.load_dotenv(dotenv.find_dotenv()) |
intents = discord.Intents.default() |
client = discord.Client(intents=intents) |
tree = discord.app_commands.CommandTree(client) |
@client.event |
async def on_ready(): |
print(f"{client.user} Connected to discord") |
await tree.sync() |
@tree.command( |
name="translate", |
description="Translate (gloss) Na'vi to English" |
) |
async def on_translate(interaction: discord.Interaction, argument: str, language: Optional[str]): |
if not language: |
language = "en" |
translated = [] |
for word in translate.translatesent(argument, language): |
translated.append(word["translated"]) |
await interaction.response.send_message(argument + "\n" + " | ".join(translated)) |
def main(): |
TOKEN = os.getenv('DISCORD_TOKEN') |
client.run(TOKEN) |
if __name__ == "__main__": |
main() |
Property changes: |
Added: svn:executable |
## -0,0 +1 ## |
+* |
\ No newline at end of property |
Index: cliapp/tsimapiakcli.py |
=================================================================== |
--- cliapp/tsimapiakcli.py (revision 278) |
+++ cliapp/tsimapiakcli.py (revision 287) |
@@ -28,7 +28,7 @@ |
except: |
line = line.decode("iso-8859-1") |
translated = [] |
- for word in translate.translatesent(line, "eng"): |
+ for word in translate.translatesent(line, "en"): |
translated.append(word["translated"]) |
translated = " | ".join(translated) |
print translated |
Index: ircbot/bot.py |
=================================================================== |
--- ircbot/bot.py (revision 278) |
+++ ircbot/bot.py (revision 287) |
@@ -64,7 +64,7 @@ |
c = self.connection |
if (cmd.split(" ")[0] == "tr") or (cmd.split(" ")[0] == "translate"): |
- lang = "eng" |
+ lang = "en" |
if len(cmd.split(" ")) > 1 and cmd.split(" ")[1].startswith("-"): |
if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est", "sv", "nl"): |
lang = cmd.split(" ")[1][1:] |
/webapp/main.py |
---|
22,7 → 22,7 |
from tsimapiak import parse, parsenum, translate |
-import httplib |
+import http.client as httplib |
import os |
import tornado.httpserver |
import tornado.ioloop |
67,7 → 67,7 |
class Translate(tornado.web.RequestHandler): |
def get(self): |
self.render("templates/translate.html", last="", out=None, lang="eng") |
self.render("templates/translate.html", last="", out=None, lang="en") |
def post(self): |
try: |
101,7 → 101,8 |
("/number", Number), |
("/restart", Restart), |
("/parse", Parse), |
("/translate", Translate) |
("/translate", Translate), |
("/(\\.well-known/.*)", tornado.web.StaticFileHandler, dict(path=settings["static_path"])) |
], **settings) |
if __name__ == "__main__": |
/webapp/templates/parse.html |
---|
49,7 → 49,7 |
</tr> |
<tr> |
<td>Postfixes:</td> |
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td> |
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td> |
</tr> |
<tr> |
<td>Lenited:</td> |
58,7 → 58,7 |
{% end %} |
</table> |
{% end %} |
-<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p> |
+<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p> |
<script type="text/javascript"> |
document.getElementById("word").focus(); |
</script> |
/webapp/templates/translate.html |
---|
29,11 → 29,11 |
<form action="/translate" method="post"> |
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" /> |
<select name="lang" id="lang"> |
<option value="eng" selected="selected">English</option> |
<option value="en" selected="selected">English</option> |
<option value="hu">Hungarian - Magyar</option> |
<option value="de">German - Deutsch</option> |
<option value="est">Estonian - Eesti</option> |
<option value="ptbr">Brazilian Portuguese - Português do Brasil</option> |
<option value="et">Estonian - Eesti</option> |
<option value="pt">Brazilian Portuguese - Português do Brasil</option> |
<option value="sv">Swedish - Svenska</option> |
<option value="nl">Dutch - Nederlands</option> |
</select> |
60,7 → 60,7 |
</tr> |
<tr> |
<td>Postfixes:</td> |
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td> |
<td>{{ u"; ".join(u", ".join([y[0] for y in x]) for x in wor["post"]) }}</td> |
</tr> |
<tr> |
<td>Lenited:</td> |
69,11 → 69,11 |
{% end %} |
</table> |
{% end %} |
-<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p> |
+<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p> |
<script type="text/javascript"> |
document.getElementById("word").focus(); |
</script> |
{% if lang != "eng" %} |
{% if lang != "en" %} |
<script type="text/javascript"> |
if("{{ lang }}" == "hu"){ |
document.getElementById("lang").selectedIndex = 1 |
/webapp/templates/index.html |
---|
27,5 → 27,6 |
<a href="/number"><b>Number translator</b></a> - this webapp allows you to translate written-out Na'vi numbers into decimal and octal.<br /> |
<a href="/parse"><b>Parser</b></a> - this webapp can parse Na'vi sentences into the base words, prefixes, infixes and suffixes. It does not translate the words.<br /> |
<a href="/translate"><b>Translator</b></a> - this webapp translates full Na'vi sentences into human languages, as well as any affixes into the technical abbreviations. |
<a href="/translate"><b>Translator</b></a> - this webapp translates full Na'vi sentences into human languages, as well as any affixes into the technical abbreviations.<br /> |
<a href="svn://tim32.org/navi">SVN repository</a> <a href="http://websvn.tim32.org/listing.php?repname=navi">(websvn)</a> |
{% end %} |
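For reference, a minimal sketch of the library calls behind the three pages listed in this template, using only entry points that appear elsewhere in this changeset (parsenum.parse, parse.parsesent, translate.translatesent). The sentence passed in is a placeholder, and the same local `navi` MySQL database that dbconnector.py connects to is assumed to be available.

```python
# Sketch of what the /number, /parse and /translate handlers call under the hood.
from tsimapiak import parse, parsenum, translate

print(parsenum.parse(u"mevolawve"))                  # written-out number -> decimal/octal dict

for word in parse.parsesent(u"oel ngati kameie"):    # placeholder example sentence
    print(word)                                      # per-word roots, prefixes, infixes, suffixes

for word in translate.translatesent(u"oel ngati kameie", "en"):
    print(word["translated"])                        # root gloss plus affix abbreviations
```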