Subversion Repositories navi

Compare Revisions

Rev 215 → Rev 233

/ircbot/bot.py
50,7 → 50,7
if (cmd.split(" ")[0] == "tr") or (cmd.split(" ")[0] == "translate"):
lang = "eng"
if cmd.split(" ")[1].startswith("-"):
- if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est"):
+ if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est", "sv"):
lang = cmd.split(" ")[1][1:]
sent = " ".join(cmd.split(" ")[2:])
else:
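
A quick illustration of how the bot reads the new flag (a sketch only, not copied from bot.py; the command string is an arbitrary example):

    cmd = "tr -sv Oel ngati kameie"
    lang = "eng"
    if cmd.split(" ")[1].startswith("-"):
        if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est", "sv"):
            lang = cmd.split(" ")[1][1:]        # -> "sv"
        sent = " ".join(cmd.split(" ")[2:])     # -> "Oel ngati kameie"
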
/webapp/templates/translate.html
34,6 → 34,7
<option value="de">German - Deutsch</option>
<option value="est">Estonian - Eesti</option>
<option value="ptbr">Brazilian Portuguese - Português do Brasil</option>
<option value="sv">Swedish - Svenska</option>
</select>
<input name="btn" type="submit" value="Translate!" />
</form>
85,6 → 86,9
if("{{ lang }}" == "ptbr"){
document.getElementById("lang").selectedIndex = 4
}
if("{{ lang }}" == "sv"){
document.getElementById("lang").selectedIndex = 5
}
</script>
{% end %}
{% end %}
/tsimapiak/parse.py
24,10 → 24,11
import dbconnector
import parsenum
 
wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}]
#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns
wordlist = dbconnector.getnavilist()
 
 
brokenwords = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"poltxe", u"plltxe", u"", u"ol", u"", (()), (()), False)) # These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes
brokenwords = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False)) #, (u"poltxe", u"plltxe", u"", u"ol", u"", (()), (()), False)) # XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes. Things that can take affixes should go in the above list instead.
infixes1 = (u"awn", u"eyk", u"us", u"äp", u"")
infixes2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"")
infixes3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"")
42,7 → 43,7
def parseword(wordin):
tempid = 0
temptype = u""
- for brokenword in brokenwords:
+ for brokenword in brokenwords: # XXX HACK - this is all code to work around bugs that shouldn't exist
if wordin[0] == brokenword[0]:
for word in wordlist:
if brokenword[1] == word["navi"]:
51,6 → 52,7
return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
for word in wordlist:
word["navi"] = word["navi"].lower()
foundit = True
foundprefs = []
foundposts = []
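
For context, the dictionary returned by the brokenwords branch above looks roughly like this for the first entry, sami (tempid and temptype are filled in from the matching dictionary row for si):

    {"word": {"id": tempid, "navi": u"si", "infix": u"", "type": temptype},
     "pref": (()), "post": (()), "len": False,
     "inf": (u"", u"am", u"")}
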
84,8 → 86,8
for in1 in tempin1:
for in2 in tempin2:
for in3 in tempin3:
- if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3) in wordin[wor]:
- center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3)
+ if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3).replace(u"lll",u"l").replace(u"rrr",u"r") in wordin[wor]:
+ center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3).replace(u"lll",u"l").replace(u"rrr",u"r")
foundins = [in1, in2, in3]
break
if center != u"": break
93,7 → 95,7
else:
if splitword[wor] in wordin[wor]:
center = splitword[wor]
if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX - maybe fixable without hardcoding?
if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX HACK - workaround to fix pay being lenited pxay. Maybe fixable without hardcoding?
for i in lenit:
temp = u""
if splitword[wor].startswith(i[0]):
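
The hard-coded pay/pxay guard exists because lenition turns px into p, so a lenited pxay ("many") is spelled exactly like pay ("water"). A sketch of the collision (the real lenit table lives elsewhere in parse.py; the pairs below are assumed):

    lenit_pairs = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"),
                   (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))
    for full, lenited in lenit_pairs:
        if u"pxay".startswith(full):
            print(lenited + u"pxay"[len(full):])     # -> pay, same spelling as pay "water"
            break
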
140,7 → 142,7
if posf.startswith(pos):
if pos in foundposts[wor]:
break
if pos != u"ä" or word["navi"] != u"pey":
if pos != u"ä" or word["navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
foundposts[wor].append(pos)
posf = posf[len(pos):]
break
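
For the peyä guard above: peyä is the irregular genitive of po ("his/her"), and without the check it would be segmented as the verb pey ("wait") plus the ending -ä. A tiny illustration of the condition (local stand-in names, not parse.py variables):

    word_navi, pos = u"pey", u"ä"
    if pos != u"ä" or word_navi != u"pey":
        print(u"-ä accepted as a suffix")
    else:
        print(u"-ä rejected for pey, avoiding the misparse")   # this branch is taken
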
/tsimapiak/translate.py
23,7 → 23,7
import parse
import dbconnector
 
brokenwords = ((u"to", u"than"), (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # words not in EE
brokenwords = [[u"to", u"superlative marker than", u"Komparativ Marker", u"võrdlemise märkija", u"középfokot jelez mint", u"palavre que"]] #, (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # XXX HACK - words not in EE
infixes1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL."))
infixes2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF."))
infixes3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON."))
/tsimapiak/parsenum.py
106,19 → 106,28
if numin.startswith(base[n] + u"zazam"):
outoct += (n+1) * (10**4)
outdec += (n+1) * (8**4)
- numin = numin[len(base[n]) + 5:]
+ if numin[len(base[n]) + 4:].startswith(u"mrr") or numin[len(base[n]) + 4:].startswith(u"me"):
+ numin = numin[len(base[n]) + 4:]
+ else:
+ numin = numin[len(base[n]) + 5:]
notbase = True
for n in range(len(base)):
if numin.startswith(base[n] + u"vozam"):
outoct += (n+1) * (10**3)
outdec += (n+1) * (8**3)
- numin = numin[len(base[n]) + 5:]
+ if numin[len(base[n]) + 4:].startswith(u"mrr") or numin[len(base[n]) + 4:].startswith(u"me"):
+ numin = numin[len(base[n]) + 4:]
+ else:
+ numin = numin[len(base[n]) + 5:]
notbase = True
for n in range(len(base)):
if numin.startswith(base[n] + u"zam"):
outoct += (n+1) * (10**2)
outdec += (n+1) * (8**2)
- numin = numin[len(base[n]) + 3:]
+ if numin[len(base[n]) + 2:].startswith(u"mrr") or numin[len(base[n]) + 2:].startswith(u"me"):
+ numin = numin[len(base[n]) + 2:]
+ else:
+ numin = numin[len(base[n]) + 3:]
notbase = True
for n in range(len(base)):
if numin.startswith(base[n] + u"vol"):
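
All three new branches handle the same spelling detail: when a power word such as zam, vozam or zazam is followed by a digit word beginning with m (me "two" or mrr "five"), the two m's are written as one, so one character fewer has to be stripped. A standalone sketch of the rule (function and variable names are illustrative, not parsenum.py's API):

    def strip_power(numin, prefix_len, power):
        # keep the shared "m" when the next digit word starts with it
        rest = numin[prefix_len + len(power) - 1:]
        if rest.startswith(u"mrr") or rest.startswith(u"me"):
            return rest
        return numin[prefix_len + len(power):]

    print(strip_power(u"zazamrrvozam", 0, u"zazam"))   # -> mrrvozam
    print(strip_power(u"zazamevol", 0, u"zazam"))      # -> mevol
    print(strip_power(u"zazamvol", 0, u"zazam"))       # -> vol
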