Subversion repository: navi

Compare Revisions

Rev 294 → Rev 296

/tsimapiak/dbconnector.py
34,9 → 34,9
                    ORDER BY CHAR_LENGTH(navi) DESC""")
     for row in cur:
         if row["infixes"] and row["infixes"] != "NULL": # yeah seriously
-            ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["infixes"].lower(), "type": row["partOfSpeech"]})
+            ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["infixes"].lower(), "type": row["partOfSpeech"], "lenited": False})
         else:
-            ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["navi"].replace("+", "").replace("-", "").lower(), "type": row["partOfSpeech"]})
+            ret.append({"id": row["id"], "navi": row["navi"].replace("+", "").replace("-", ""), "infix": row["navi"].replace("+", "").replace("-", "").lower(), "type": row["partOfSpeech"], "lenited": False})
     cur.close()
     db.close()
     return ret
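A note on this change: the new "lenited": False field marks every entry loaded from the database as unlenited, so the pre-lenited copies added in parse.py (below) can be told apart from originals. Since both branches now build the same dictionary shape and differ only in where the infix string comes from, the duplication could be factored out. A minimal sketch of such a helper, assuming the row schema used above (navi, infixes, partOfSpeech); the helper name is hypothetical:

    def make_entry(row):
        # Strip the +/- morpheme markers from the headword.
        navi = row["navi"].replace("+", "").replace("-", "")
        # Fall back to the bare headword when no infix template is stored.
        template = row["infixes"] if row["infixes"] and row["infixes"] != "NULL" else navi
        return {"id": row["id"], "navi": navi, "infix": template.lower(),
                "type": row["partOfSpeech"], "lenited": False}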
/tsimapiak/parse.py
31,22 → 31,22
 
 # XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes. Things that can take affixes should go in the above list instead.
 BROKENWORDS = (
-    (u"sami", u"si", u"", u"am", u"", (()), (()), False),
-    (u"to", u"to", u"", u"", u"", (()), (()), False),
+    (u"sami", u"si", u"", u"am", u"", (()), (()), False), # otherwise parses as sa (tsa-lenited) + mi
+    #(u"to", u"to", u"", u"", u"", (()), (()), False),
+    #(u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False),
-    (u"soaiä", u"soaia", u"", u"", u"", (()), [[(u"ä", None)]], False),
-    (u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False),
-    (u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False),
-    (u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False),
-    (u"apxay", u"pxay", u"", u"", u"", [[u"a"]], (()), False),
+    (u"soaiä", u"soaia", u"", u"", u"", (()), [[(u"ä", None)]], False), # does not parse, irregular form
+    #(u"mengenga", u"ngenga", u"", u"", u"", [[u"me"]], (()), False),
+    #(u"pxengenga", u"ngenga", u"", u"", u"", [[u"pxe"]], (()), False),
+    (u"kìmä", u"kä", u"", u"ìm", u"", (()), (()), False), # otherwise parses as kìm (spin) + ä (genitive)
+    (u"apxay", u"pxay", u"", u"", u"", [[u"a"]], (()), False), # otherwise parses as apxa + -y (genitive)
+    #(u"akawng", u"kawng", u"", u"", u"", [[u"a"]], (()), False),
+    #(u"kawnga", u"kawng", u"", u"", u"", (()), [[(u"a", None)]], False),
+    #(u"kawng", u"kawng", u"", u"", u"", (()), (()), False),
-    (u"ka", u"ka", u"", u"", u"", (()), (()), False),
-    (u"uo", u"uo", u"", u"", u"", (()), (()), False),
-    (u"sìk", u"sìk", u"", u"", u"", (()), (()), False),
-    (u"sim", u"sim", u"", u"", u"", (()), (()), False), # probably not tsim lenited
-    (u"tse", u"tse", u"", u"", u"", (()), (()), False),
+    #(u"ka", u"ka", u"", u"", u"", (()), (()), False),
+    #(u"uo", u"uo", u"", u"", u"", (()), (()), False),
+    #(u"sìk", u"sìk", u"", u"", u"", (()), (()), False),
+    #(u"sim", u"sim", u"", u"", u"", (()), (()), False), # probably not tsim lenited
+    (u"tse", u"tse", u"", u"", u"", (()), (()), False), # otherwise parses as tsa'u abbreviated (special case)
 )
 
 #INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"")
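One field the comment above does not list: each tuple ends with a trailing lenited flag, read as brokenword[7] in parseword() below. A minimal sketch of how an entry maps onto a parse result (the function name is hypothetical; the keys mirror parseword()'s return value):

    def broken_lookup(word):
        # Check the hardcoded exceptions before attempting a regular parse.
        for bw in BROKENWORDS:
            if word == bw[0]:
                return {"root": bw[1],
                        "inf": (bw[2], bw[3], bw[4]),  # 0-, 1- and 2-position infixes
                        "pref": bw[5], "post": bw[6], "len": bw[7]}
        return None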
85,6 → 85,16
 infixes = sorted(infixes + EXTRAINFIXES, key=lambda x: len(x["navi"]), reverse=True)
 postfixes = sorted(postfixes + EXTRAPOSTFIXES, key=lambda x: len(x["navi"]), reverse=True)
 
+# Let's lenit the dictionary
+extrawords = []
+for word in wordlist:
+    for letter, replacement in LENIT:
+        if word["navi"].startswith(letter):
+            new_word = word["navi"].replace(letter, replacement, 1)
+            new_infix = word["infix"].replace(letter, replacement, 1)
+            extrawords.append({"id": word["id"], "navi": new_word, "infix": new_infix, "type": word["type"], "lenited": True})
+wordlist = sorted(wordlist + extrawords, key=lambda x: len(x["navi"]) * 2 + (0 if x["lenited"] else 1), reverse=True)
+
 def parseword(wordin):
     tempid = 0
     temptype = u""
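The sort key on the new wordlist line deserves unpacking: doubling the length leaves room for a one-point tie-break, so with reverse=True longer headwords are tried first and, between two entries of equal length, the unlenited one wins. A minimal sketch of the resulting order, using pay, which is both a headword in its own right and the lenited spelling of pxay (the pairing is an assumption based on the pay/pxay hack removed further down):

    entries = [
        {"navi": u"pay", "lenited": True},    # lenited copy of pxay
        {"navi": u"pay", "lenited": False},   # pay as its own headword
        {"navi": u"pxay", "lenited": False},
    ]
    entries.sort(key=lambda x: len(x["navi"]) * 2 + (0 if x["lenited"] else 1),
                 reverse=True)
    # Keys are 9, 7 and 6: pxay first, then the unlenited pay, then the lenited copy.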
95,13 → 105,12
                     tempid = word["id"]
                     temptype = word["type"]
             return {"word": {"id": tempid, "navi": brokenword[1], "infix": u"", "type": temptype}, "pref": brokenword[5], "post": brokenword[6], "len": brokenword[7], "inf": (brokenword[2], brokenword[3], brokenword[4]) }
-    ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
+    ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}, "len": False}
     for word in wordlist:
         word["navi"] = word["navi"].lower()
         foundit = True
         foundprefs = []
         foundposts = []
-        lenited = False
         splitword = word["infix"].split(u" ")
         foundins = [u"", u"", u""]
         if len(wordin) < len(splitword):
140,14 → 149,6
             else:
                 if splitword[wor] in wordin[wor]:
                     center = splitword[wor]
-            if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX HACK - workaround to fix pay being lenited pxay. Maybe fixable without hardcoding?
-                for i in LENIT:
-                    temp = u""
-                    if splitword[wor].startswith(i[0]):
-                        temp = i[1] + splitword[wor][len(i[0]):]
-                    if temp in wordin[wor]:
-                        lenited = True
-                        center = temp
             if center == u"":
                 if splitword[wor].endswith(u"nga"):
                     temp = splitword[wor][:-3] + u"nge"
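For contrast, the block deleted above re-derived lenited candidates at match time by scanning LENIT, roughly as sketched below, assuming LENIT holds (letter, replacement) pairs as the new dictionary-leniting loop suggests. Pre-leniting the wordlist at load time makes this per-candidate scan, along with its pay/pxay special case, unnecessary:

    def lenite_once(navi, lenit_pairs):
        # Return the lenited spelling of navi, or navi unchanged if no pair applies.
        for letter, replacement in lenit_pairs:
            if navi.startswith(letter):
                return replacement + navi[len(letter):]
        return navi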
216,7 → 217,7
                 if posf.startswith(pos):
                     if (pos, posid) in foundposts[wor]:
                         break
-                    if pos != u"ä" or word["navi"] != u"pey": # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
+                    if pos != u"ä" or (word["navi"] != u"pey" and word["navi"] != "fey"): # XXX HACK - fix for peyä. THIS SHOULD NOT BE HERE!
                         foundposts[wor].append((pos, posid))
                         posf = posf[len(pos):]
                         break
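This widens the existing peyä hack to feyä: the genitive -ä is never recorded as a suffix when the candidate root is pey or fey, presumably so that peyä and feyä can match as whole forms rather than as pey/fey plus -ä. In isolation the guard behaves like this (a sketch; the function name is hypothetical):

    def may_strip_suffix(pos, navi):
        # True unless we would be peeling a genitive -ä off pey or fey.
        return pos != u"ä" or (navi != u"pey" and navi != u"fey")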
232,8 → 233,8
ret["pref"] = foundprefs
ret["post"] = foundposts
ret["inf"] = foundins
ret["len"] = lenited
if foundit == True:
ret["len"] = word["lenited"]
ret["word"] = foundword
return ret
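Taken together with the "len": False initialisation added earlier in parseword(), the result now always carries a lenition flag: False when nothing matched, otherwise the matched dictionary entry's own lenited value. A sketch of the shape callers can rely on (field values are illustrative):

    result = {
        "word": {"id": 0, "navi": u"[tsamsiyu]", "infix": u"", "type": u""},
        "pref": [],                    # prefixes found, per word part
        "post": [],                    # suffixes found, per word part
        "inf": (u"", u"", u""),        # the three infix positions
        "len": False,                  # True only for a lenited dictionary match
    }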
 
/webapp/templates/parse.html
58,7 → 58,7
 {% end %}
 </table>
 {% end %}
-<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Mark Miller, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
+<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
 <script type="text/javascript">
 document.getElementById("word").focus();
 </script>
/webapp/templates/translate.html
75,7 → 75,7
 {% end %}
 </table>
 {% end %}
-<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Mark Miller, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
+<p>This program uses Eana Eltu for the list of words, affixes, and infix position data, created by Tuiq, Taronyu, Seze, Mark Miller, Tìtstewan, and Tirea Aean. Thanks also go to the rest of the Learn Na'vi community!</p>
 <script type="text/javascript">
 document.getElementById("word").focus();
 </script>