Subversion repository: navi

Compare Revisions: Rev 48 → Rev 154

/webapp/templates/translate.html
0,0 → 1,68
{% extends "base.html" %}
 
{% block title %}Translator{% end %}
 
{% block body %}
<b>Na'vi sentence:</b><br />
<form action="/translate" method="post">
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" />
<select name="lang" id="lang">
<option value="eng" selected="selected">English</option>
<option value="hu">Hungarian - Magyar</option>
<option value="de">German - Deutsch</option>
<option value="est">Estonian - Eesti</option>
<option value="ptbr">Brazilian Portuguese - Português do Brasil</option>
</select>
<input name="btn" type="submit" value="Translate!" />
</form>
{% if out %}
<table border="1">
<tr>
<th>Words</th>
<th>English</th>
<th>Parts</th>
<th>Data</th>
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td rowspan="4">{{ wor["translated"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
<td>{{ str(wor["len"]) }}</td>
</tr>
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
{% if lang != "eng" %}
<script type="text/javascript">
if("{{ lang }}" == "hu"){
document.getElementById("lang").selectedIndex = 1
}
if("{{ lang }}" == "de"){
document.getElementById("lang").selectedIndex = 2
}
if("{{ lang }}" == "est"){
document.getElementById("lang").selectedIndex = 3
}
if("{{ lang }}" == "ptbr"){
document.getElementById("lang").selectedIndex = 4
}
</script>
{% end %}
{% end %}
/webapp/templates/parse.html
0,0 → 1,43
{% extends "base.html" %}
 
{% block title %}Sentence parser{% end %}
 
{% block body %}
<b>Na'vi sentence:</b><br />
<form action="/parse" method="post">
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" />
<input name="btn" type="submit" value="Parse!" />
</form>
{% if out %}
<table border="1">
<tr>
<th>Words</th>
<th>Parts</th>
<th>Data</th>
</tr>
{% for wor in out %}
<tr>
<td rowspan="4">{{ wor["word"]["navi"] }}</td>
<td>Infixes:</td>
<td>{{ u", ".join(wor["inf"]) }}</td>
</tr>
<tr>
<td>Prefixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td>
</tr>
<tr>
<td>Postfixes:</td>
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td>
</tr>
<tr>
<td>Lenited:</td>
<td>{{ str(wor["len"]) }}</td>
</tr>
{% end %}
</table>
{% end %}
<p>This program uses Eana Eltu for the list of words and infix positions (but nothing else), created by Tuiq and Taronyu. Thanks also go to the rest of the Learn Na'vi community!</p>
<script type="text/javascript">
document.getElementById("word").focus();
</script>
{% end %}
/webapp/templates/base.html
1,6 → 1,7
<html>
<head>
<title>Tsim Apiak - {% block title %}Title{% end %}</title>
<link rel="shortcut icon" type="image/x-icon" href="static/favicon.ico" />
<style type="text/css">
body {
background: #145179;
/webapp/templates/index.html
0,0 → 1,8
{% extends "base.html" %}
 
{% block title %}Home{% end %}
 
{% block body %}
<a href="/number"><b>Number translator</b></a> - this webapp allows you to translate written-out Na'vi numbers into decimal and octal.<br />
<a href="/parse"><b>Parser</b></a> - this webapp can parse Na'vi sentences into the base words, prefixes, infixes and suffixes. It does not currently translate the words, but that will come.
{% end %}
/webapp/templates/number.html
17,4 → 17,4
<script type="text/javascript">
document.getElementById("num").focus();
</script>
{% end %}
{% end %}
/webapp/main.py
9,12 → 9,14
import os
import re

-from tsimapiak.parsenum import parse
-from tsimapiak.dbconnector import getnavilist
+from tsimapiak import parsenum
+from tsimapiak import dbconnector
+from tsimapiak import parse
+from tsimapiak import translate

class Index(tornado.web.RequestHandler):
def get(self):
-self.redirect("/number")
+self.render("templates/index.html")

class Number(tornado.web.RequestHandler):
def get(self):
25,10 → 27,12
num = self.get_argument("num").strip()
except:
self.redirect("/number")
-numout = parse(num.replace(" ",""))
+numout = parsenum.parse(num.replace(" ",""))
if numout == None:
-numout = -1
-self.render("templates/number.html", last=num, numout=numout)
+numoutt = -1
+else:
+numoutt = (numout["dec"], numout["oct"])
+self.render("templates/number.html", last=num, numout=numoutt)

class Restart(tornado.web.RequestHandler):
def get(self):
37,20 → 41,51

class TestDB(tornado.web.RequestHandler):
def get(self):
-lis = getnavilist()
+lis = dbconnector.getnavilist()
text = u"id | navi | infix | partofspeech<br />"
text += u"<br />".join(u" | ".join(unicode(y) for y in x) for x in lis)
self.write(text)

+class Parse(tornado.web.RequestHandler):
+def get(self):
+self.render("templates/parse.html", last="", out=None)
+def post(self):
+try:
+word = self.get_argument("word")
+except:
+self.redirect("/parse")
+out = parse.parsesent(word)
+self.render("templates/parse.html", last=word, out=out)
+
+class Translate(tornado.web.RequestHandler):
+def get(self):
+self.render("templates/translate.html", last="", out=None, lang="eng")
+def post(self):
+try:
+word = self.get_argument("word")
+lang = self.get_argument("lang")
+except:
+self.redirect("/translate")
+out = translate.translatesent(word, lang)
+self.render("templates/translate.html", last=word, out=out, lang=lang)
+
+settings = {
+"static_path": os.path.join(os.path.dirname(__file__), "static")
+}
+
application = tornado.web.Application([
("/", Index),
("/number", Number),
("/restart", Restart),
-("/testdb", TestDB)
-])
+("/testdb", TestDB),
+("/parse", Parse),
+("/translate", Translate)
+], **settings)

if __name__ == "__main__":
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(1337)
-tornado.autoreload.start()
-tornado.ioloop.IOLoop.instance().start()
+#tornado.autoreload.start()
+tornado.ioloop.IOLoop.instance().start()
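
With the new handlers registered, the app serves /parse and /translate alongside /number on port 1337. A hypothetical smoke test against a locally running instance (field names taken from the templates above; the sample sentence is arbitrary):

# Hypothetical smoke test: assumes `python webapp/main.py` is already running
# locally on port 1337, as configured above. Python 2 stdlib only.
import urllib
import urllib2

data = urllib.urlencode({"word": "oel ngati kameie", "lang": "eng", "btn": "Translate!"})
print urllib2.urlopen("http://127.0.0.1:1337/translate", data).read()
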
/webapp/static/favicon.ico
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
/webapp/static/favicon.ico
Property changes:
Added: svn:mime-type
## -0,0 +1 ##
+application/octet-stream
\ No newline at end of property
Index: tsimapiak/translate.py
===================================================================
--- tsimapiak/translate.py (nonexistent)
+++ tsimapiak/translate.py (revision 154)
@@ -0,0 +1,10 @@
+import parse
+import dbconnector
+def translatesent(sent, lang):
+ sent = parse.parsesent(sent)
+ for word in sent:
+ if word["word"]["id"] != 0:
+ word["translated"] = dbconnector.translate(word["word"]["id"],lang)
+ else:
+ word["translated"] = word["word"]["navi"]
+ return sent
\ No newline at end of file
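
A short usage sketch for the new module, assuming the tsimapiak package and its MySQL backend are available; words without a dictionary id keep their Na'vi form, per the else branch above:

# Usage sketch (assumes the tsimapiak package is importable and the
# "navi" database is reachable). Python 2, matching the code above.
from tsimapiak import translate

for entry in translate.translatesent(u"oel ngati kameie", "eng"):
    print entry["word"]["navi"], "->", entry["translated"]
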
Index: tsimapiak/parsenum.py
===================================================================
--- tsimapiak/parsenum.py (revision 48)
+++ tsimapiak/parsenum.py (revision 154)
@@ -1,81 +1,129 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-import re
-
-num = [u"kew",
- u"'aw",
- u"mune",
- u"pxey",
- u"tsìng",
- u"mrr",
- u"pukap",
- u"kinä"]
-
-rem = [u"aw",
- u"mun",
- u"pey",
- u"sìng",
- u"mrr",
- u"fu",
- u"hin"]
-
-base = [u"",
- u"me",
- u"pxe",
- u"tsì",
- u"mrr",
- u"pu",
- u"ki"]
-
-
-numre = \
- u"^(?:(" + "|".join(base) + u")zazam??)?" + \
- u"(?:(" + "|".join(base) + u")vozam??)?" + \
- u"(?:(" + "|".join(base) + u")zam??)?" + \
- u"(?:(" + "|".join(base) + u")vo(?:l(?=a|))?)?" + \
- u"((?:" + "|".join(rem) + u")|" + \
- u"(?:" + "|".join(num) + u"))?$"
-numre = re.compile(numre)
-
-def parse(numin):
- if type(numin) != unicode:
- return None
- if numin == u"":
- return None
- numin = numin.replace(u"í",u"ì").replace(u"á",u"ä")
- try:
- mat = numre.match(numin).groups()
- except:
- return None
- numout = 0
- numoct = 0
- try:
- numout += rem.index(mat[4]) + 1
- numoct += rem.index(mat[4]) + 1
- except:
- try:
- numout += num.index(mat[4])
- numoct += num.index(mat[4])
- except: pass
- try:
- numout += (base.index(mat[3]) + 1) * 8
- numoct += (base.index(mat[3]) + 1) * 10
- except: pass
- try:
- numout += (base.index(mat[2]) + 1) * 8**2
- numoct += (base.index(mat[2]) + 1) * 10**2
- except: pass
- try:
- numout += (base.index(mat[1]) + 1) * 8**3
- numoct += (base.index(mat[1]) + 1) * 10**3
- except: pass
- try:
- numout += (base.index(mat[0]) + 1) * 8**4
- numoct += (base.index(mat[0]) + 1) * 10**4
- except: pass
- return numout, numoct
-
-
-if __name__ == "__main__":
- print parse(u"mrrvolaw")
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+num = [u"kew",
+ u"'aw",
+ u"mune",
+ u"pxey",
+ u"tsìng",
+ u"mrr",
+ u"pukap",
+ u"kinä"]
+
+numord = [u"kew",
+ u"'aw",
+ u"mu",
+ u"pxey",
+ u"tsì",
+ u"mrr",
+ u"pu",
+ u"ki"]
+
+rem = [u"aw",
+ u"mun",
+ u"pey",
+ u"sìng",
+ u"mrr",
+ u"fu",
+ u"hin"]
+
+remord = [u"aw",
+ u"mu",
+ u"pey",
+ u"sì",
+ u"mrr",
+ u"fu",
+ u"hi"]
+
+base = [u"",
+ u"me",
+ u"pxe",
+ u"tsì",
+ u"mrr",
+ u"pu",
+ u"ki"]
+
+def parse(numin):
+ if u"mm" in numin:
+ return None
+ if (numin[0] == u"a") and (numin[len(numin)-1] == u"a"):
+ return None
+ prefs = []
+ posts = []
+ outoct = 0
+ outdec = 0
+ ret = {"word": {"id": 0, "navi": u"", "infix": u"", "type": u""}, "pref": [prefs], "post": [posts], "inf": [u"", u"", u""], "len": False, "dec": outdec, "oct": outoct}
+ if numin[0] == u"a":
+ prefs.append(u"a")
+ numin = numin[1:]
+ if numin[len(numin)-1] == u"a":
+ posts.append(u"a")
+ numin = numin[:-1]
+ if numin[-2:] == u"ve":
+ posts.append(u"ve")
+ numin = numin[:-2]
+
+ #base numbers
+ for n in range(len(num)):
+ if u"ve" in posts:
+ if numin == numord[n]:
+ outoct = n
+ outdec = n
+ ret["word"]["navi"] = unicode(outdec) + u"."
+ ret["dec"] = outdec
+ ret["oct"] = outoct
+ return ret
+ else:
+ if numin == num[n]:
+ outoct = n
+ outdec = n
+ ret["word"]["navi"] = unicode(outdec)
+ ret["dec"] = outdec
+ ret["oct"] = outoct
+ return ret
+ #other numbers
+ for n in range(len(base)):
+ if numin.startswith(base[n] + u"zazam"):
+ outoct += (n+1) * (10**4)
+ outdec += (n+1) * (8**4)
+ numin = numin[len(base[n]) + 5:]
+ for n in range(len(base)):
+ if numin.startswith(base[n] + u"vozam"):
+ outoct += (n+1) * (10**3)
+ outdec += (n+1) * (8**3)
+ numin = numin[len(base[n]) + 5:]
+ for n in range(len(base)):
+ if numin.startswith(base[n] + u"zam"):
+ outoct += (n+1) * (10**2)
+ outdec += (n+1) * (8**2)
+ numin = numin[len(base[n]) + 3:]
+ for n in range(len(base)):
+ if numin.startswith(base[n] + u"vol"):
+ outoct += (n+1) * 10
+ outdec += (n+1) * 8
+ numin = numin[len(base[n]) + 3:]
+ if numin.startswith(base[n] + u"vo"):
+ outoct += (n+1) * 10
+ outdec += (n+1) * 8
+ numin = numin[len(base[n]) + 2:]
+ for n in range(len(rem)):
+ if u"ve" in posts:
+ if numin == remord[n]:
+ outoct += n + 1
+ outdec += n + 1
+ numin = u""
+ else:
+ if numin == rem[n]:
+ outoct += n + 1
+ outdec += n + 1
+ numin = u""
+ if numin == u"":
+ ret["word"]["navi"] = unicode(outdec) if not u"ve" in posts else unicode(outdec) + u"."
+ ret["dec"] = outdec
+ ret["oct"] = outoct
+ return ret
+ else:
+ return None
+
+if __name__ == "__main__":
+ print parse(u"mevolawve")
\ No newline at end of file
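
A usage sketch for the rewritten number parser, assuming the tsimapiak package is on the import path; the expected values follow the octal place-value arithmetic above (dec is the value in decimal, oct its base-8 spelling):

# Usage sketch (assumes the tsimapiak package is importable; Python 2).
from tsimapiak import parsenum

print parsenum.parse(u"mevolaw")    # 2*8 + 1: returns a dict with "dec": 17, "oct": 21
print parsenum.parse(u"mevolawve")  # ordinal "-ve": "navi" is rendered as u"17."
print parsenum.parse(u"skxawng")    # not a number at all, so parse() returns None
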
Index: tsimapiak/parse.py
===================================================================
--- tsimapiak/parse.py (nonexistent)
+++ tsimapiak/parse.py (revision 154)
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import re
+import dbconnector
+import parsenum
+
+wordlist = dbconnector.getnavilist()
+
+infixes1 = (u"awn", u"eyk", u"us", u"äp", u"")
+infixes2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"")
+infixes3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"")
+prefixes = (u"tsay", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"pe", u"le", u"nì", u"sä", u"tì", u"ay", u"me", u"fì", u"ke", u"a")
+adpositions = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìla", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to")
+postfixes = adpositions + (u"tsyìp", u"eyä", u"ìri", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r")
+#prefixesn = ur"(?P(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?"
+#prefixesv = ur"(?P(?:nì|sä|tì|rä'ä |ke )?)"
+
+lenit = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))
+
+def parseword(wordin):
+ ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}}
+ for word in wordlist:
+ foundit = True
+ foundprefs = []
+ foundposts = []
+ lenited = False
+ splitword = word["infix"].split(u" ")
+ if len(wordin) < len(splitword):
+ foundit = False
+ next
+ for wor in range(len(splitword)):
+ if not foundit:
+ break
+ foundprefs.append([])
+ foundposts.append([])
+ center = u""
+ foundins = [u"", u"", u""]
+ pre = []
+ post = []
+ if u"<1>" in splitword[wor]:
+ for in1 in infixes1:
+ for in2 in infixes2:
+ for in3 in infixes3:
+ if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3) in wordin[wor]:
+ center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3)
+ foundins = [in1, in2, in3]
+ break
+ if center != u"": break
+ if center != u"": break
+ else:
+ if splitword[wor] in wordin[wor]:
+ center = splitword[wor]
+ if center == u"":
+ for i in lenit:
+ temp = u""
+ if splitword[wor].startswith(i[0]):
+ temp = i[1] + splitword[wor][len(i[0]):]
+ if temp in wordin[wor]:
+ lenited = True
+ center = temp
+ if center == u"":
+ if splitword[wor].endswith(u"nga"):
+ temp = splitword[wor][:-3] + u"ng"
+ if temp in wordin[wor]:
+ center = temp
+ if splitword[wor].endswith(u"po"):
+ temp = splitword[wor][:-3] + u"p"
+ if temp in wordin[wor]:
+ center = temp
+ if center == u"":
+ foundit = False
+ break
+ temp = wordin[wor].split(center)
+ if len(temp) != 2:
+ foundit = False
+ break
+ pref, posf = temp
+ last = u""
+ while last != pref:
+ last = pref
+ for pre in prefixes:
+ if pref != u"":
+ if pref.endswith(pre):
+ if pre in foundprefs[wor]:
+ break
+ foundprefs[wor].append(pre)
+ pref = pref[:-len(pre)]
+ break
+ if pref != u"":
+ foundit = False
+ break
+ last = u""
+ while last != posf:
+ last = posf
+ for pos in postfixes:
+ if posf != u"":
+ if posf.startswith(pos):
+ if pos in foundposts[wor]:
+ break
+ foundposts[wor].append(pos)
+ posf = posf[len(pos):]
+ break
+ if posf != u"":
+ foundit = False
+ break
+ if foundit == True:
+ foundword = word
+ break
+ ret["pref"] = foundprefs
+ ret["post"] = foundposts
+ ret["inf"] = foundins
+ ret["len"] = lenited
+ if foundit == True:
+ ret["word"] = foundword
+ return ret
+
+def parsesent(sent):
+ sent = sent.strip().lower().replace(u"’", u"'")
+ sent = re.sub(ur"[^\wìä' ]",u"",sent)
+ sent = re.sub(ur"\ +",u" ",sent)
+ sent = sent.split(u" ")
+ ret = []
+ left = len(sent)
+ while left:
+ word = parsenum.parse(sent[len(sent)-left])
+ if word == None:
+ word = parseword(sent[-left:])
+ left -= len(word["word"]["navi"].split(" "))
+ ret.append(word)
+ return ret
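
The lenit table above drives a fallback in parseword(): when a headword does not match directly, each pair's plain onset is swapped for its lenited form and the match is retried. A standalone sketch of that idea (parse.py itself needs the metaWords database at import time, so this helper is purely illustrative and not part of the module):

# -*- coding: utf-8 -*-
# Illustrative only: `could_be_lenited` is a hypothetical helper mirroring
# the lenition fallback in parseword() above, not part of tsimapiak.parse.
lenit = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"),
         (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u""))

def could_be_lenited(candidate, headword):
    # True if `candidate` is `headword` with one lenition pair applied.
    for plain, soft in lenit:
        if headword.startswith(plain) and candidate == soft + headword[len(plain):]:
            return True
    return False

print could_be_lenited(u"helku", u"kelku")   # True  (k -> h)
print could_be_lenited(u"kelku", u"kelku")   # False (no lenition applied)
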
Index: tsimapiak/dbconnector.py
===================================================================
--- tsimapiak/dbconnector.py (revision 48)
+++ tsimapiak/dbconnector.py (revision 154)
@@ -9,44 +9,24 @@
current = u""
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi")
for row in db.query("""
- SELECT *, CHAR_LENGTH(navi) AS NL
+ SELECT *
FROM `metaWords`
- ORDER BY NL DESC"""):
- if row["partOfSpeech"] in (u"v.", u"vin.", u"vtr."):
- current = unicode(row["ipa"])
- current = current.replace(ur"ɛ",ur"e").replace(ur".",ur"").replace(ur"ɾ",ur"r") \
- .replace(ur"ɪ",ur"ì").replace(ur"ˈ",ur"").replace(ur"'",ur"x") \
- .replace(ur"ŋ",ur"ng").replace(ur"j",ur"y").replace(ur"ʔ",ur"'") \
- .replace(ur"æ",ur"ä").replace(ur"ˌ",ur"").replace(ur"\t{ts}",ur"ts") \
- .replace(ur"ṛ",ur"rr").replace(ur"ḷ",ur"ll").replace(ur"k̚",ur"k ") \
- .replace(ur"p̚",ur"p ").replace(ur"t̚",ur"t ").replace(ur"'̚",ur"' ") \
- .replace(u"\\",ur"").replace(ur"(",ur"").replace(ur")",ur"") \
- .replace(ur"[",ur"").replace(ur"]",ur"").replace(ur" "," ") \
- .strip()
- current = re.sub(ur" or.*","",current)
- current = re.sub(ur"z(.*)engk(.*)e",ur"z\1enk\2e",current)
- current = re.sub(ur"t(.*)ì(m|n)\ ",ur"t\1ìng ",current)
- current = current.split(ur"$cdot$")
- if len(current) == 3:
- current = current[0] + u"<0><1>" + current[1] + u"<2>" + current[2]
- elif len(current) == 2:
- current = current[0] + u"<0><1><2>" + current[1]
- else:
- current = u"<0><1><2>" + current[0]
+ WHERE partOfSpeech <> 'num.' AND partOfSpeech <> "prefix"
+ ORDER BY CHAR_LENGTH(navi) DESC"""):
+ if row["infixes"]:
+ ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]})
else:
- current = unicode(row["navi"])
- ret.append([row["id"], row["navi"], current, row["partOfSpeech"]])
+ ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]})
db.close()
return ret
-def getnavi(word):
- ret = []
+def translate(wid,language):
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi")
for row in db.query("""
SELECT *
- FROM `metaWords`
- WHERE navi = ?""",word):
- ret.append([row["id"],row["navi"], row["infix"], row["partOfSpeech"]])
+ FROM `localizedWords`
+ WHERE id = %s AND languageCode = %s""",wid,language):
+ ret = row["localized"]
+ break
db.close()
- return ret
-
\ No newline at end of file
+ return ret
\ No newline at end of file
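
getnavilist() now returns dicts keyed id/navi/infix/type instead of positional lists, and translate() looks a single word up in localizedWords (it assumes at least one matching row). A minimal consumer sketch; the database settings are those in the connection string above, and the word id is hypothetical:

# Consumer sketch (assumes the local MySQL "navi" database used above
# is reachable). Python 2.
from tsimapiak import dbconnector

for entry in dbconnector.getnavilist()[:3]:
    print entry["id"], entry["navi"], entry["infix"], entry["type"]

print dbconnector.translate(42, "eng")  # 42 is a purely hypothetical word id
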
/dev/naviparse.py
File deleted