/tsimapiak/parsenum.py |
---|
30,20 → 30,15 |
numre = \ |
u"^(?:(" + "|".join(base) + u")zazam??)?" + \ |
u"^(a?)(?:(" + "|".join(base) + u")zazam??)?" + \ |
u"(?:(" + "|".join(base) + u")vozam??)?" + \ |
u"(?:(" + "|".join(base) + u")zam??)?" + \ |
u"(?:(" + "|".join(base) + u")vo(?:l(?=a|))?)?" + \ |
u"((?:" + "|".join(rem) + u")|" + \ |
u"(?:" + "|".join(num) + u"))?$" |
u"(?:" + "|".join(num) + u"))?((?:ve)?)(a?)$" |
numre = re.compile(numre) |
def parse(numin): |
if type(numin) != unicode: |
return None |
if numin == u"": |
return None |
numin = numin.replace(u"í",u"ì").replace(u"á",u"ä") |
try: |
mat = numre.match(numin).groups() |
except: |
51,30 → 46,34 |
numout = 0 |
numoct = 0 |
try: |
numout += rem.index(mat[4]) + 1 |
numoct += rem.index(mat[4]) + 1 |
numout += rem.index(mat[5]) + 1 |
numoct += rem.index(mat[5]) + 1 |
except: |
try: |
numout += num.index(mat[4]) |
numoct += num.index(mat[4]) |
numout += num.index(mat[5]) |
numoct += num.index(mat[5]) |
except: pass |
try: |
numout += (base.index(mat[3]) + 1) * 8 |
numoct += (base.index(mat[3]) + 1) * 10 |
numout += (base.index(mat[4]) + 1) * 8 |
numoct += (base.index(mat[4]) + 1) * 10 |
except: pass |
try: |
numout += (base.index(mat[2]) + 1) * 8**2 |
numoct += (base.index(mat[2]) + 1) * 10**2 |
numout += (base.index(mat[3]) + 1) * 8**2 |
numoct += (base.index(mat[3]) + 1) * 10**2 |
except: pass |
try: |
numout += (base.index(mat[1]) + 1) * 8**3 |
numoct += (base.index(mat[1]) + 1) * 10**3 |
numout += (base.index(mat[2]) + 1) * 8**3 |
numoct += (base.index(mat[2]) + 1) * 10**3 |
except: pass |
try: |
numout += (base.index(mat[0]) + 1) * 8**4 |
numoct += (base.index(mat[0]) + 1) * 10**4 |
numout += (base.index(mat[1]) + 1) * 8**4 |
numoct += (base.index(mat[1]) + 1) * 10**4 |
except: pass |
return numout, numoct |
retnum = unicode(numout) |
if mat[6] != u"": |
retnum += u"." |
return {"word": {"id": 0, "navi": retnum, "infix": u"", "type": u""}, "pref": [mat[0]], "post": [mat[6], mat[7]], "inf": [u"", u"", u""], "len": False, "dec": numout, "oct": numoct} |
#return numout, numoct |
if __name__ == "__main__": |
/tsimapiak/parse2.py |
---|
2,37 → 2,36 |
# -*- coding: utf-8 -*- |
import re |
#import dbconnector |
import dbconnector |
import parsenum |
wordlist = dbconnector.getnavilist() |
#wordlist = [{"id": 1, "navi": u"si", "infix": u"s<1><2><3>i", "type": u"v."}, |
#{"id": 2, "navi": u"wou", "infix": u"w<1><2>o<3>u", "type": u"v."}, |
#{"id": 3, "navi": u"kame", "infix": u"k<1><2>am<3>e", "type": u"v."}, |
#{"id": 4, "navi": u"tìng", "infix": u"t<1><2><3>ìng", "type": u"v."}, |
#{"id": 5, "navi": u"yerik", "infix": u"yerik", "type": u"n."}, |
#{"id": 6, "navi": u"nari", "infix": u"nari", "type": u"n."}] |
infixes1 = [u"awn", u"eyk", u"us", u"äp", u""] |
infixes2 = [u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u""] |
infixes3 = [u"äng", u"ats", u"eiy", u"ei", u"uy", u""] |
infixes1 = (u"awn", u"eyk", u"us", u"äp", u"") |
infixes2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"") |
infixes3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"") |
prefixes = (u"a", u"pe", u"le", u"nì", u"sä", u"tì", u"fne", u"tsay", u"fay", u"fra", u"pxe", u"ay", u"me", u"tsa", u"fì", u"ke") |
adpositions = (u"kxamlä", u"mungwrr", u"nemfa", u"pximaw", u"pxisre", u"tafkip", u"takip", u"teri", u"mìkam", u"ìla", u"fkip", u"fpi", u"ftu", u"kip", u"lok", u"luke", u"maw", u"pxel", u"pxaw", u"rofa", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo") |
postfixes = (u"an", u"ng", u"eyä", u"e", u"tsyìp", u"o", u"ìri", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"l", u"t", u"y", u"a", u"ä") + adpositions |
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?" |
prefixes = [u"ay", u"me", u"pxe", u"pe", u"a", u"le", u"nì", u"sä", u"tì", u"ke", u"fì", u"fay", u"tsa", u"tsay", u"fra"] |
#postfixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)" |
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)" |
lenit = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u"")) |
def parseword(wordin): |
if type(wordin) == type(u"x"): |
wordin = wordin.split(" ") |
ret = {"word": {"id": 0, "navi": u" ".join(wordin), "infix": u"", "type": u""}} |
foundword = u"" |
ret = {"word": {"id": 0, "navi": u"[" + wordin[0] + u"]", "infix": u"", "type": u""}} |
for word in wordlist: |
foundit = True |
foundprefs = [] |
foundposts = [] |
splitword = word["infix"].split(" ") |
if len(wordin) != len(splitword): next |
lenited = False |
splitword = word["infix"].split(u" ") |
if len(wordin) < len(splitword): |
foundit = False |
next |
for wor in range(len(splitword)): |
foundinf = False |
if not foundit: |
break |
foundprefs.append([]) |
foundposts.append([]) |
center = u"" |
39,7 → 38,6 |
foundins = [u"", u"", u""] |
pre = [] |
post = [] |
found = False |
if u"<1>" in splitword[wor]: |
for in1 in infixes1: |
for in2 in infixes2: |
47,41 → 45,75 |
if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3) in wordin[wor]: |
center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3) |
foundins = [in1, in2, in3] |
foundinf = True |
break |
if foundinf: break |
if foundinf: break |
if center != u"": break |
if center != u"": break |
else: |
if splitword[wor] in wordin[wor]: |
center = splitword[wor] |
foundinf = True |
if not foundinf: |
if center == u"": |
for i in lenit: |
temp = u"" |
if splitword[wor].startswith(i[0]): |
temp = i[1] + splitword[wor][len(i[0]):] |
if temp in wordin[wor]: |
lenited = True |
center = temp |
if center == u"": |
if splitword[wor].endswith(u"nga"): |
temp = splitword[wor][:-3] + u"ng" |
if temp in wordin[wor]: |
center = temp |
if splitword[wor].endswith(u"po"): |
temp = splitword[wor][:-3] + u"p" |
if temp in wordin[wor]: |
center = temp |
if center == u"": |
foundit = False |
break |
pref, posf = wordin[wor].replace(center,u" ").split(" ") |
temp = wordin[wor].split(center) |
if len(temp) != 2: |
foundit = False |
break |
pref, posf = temp |
for pre in prefixes: |
if pref != u"": |
if pref.endswith(pre): |
foundprefs[wor].append(pre) |
pref = pref[:len(pref)-len(pre)] |
pref = pref[:-len(pre)] |
if pref != u"": |
foundit = False |
break |
foundposts[wor] = posf |
for pos in postfixes: |
if posf != u"": |
if posf.startswith(pos): |
foundposts[wor].append(pos) |
posf = posf[len(pos):] |
if posf != u"": |
foundit = False |
break |
if foundit == True: |
foundword = word |
break |
if foundit == True: |
ret["pref"] = foundprefs |
ret["post"] = foundposts |
ret["inf"] = foundins |
ret["len"] = lenited |
if foundit == True: |
ret["word"] = foundword |
return ret |
#print(parseword([u"tìtìng"])) |
#print(parseword([u"keykame"])) |
#print(parseword([u"kameie"])) |
#print(parseword([u"tìsusiti"])) |
#print(parseword([u"weykayoeiu"])) |
#print(parseword([u"yerikit"])) |
#print(parseword([u"menari"])) |
#print(parseword([u"menariri"])) |
def parsesent(sent): |
sent = sent.strip().lower().replace(u"’", u"'") |
sent = re.sub(ur"[^\wìä' ]",u"",sent) |
sent = re.sub(ur"\ +",u" ",sent) |
sent = sent.split(u" ") |
ret = [] |
left = len(sent) |
while left: |
word = parsenum.parse(sent[-left]) |
if word == None: |
word = parseword(sent[-left:]) |
left -= len(word["word"]["navi"].split(" ")) |
ret.append(word) |
return ret |
/tsimapiak/dbconnector.py |
---|
9,13 → 9,14 |
current = u"" |
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi") |
for row in db.query(""" |
SELECT *, CHAR_LENGTH(navi) AS NL |
SELECT * |
FROM `metaWords` |
ORDER BY NL DESC"""): |
WHERE partOfSpeech <> 'num.' AND partOfSpeech <> "prefix" |
ORDER BY CHAR_LENGTH(navi) DESC"""): |
if row["infixes"]: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"], "type": row["partOfSpeech"]}) |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]}) |
else: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"], "type": row["partOfSpeech"]}) |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]}) |
db.close() |
return ret |
27,9 → 28,9 |
FROM `metaWords` |
WHERE navi = ?""",word): |
if row["infixes"]: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"], "type": row["partOfSpeech"]}) |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"].lower(), "type": row["partOfSpeech"]}) |
else: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"], "type": row["partOfSpeech"]}) |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"].lower(), "type": row["partOfSpeech"]}) |
db.close() |
return ret |
/tsimapiak/parse.py |
---|
26,7 → 26,7 |
regex = re.sub(u"<1><2>",u"[^ ]*",regex) |
regex = re.sub(u"<3>",u"[^ ]*",regex) |
if re.match(regex,original): |
realword = eachword[2] |
realword = eachword["infix"] |
break |
if realword == u"": |
return [u"[" + original + u"]",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u""] |
/webapp/main.py |
---|
29,8 → 29,10 |
self.redirect("/number") |
numout = parsenum.parse(num.replace(" ","")) |
if numout == None: |
numout = -1 |
self.render("templates/number.html", last=num, numout=numout) |
numoutt = -1 |
else: |
numoutt = [numout["dec"], numout["oct"]] |
self.render("templates/number.html", last=num, numout=numoutt) |
class Restart(tornado.web.RequestHandler): |
def get(self): |
62,10 → 64,10 |
def post(self): |
try: |
word = self.get_argument("word").strip() |
word = self.get_argument("word") |
except: |
self.redirect("/parse2") |
out = parse2.parseword(word) |
out = parse2.parsesent(word) |
self.render("templates/parse2.html", last=word, out=out) |
application = tornado.web.Application([ |
80,5 → 82,5 |
if __name__ == "__main__": |
http_server = tornado.httpserver.HTTPServer(application) |
http_server.listen(1337) |
tornado.autoreload.start() |
#tornado.autoreload.start() |
tornado.ioloop.IOLoop.instance().start() |
/webapp/templates/parse2.html |
---|
8,7 → 8,34 |
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" /> |
<input name="btn" type="submit" value="Parse!" /> |
</form> |
{{ out }} |
{% if out %} |
<table border="1"> |
<tr> |
<th>Words</th> |
<th>Parts</th> |
<th>Data</th> |
</tr> |
{% for wor in out %} |
<tr> |
<td rowspan="4">{{ wor["word"]["navi"] }}</td> |
<td>Infixes:</td> |
<td>{{ u", ".join(wor["inf"]) }}</td> |
</tr> |
<tr> |
<td>Prefixes:</td> |
<td>{{ u"; ".join(u", ".join(x) for x in wor["pref"]) }}</td> |
</tr> |
<tr> |
<td>Postfixes:</td> |
<td>{{ u"; ".join(u", ".join(x) for x in wor["post"]) }}</td> |
</tr> |
<tr> |
<td>Lenited:</td> |
<td>{{ str(wor["len"]) }}</td> |
</tr> |
{% end %} |
</table> |
{% end %} |
<script type="text/javascript"> |
document.getElementById("word").focus(); |
</script> |