/tsimapiak/parse2.py |
---|
4,20 → 4,84 |
import re |
import dbconnector |
infixes0 = ur"(?P<in0>(?:awn|eyk|us|äp)?)" |
infixes1 = ur"(?P<in1>(?:ìyev|iyev|ìmìy|arm|asy|ilv|ìmv|imv|ìrm|irv|ìsy|aly|ary|ìly|ìry|ìlm|alm|am|ay|er|ìm|iv|ìy|ol)?)" |
infixes2 = ur"(?P<in2>(?:äng|ats|eiy|ei|uy)?)" |
#prefixesn = ur"(?P<pre>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?" |
wordlist = dbconnector.getnavilist() |
#wordlist = [{"id": 1, "navi": u"si", "infix": u"s<1><2><3>i", "type": u"v."}, |
#{"id": 2, "navi": u"wou", "infix": u"w<1><2>o<3>u", "type": u"v."}, |
#{"id": 3, "navi": u"kame", "infix": u"k<1><2>am<3>e", "type": u"v."}, |
#{"id": 4, "navi": u"tìng", "infix": u"t<1><2><3>ìng", "type": u"v."}, |
#{"id": 5, "navi": u"yerik", "infix": u"yerik", "type": u"n."}, |
#{"id": 6, "navi": u"nari", "infix": u"nari", "type": u"n."}] |
infixes1 = [u"awn", u"eyk", u"us", u"äp", u""] |
infixes2 = [u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u""] |
infixes3 = [u"äng", u"ats", u"eiy", u"ei", u"uy", u""] |
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?" |
prefixes = [u"ay", u"me", u"pxe", u"pe", u"a", u"le", u"nì", u"sä", u"tì", u"ke", u"fì", u"fay", u"tsa", u"tsay", u"fra"] |
#postfixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)" |
def parseword(wordin):
    """Match a (possibly multi-word) Na'vi input against the word list.

    wordin -- either a unicode string (split on spaces) or a list of
    unicode words, one per dictionary word of the candidate entry.

    Returns a dict whose "word" key holds the matched dictionary row
    (or a placeholder row with id 0 when nothing matched).  On a
    successful match it also carries:
      "pref" -- list (per word) of recognised prefixes,
      "post" -- trailing material left after the stem, per word,
      "inf"  -- the three infixes found, as [in1, in2, in3].

    NOTE: the superseded regex-based implementation (which indexed the
    old tuple-shaped rows as word[2]) has been removed; rows are dicts
    now, per dbconnector.getnavilist().
    """
    # Accept a plain string for convenience; work on a list of words.
    if type(wordin) == type(u"x"):
        wordin = wordin.split(" ")
    ret = {"word": {"id": 0, "navi": u" ".join(wordin), "infix": u"", "type": u""}}
    foundword = u""
    # Pre-initialise so the post-loop check is safe even when the word
    # list is empty or every entry is skipped by the length guard.
    foundit = False
    foundprefs = []
    foundposts = []
    foundins = [u"", u"", u""]
    for word in wordlist:
        foundprefs = []
        foundposts = []
        splitword = word["infix"].split(" ")
        # Entry and input must have the same number of words.
        # BUG FIX: this was a bare "next", which is a no-op expression
        # in Python (Ruby-ism); "continue" is what skips the entry.
        if len(wordin) != len(splitword):
            continue
        foundit = True
        for wor in range(len(splitword)):
            foundinf = False
            foundprefs.append([])
            foundposts.append([])
            center = u""
            foundins = [u"", u"", u""]
            if u"<1>" in splitword[wor]:
                # Brute-force every infix combination until the expanded
                # stem occurs inside the input word.
                for in1 in infixes1:
                    for in2 in infixes2:
                        for in3 in infixes3:
                            candidate = splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3)
                            if candidate in wordin[wor]:
                                center = candidate
                                foundins = [in1, in2, in3]
                                foundinf = True
                                break
                        if foundinf:
                            break
                    if foundinf:
                        break
            else:
                # No infix slots: the stem must simply occur in the word.
                if splitword[wor] in wordin[wor]:
                    center = splitword[wor]
                    foundinf = True
            if not foundinf:
                foundit = False
                break
            # Split the input word around the matched stem into leading
            # (prefix) and trailing (postfix) material.
            # NOTE(review): replace() rewrites EVERY occurrence of the
            # stem, so a stem occurring twice would make this unpack
            # raise ValueError — TODO confirm that cannot happen.
            pref, posf = wordin[wor].replace(center, u" ").split(" ")
            # Peel known prefixes off the end of the leading material.
            for pre in prefixes:
                if pref.endswith(pre):
                    foundprefs[wor].append(pre)
                    pref = pref[:len(pref) - len(pre)]
            if pref != u"":
                # Unrecognised leading material -> not this entry.
                foundit = False
                break
            foundposts[wor] = posf
        if foundit:
            foundword = word
            break
    if foundit:
        ret["pref"] = foundprefs
        ret["post"] = foundposts
        ret["inf"] = foundins
        ret["word"] = foundword
    return ret
#print(parseword([u"tìtìng"])) |
#print(parseword([u"keykame"])) |
#print(parseword([u"kameie"])) |
#print(parseword([u"tìsusiti"])) |
#print(parseword([u"weykayoeiu"])) |
#print(parseword([u"yerikit"])) |
#print(parseword([u"menari"])) |
#print(parseword([u"menariri"])) |
/tsimapiak/parse.py |
---|
20,11 → 20,11 |
infix_1 = u"" |
infix_2 = u"" |
for eachword in wordlist: |
regex = re.sub(u" ",u"[^ ]* [^ ]*",eachword[2]) |
regex = re.sub(u" ",u"[^ ]* [^ ]*",eachword["infix"]) |
regex = re.sub(u"^",u"[^ ]*",regex) |
regex = re.sub(u"$",u"[^ ]*",regex) |
regex = re.sub(u"<0><1>",u"[^ ]*",regex) |
regex = re.sub(u"<2>",u"[^ ]*",regex) |
regex = re.sub(u"<1><2>",u"[^ ]*",regex) |
regex = re.sub(u"<3>",u"[^ ]*",regex) |
if re.match(regex,original): |
realword = eachword[2] |
break |
32,11 → 32,11 |
return [u"[" + original + u"]",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u""] |
else: |
if re.search(u"<",realword): |
beginning = re.sub(u"<0><1>.*",u"",realword) |
middle = re.sub(u".*<0><1>(.*)<2>.*",ur"\1",realword) |
end = re.sub(u".*<2>",u"",realword) |
infix01 = re.sub(u".*?" + re.sub(u"<0><1>",u"([^ ]*)",re.sub(u"<2>",u"[^ ]*",realword)) + u".*?",ur"\1",original) |
infix_2 = re.sub(u".*?" + re.sub(u"<2>",u"([^ ]*)",re.sub(u"<0><1>",u"[^ ]*",realword)) + u".*?",ur"\1",original) |
beginning = re.sub(u"<1><2>.*",u"",realword) |
middle = re.sub(u".*<1><2>(.*)<3>.*",ur"\1",realword) |
end = re.sub(u".*<3>",u"",realword) |
infix01 = re.sub(u".*?" + re.sub(u"<1><2>",u"([^ ]*)",re.sub(u"<3>",u"[^ ]*",realword)) + u".*?",ur"\1",original) |
infix_2 = re.sub(u".*?" + re.sub(u"<3>",u"([^ ]*)",re.sub(u"<1><2>",u"[^ ]*",realword)) + u".*?",ur"\1",original) |
for eachinfix in infixes0: |
if infix01.startswith(eachinfix): |
infix0 = eachinfix |
51,7 → 51,7 |
infix1 = eachinfix |
infix_1 = infix_1[len(eachinfix):] |
if infix_1 != u"": |
if re.search(u"<0><1><2>",realword): |
if re.search(u"<1><2><3>",realword): |
infix_2 = infix_1 |
else: |
return [u"[" + original + u"]",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u"",u""] |
58,7 → 58,7 |
gotinfix1 = True |
break |
if gotinfix1 == False: |
if re.search(u"<0><1><2>",realword): |
if re.search(u"<1><2><3>",realword): |
if infix_1 == u"": |
infix_2 = infix_1 |
infix1 = u"" |
/tsimapiak/dbconnector.py |
---|
12,8 → 12,10 |
SELECT *, CHAR_LENGTH(navi) AS NL |
FROM `metaWords` |
ORDER BY NL DESC"""): |
infix = makeinfix(row) |
ret.append([row["id"], row["navi"], infix, row["partOfSpeech"]]) |
if row["infixes"]: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"], "type": row["partOfSpeech"]}) |
else: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"], "type": row["partOfSpeech"]}) |
db.close() |
return ret |
24,37 → 26,13 |
SELECT * |
FROM `metaWords` |
WHERE navi = ?""",word): |
infix = makeinfix(row) |
ret.append([row["id"],row["navi"], infix, row["partOfSpeech"]]) |
if row["infixes"]: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["infixes"], "type": row["partOfSpeech"]}) |
else: |
ret.append({"id": row["id"], "navi": row["navi"], "infix": row["navi"], "type": row["partOfSpeech"]}) |
db.close() |
return ret |
def makeinfix(row): |
# Build the infix template string for one dictionary row.
# Verbs get their IPA transcription converted to Na'vi orthography with
# <0><1> / <2> infix-position markers inserted at the $cdot$ break
# points; any other part of speech just uses the plain Na'vi spelling.
if row["partOfSpeech"] in (u"v.", u"vin.", u"vtr."): |
current = unicode(row["ipa"]) |
# IPA -> Na'vi orthography, one character class at a time.  The order
# of replacements matters: the ASCII apostrophe is rewritten to "x"
# BEFORE the glottal stop ʔ is rewritten to an apostrophe, so the two
# cannot collide.
current = current.replace(ur"ɛ",ur"e").replace(ur".",ur"").replace(ur"ɾ",ur"r") \ |
.replace(ur"ɪ",ur"ì").replace(ur"ˈ",ur"").replace(ur"'",ur"x") \ |
.replace(ur"ŋ",ur"ng").replace(ur"j",ur"y").replace(ur"ʔ",ur"'") \ |
.replace(ur"æ",ur"ä").replace(ur"ˌ",ur"").replace(ur"\t{ts}",ur"ts") \ |
.replace(ur"ṛ",ur"rr").replace(ur"ḷ",ur"ll").replace(ur"k̚",ur"k ") \ |
.replace(ur"p̚",ur"p ").replace(ur"t̚",ur"t ").replace(ur"'̚",ur"' ") \ |
.replace(u"\\",ur"").replace(ur"(",ur"").replace(ur")",ur"") \ |
.replace(ur"[",ur"").replace(ur"]",ur"").replace(ur" "," ") \ |
.strip() |
# Keep only the first of several alternate pronunciations ("... or ...").
current = re.sub(ur" or.*","",current) |
# Special-case respellings — presumably fixing zenke and tìng entries
# whose mechanical transliteration comes out wrong; TODO confirm.
current = re.sub(ur"z(.*)engk(.*)e",ur"z\1enk\2e",current) |
current = re.sub(ur"t(.*)ì(m|n)\ ",ur"t\1ìng ",current) |
# $cdot$ marks the infix insertion points in the source transcription.
current = current.split(ur"$cdot$") |
if len(current) == 3: |
# Two break points: pre-first infixes go after part 0, <2> after part 1.
current = current[0] + u"<0><1>" + current[1] + u"<2>" + current[2] |
elif len(current) == 2: |
# One break point: all infix slots sit together at it.
current = current[0] + u"<0><1><2>" + current[1] |
else: |
# No break point: all infix slots go at the front of the word.
current = u"<0><1><2>" + current[0] |
else: |
current = unicode(row["navi"]) |
return current |
#def gettrans(id, cod): |
#ret = [] |
#if cod not in (u"est",u"ptbr",u"de",u"eng",u"all"): |
/webapp/templates/parse2.html |
---|
0,0 → 1,15 |
{% extends "base.html" %} |
{% block title %}Word parser{% end %} |
{% block body %} |
<!-- Word-parser page: the form posts the entered word to /parse2;
     {{last}} re-fills the previous query and {{out}} renders the
     parse result returned by the handler. -->
<b>Na'vi word:</b><br /> |
<form action="/parse2" method="post"> |
<input id="word" name="word" type="text" value="{{last}}" style="width: 100%;" /> |
<input name="btn" type="submit" value="Parse!" /> |
</form> |
{{ out }} |
<script type="text/javascript"> |
document.getElementById("word").focus(); |
</script> |
{% end %} |
/webapp/templates/parse.html |
---|
1,6 → 1,6 |
{% extends "base.html" %} |
{% block title %}Number parser{% end %} |
{% block title %}Word parser{% end %} |
{% block body %} |
<b>Na'vi word:</b><br /> |
17,4 → 17,4 |
<script type="text/javascript"> |
document.getElementById("word").focus(); |
</script> |
{% end %} |
{% end %} |
/webapp/main.py |
---|
58,7 → 58,7 |
class Parse2(tornado.web.RequestHandler): |
def get(self): |
self.render("templates/parse.html", last="", out=None) |
self.render("templates/parse2.html", last="", out=None) |
def post(self): |
try: |
66,7 → 66,7 |
except: |
self.redirect("/parse2") |
out = parse2.parseword(word) |
self.render("templates/parse.html", last=word, out=out) |
self.render("templates/parse2.html", last=word, out=out) |
application = tornado.web.Application([ |
("/", Index), |