/ircbot/ircbot.py |
---|
24,13 → 24,11 |
write simpler bots. |
""" |
from UserDict import UserDict |
from irclib import ServerConnectionError, SimpleIRCClient, nm_to_n, irc_lower, \ |
parse_channel_modes, is_channel |
import sys |
from UserDict import UserDict |
from irclib import SimpleIRCClient |
from irclib import nm_to_n, irc_lower, all_events |
from irclib import parse_channel_modes, is_channel |
from irclib import ServerConnectionError |
class SingleServerIRCBot(SimpleIRCClient): |
"""A single-server IRC bot class. |
66,7 → 64,7 |
self.channels = IRCDict() |
self.server_list = server_list |
if not reconnection_interval or reconnection_interval < 0: |
reconnection_interval = 2**31 |
reconnection_interval = 2 ** 31 |
self.reconnection_interval = reconnection_interval |
self._nickname = nickname |
75,7 → 73,7 |
"namreply", "nick", "part", "quit"]: |
self.connection.add_global_handler(i, |
getattr(self, "_on_" + i), |
-10) |
- 10) |
def _connected_checker(self): |
"""[Internal]""" |
if not self.connection.is_connected(): |
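For orientation, ircbot.py is python-irclib's bot framework: the handlers registered above (namreply, nick, part, quit and the rest of the list) keep `self.channels` in sync, and `_connected_checker` drives reconnection. A minimal subclass is sketched below; the server, channel and nickname are placeholders, and `on_welcome` is the conventional python-irclib callback for "connection accepted", which this hunk does not itself show.

```python
# Minimal sketch of a bot built on this module (python-irclib 0.4.x style API).
# Server, port, channel and nick are placeholder values.
from ircbot import SingleServerIRCBot


class EchoBot(SingleServerIRCBot):
    def __init__(self):
        SingleServerIRCBot.__init__(self, [("irc.example.net", 6667)],
                                    "echobot", "echo bot")

    def on_welcome(self, c, e):
        c.join("#example")                         # join once the server greets us

    def on_pubmsg(self, c, e):
        c.privmsg(e.target(), e.arguments()[0])    # echo public messages back


if __name__ == "__main__":
    EchoBot().start()
```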
256,18 → 254,18 |
Otherwise, it should behave exactly as a normal dictionary. |
""" |
def __init__(self, dict=None): |
def __init__(self, dictt=None): |
self.data = {} |
self.canon_keys = {} # Canonical keys |
if dict is not None: |
self.update(dict) |
if dictt is not None: |
self.update(dictt) |
def __repr__(self): |
return repr(self.data) |
def __cmp__(self, dict): |
if isinstance(dict, IRCDict): |
return cmp(self.data, dict.data) |
def __cmp__(self, dictt): |
if isinstance(dictt, IRCDict): |
return cmp(self.data, dictt.data) |
else: |
return cmp(self.data, dict) |
return cmp(self.data, dictt) |
def __len__(self): |
return len(self.data) |
def __getitem__(self, key): |
301,8 → 299,8 |
return self.data.values() |
def has_key(self, key): |
return irc_lower(key) in self.canon_keys |
def update(self, dict): |
for k, v in dict.items(): |
def update(self, dictt): |
for k, v in dictt.items(): |
self.data[k] = v |
def get(self, key, failobj=None): |
return self.data.get(key, failobj) |
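IRCDict's point is that channel and nick keys compare case-insensitively under IRC's folding rules (irclib's `irc_lower` also folds the bracket characters to their lower-case counterparts per RFC 1459). A self-contained sketch of the same idea, using a simplified folding function rather than `irc_lower`, and a layout that only mirrors, not matches, the class above:

```python
# Standalone sketch of a case-insensitively keyed mapping in the spirit of IRCDict.
def fold(key):
    # Simplified stand-in for irclib's irc_lower: RFC 1459 treats []\ as the
    # upper-case forms of {}|.
    table = {"[": "{", "]": "}", "\\": "|"}
    return "".join(table.get(ch, ch) for ch in key.lower())


class FoldedDict(object):
    def __init__(self):
        self.data = {}        # folded key -> value
        self.canon_keys = {}  # folded key -> key as it was first written

    def __setitem__(self, key, value):
        ck = fold(key)
        self.canon_keys.setdefault(ck, key)
        self.data[ck] = value

    def __getitem__(self, key):
        return self.data[fold(key)]

    def __contains__(self, key):
        return fold(key) in self.data


d = FoldedDict()
d["#Na'vi"] = 1
assert "#NA'VI" in d and d["#na'vi"] == 1
```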
424,7 → 422,7 |
def limit(self): |
if self.has_limit(): |
return self.modes[l] |
return self.modes["l"] |
else: |
return None |
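The `limit()` fix just above is a plain NameError repair: a channel's modes live in a dict keyed by the mode letter, so the user limit sits under the string key "l", not an undefined name `l`. A tiny illustration of the assumed layout:

```python
# Hypothetical contents of Channel.modes after "+l 50" and "+k hunter2" are seen.
modes = {"l": "50", "k": "hunter2"}

limit = modes["l"] if "l" in modes else None   # the corrected lookup
print(limit)                                   # prints: 50
```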
/ircbot/bot.py |
---|
12,7 → 12,7 |
are given by prefixing the text by the bot name followed by a colon.""" |
from ircbot import SingleServerIRCBot |
from irclib import nm_to_n, nm_to_h, irc_lower, ip_numstr_to_quad, ip_quad_to_numstr |
from irclib import nm_to_n |
from tsimapiak import translate |
class Bot(SingleServerIRCBot): |
28,12 → 28,12 |
c.privmsg("NiceBot", "asztallab") |
def on_privmsg(self, c, e): |
self.do_command(e, e.arguments()[0],True) |
self.do_command(e, e.arguments()[0], True) |
def on_pubmsg(self, c, e): |
a = e.arguments()[0] |
if a[0] == "!": |
self.do_command(e, a[1:].strip(),False) |
self.do_command(e, a[1:].strip(), False) |
return |
def do_command(self, e, cmd, priv): |
49,7 → 49,7 |
if (cmd.split(" ")[0] == "tr") or (cmd.split(" ")[0] == "translate"): |
lang = "eng" |
if cmd.split(" ")[1].startswith("-"): |
if len(cmd.split(" ")) > 1 and cmd.split(" ")[1].startswith("-"): |
if cmd.split(" ")[1][1:] in ("hu", "de", "ptbr", "est", "sv"): |
lang = cmd.split(" ")[1][1:] |
sent = " ".join(cmd.split(" ")[2:]) |
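The added length check guards against a bare `tr`/`translate` command with no arguments, which previously raised IndexError on `cmd.split(" ")[1]`. A minimal sketch of the same guarded parsing, with hypothetical command strings:

```python
# Sketch of the guarded "tr"/"translate" parsing (defaults and language codes
# taken from the hunk above).
def parse_tr(cmd):
    parts = cmd.split(" ")
    lang = "eng"                      # default target language
    sent_start = 1
    if len(parts) > 1 and parts[1].startswith("-"):
        if parts[1][1:] in ("hu", "de", "ptbr", "est", "sv"):
            lang = parts[1][1:]
        sent_start = 2
    return lang, " ".join(parts[sent_start:])


print(parse_tr("tr -hu oel ngati kameie"))   # ('hu', 'oel ngati kameie')
print(parse_tr("tr oel ngati kameie"))       # ('eng', 'oel ngati kameie')
print(parse_tr("tr"))                        # ('eng', '') -- no IndexError
```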
/ircbot/irclib.py |
---|
17,6 → 17,13 |
# keltus <keltus@users.sourceforge.net> |
# |
# $Id: irclib.py,v 1.47 2008/09/25 22:00:59 keltus Exp $ |
import bisect |
import re |
import select |
import socket |
import string |
import time |
import types |
"""irclib -- Internet Relay Chat (IRC) protocol client library. |
61,14 → 68,6 |
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/ |
""" |
import bisect |
import re |
import select |
import socket |
import string |
import sys |
import time |
import types |
VERSION = 0, 4, 8 |
DEBUG = 0 |
287,7 → 286,7 |
arguments -- Arguments to give the function. |
""" |
self.execute_delayed(at-time.time(), function, arguments) |
self.execute_delayed(at - time.time(), function, arguments) |
def execute_delayed(self, delay, function, arguments=()): |
"""Execute a function after a specified time. |
300,7 → 299,7 |
arguments -- Arguments to give the function. |
""" |
bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments)) |
bisect.insort(self.delayed_commands, (delay + time.time(), function, arguments)) |
if self.fn_to_add_timeout: |
self.fn_to_add_timeout(delay) |
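`execute_at` is just `execute_delayed` with the delay computed from an absolute timestamp, and the pending calls sit in `delayed_commands`, a list kept sorted by due time via `bisect.insort` so the earliest job is always at the head. A standalone sketch of that scheduling idea (not irclib's actual event loop):

```python
# Standalone sketch of a sorted pending-command queue, bisect.insort style.
import bisect
import time

delayed_commands = []   # (due_time, function, arguments), kept sorted by due_time


def execute_delayed(delay, function, arguments=()):
    bisect.insort(delayed_commands, (delay + time.time(), function, arguments))


def process_timeout():
    now = time.time()
    while delayed_commands and delayed_commands[0][0] <= now:
        due, function, arguments = delayed_commands.pop(0)
        function(*arguments)


def say(msg):
    print(msg)


execute_delayed(0.0, say, ("fired",))
process_timeout()   # prints: fired
```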
341,7 → 340,7 |
def __init__(self, irclibobj): |
self.irclibobj = irclibobj |
def _get_socket(): |
def _get_socket(self): |
raise IRCError, "Not overridden" |
############################## |
488,9 → 487,9 |
try: |
if self.ssl: |
new_data = self.ssl.read(2**14) |
new_data = self.ssl.read(2 ** 14) |
else: |
new_data = self.socket.recv(2**14) |
new_data = self.socket.recv(2 ** 14) |
except socket.error, x: |
# The server hung up. |
self.disconnect("Connection reset by peer") |
845,10 → 844,10 |
"""Send a WHOIS command.""" |
self.send_raw("WHOIS " + ",".join(targets)) |
def whowas(self, nick, max="", server=""): |
def whowas(self, nick, maxx="", server=""): |
"""Send a WHOWAS command.""" |
self.send_raw("WHOWAS %s%s%s" % (nick, |
max and (" " + max), |
maxx and (" " + maxx), |
server and (" " + server))) |
class DCCConnectionError(IRCError): |
955,7 → 954,7 |
return |
try: |
new_data = self.socket.recv(2**14) |
new_data = self.socket.recv(2 ** 14) |
except socket.error, x: |
# The server hung up. |
self.disconnect("Connection reset by peer") |
972,7 → 971,7 |
# Save the last, unfinished line. |
self.previous_buffer = chunks[-1] |
if len(self.previous_buffer) > 2**14: |
if len(self.previous_buffer) > 2 ** 14: |
# Bad peer! Naughty peer! |
self.disconnect() |
return |
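The `previous_buffer` logic is line buffering over a TCP stream: whatever trails the last line terminator is held back until the next read, and an unterminated line longer than 2 ** 14 bytes gets the peer disconnected. A self-contained sketch of that buffering pattern, splitting on "\n" for brevity where irclib uses a line-separator regexp:

```python
# Standalone sketch of the keep-the-unfinished-tail buffering in process_data().
class LineBuffer(object):
    def __init__(self):
        self.previous_buffer = ""

    def feed(self, new_data):
        # Prepend the leftover from the previous read, then split into lines.
        chunks = (self.previous_buffer + new_data).split("\n")
        # The last chunk is "" if the data ended on a newline, otherwise an
        # unfinished line that must wait for more data.
        self.previous_buffer = chunks[-1]
        return chunks[:-1]


buf = LineBuffer()
print(buf.feed("PING :server\nPRIVMSG #c :hel"))   # ['PING :server']
print(buf.feed("lo\n"))                            # ['PRIVMSG #c :hello']
```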
1228,14 → 1227,14 |
messages = [] |
i = 0 |
while i < len(chunks)-1: |
while i < len(chunks) - 1: |
# Add message if it's non-empty. |
if len(chunks[i]) > 0: |
messages.append(chunks[i]) |
if i < len(chunks)-2: |
if i < len(chunks) - 2: |
# Aye! CTCP tagged data ahead! |
messages.append(tuple(chunks[i+1].split(" ", 1))) |
messages.append(tuple(chunks[i + 1].split(" ", 1))) |
i = i + 2 |
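The chunk walk above is CTCP handling: the message text is split on the \001 delimiter, ordinary text and CTCP-tagged commands alternate, and each tagged chunk is split once into (command, argument). A small standalone sketch of that split, using made-up messages:

```python
# Standalone sketch of splitting CTCP-tagged data out of a message body.
def split_ctcp(text):
    chunks = text.split("\001")
    messages = []
    i = 0
    while i < len(chunks) - 1:
        if chunks[i]:                          # plain text, if non-empty
            messages.append(chunks[i])
        if i < len(chunks) - 2:                # CTCP tagged data ahead
            messages.append(tuple(chunks[i + 1].split(" ", 1)))
        i += 2
    if chunks[-1]:                             # trailing plain text, if any
        messages.append(chunks[-1])
    return messages


print(split_ctcp("\001ACTION waves\001"))       # [('ACTION', 'waves')]
print(split_ctcp("hi \001VERSION\001 there"))   # ['hi ', ('VERSION',), ' there']
```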
1352,8 → 1351,6 |
for ch in mode_part: |
if ch in "+-": |
sign = ch |
elif ch == " ": |
collecting_arguments = 1 |
elif ch in unary_modes: |
if len(args) >= arg_count + 1: |
modes.append([sign, ch, args[arg_count]]) |
1374,7 → 1371,7 |
"002": "yourhost", |
"003": "created", |
"004": "myinfo", |
"005": "featurelist", # XXX |
"005": "featurelist", # XXX |
"200": "tracelink", |
"201": "traceconnecting", |
"202": "tracehandshake", |
1467,7 → 1464,7 |
"374": "endofinfo", |
"375": "motdstart", |
"376": "endofmotd", |
"377": "motd2", # 1997-10-16 -- tkil |
"377": "motd2", # 1997-10-16 -- tkil |
"381": "youreoper", |
"382": "rehashing", |
"384": "myportis", |
1496,7 → 1493,7 |
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC. |
"433": "nicknameinuse", |
"436": "nickcollision", |
"437": "unavailresource", # "Nick temporally unavailable" |
"437": "unavailresource", # "Nick temporally unavailable" |
"441": "usernotinchannel", |
"442": "notonchannel", |
"443": "useronchannel", |
1517,12 → 1514,12 |
"474": "bannedfromchan", |
"475": "badchannelkey", |
"476": "badchanmask", |
"477": "nochanmodes", # "Channel doesn't support modes" |
"477": "nochanmodes", # "Channel doesn't support modes" |
"478": "banlistfull", |
"481": "noprivileges", |
"482": "chanoprivsneeded", |
"483": "cantkillserver", |
"484": "restricted", # Connection is restricted |
"484": "restricted", # Connection is restricted |
"485": "uniqopprivsneeded", |
"491": "nooperhost", |
"492": "noservicehost", |
/tsimapiak/parse.py |
---|
20,30 → 20,30 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
import re |
import dbconnector |
import parsenum |
import re |
#wordlist = [{"id": 0, "navi": u"tawtute", "infix": u"tawtute", "type": u"n."}] + dbconnector.getnavilist() + [{"id": 0, "navi": u"na'vi", "infix": u"na'vi", "type": u"n."}] # XXX HACK - extra proper nouns |
wordlist = dbconnector.getnavilist() |
brokenwords = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False)) # XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, prefixes, suffixes. Things that can take affixes should go in the above list instead. |
infixes1 = (u"awn", u"eyk", u"us", u"äp", u"") |
infixes2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"") |
infixes3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"") |
prefixes = (u"tsay", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"a") |
adpositions = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì") |
postfixes = adpositions + (u"tsyìp", u"eyä", u"ìri", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r") |
BROKENWORDS = ((u"sami", u"si", u"", u"am", u"", (()), (()), False), (u"to", u"to", u"", u"", u"", (()), (()), False), (u"frato", u"to", u"", u"", u"", [[u"fra"]], (()), False)) # XXX HACK - These are words that are either not in Eana Eltu, or that get interpreted wrongly for whatever reason. The latter should be removed from this list when the parser gets more sophisticated. The former should also have an entry in the equivalent array in the translator! If it can take infixes, consider adding it to the main wordlist above (see the examples). The order is - original, Na'vi root, 0-pos infix, 1-pos infix, 2-pos infix, PREFIXES, suffixes. Things that can take affixes should go in the above list instead. |
INFIXES1 = (u"awn", u"eyk", u"us", u"äp", u"") |
INFIXES2 = (u"ìyev", u"iyev", u"ìmìy", u"arm", u"asy", u"ilv", u"ìmv", u"imv", u"ìrm", u"irv", u"ìsy", u"aly", u"ary", u"ìly", u"ìry", u"ìlm", u"alm", u"am", u"ay", u"er", u"ìm", u"iv", u"ìy", u"ol", u"") |
INFIXES3 = (u"äng", u"ats", u"eiy", u"ei", u"uy", u"") |
PREFIXES = (u"tsay", u"say", u"fay", u"fra", u"pxe", u"fne", u"tsa", u"sa", u"pe", u"fe", u"le", u"nì", u"sä", u"tì", u"sì", u"ay", u"me", u"fì", u"ke", u"he", u"a") |
ADPOSITIONS = (u"mungwrr", u"kxamlä", u"pximaw", u"pxisre", u"tafkip", u"nemfa", u"takip", u"mìkam", u"teri", u"fkip", u"luke", u"pxel", u"pxaw", u"rofa", u"ìlä", u"fpi", u"ftu", u"kip", u"lok", u"maw", u"sre", u"sìn", u"vay", u"eo", u"fa", u"hu", u"io", u"ka", u"mì", u"na", u"ne", u"ro", u"ta", u"uo", u"wä", u"äo", u"to", u"sì") |
POSTFIXES = ADPOSITIONS + (u"tsyìp", u"eyä", u"ìri", u"ìl", u"it", u"lo", u"ri", u"ru", u"ti", u"ur", u"ve", u"yä", u"ya", u"tu", u"vi", u"yu", u"an", u"ng", u"ke", u"e", u"o", u"l", u"t", u"y", u"a", u"ä", u"r") |
#prefixesn = ur"(?P<npr>(?:(?:fì|tsa)?(?:me|pxe|ay|fra)?|(?:fay)?|(?:tsay)?)(?:fne)?(?:tì|sä)?" |
#prefixesv = ur"(?P<vpr>(?:nì|sä|tì|rä'ä |ke )?)" |
lenit = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u"")) |
LENIT = ((u"px", u"p"), (u"tx", u"t"), (u"kx", u"k"), (u"ts", u"s"), (u"t", u"s"), (u"p", u"f"), (u"k", u"h"), (u"'", u"")) |
def parseword(wordin): |
tempid = 0 |
temptype = u"" |
for brokenword in brokenwords: # XXX HACK - this is all code to work around bugs that shouldn't exist |
for brokenword in BROKENWORDS: # XXX HACK - this is all code to work around bugs that shouldn't exist |
if wordin[0] == brokenword[0]: |
for word in wordlist: |
if brokenword[1] == word["navi"]: |
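The BROKENWORDS comment spells out the field order of each hard-coded entry: original form, Na'vi root, the three infix slots, then prefixes and suffixes; the trailing boolean looks like the lenition flag the parser reports as `word["len"]`, though that is an inference, not something this hunk states. A tiny sketch reading the first entry under that interpretation (the local names are mine):

```python
# Unpacking one BROKENWORDS entry by the field order given in the comment above.
entry = (u"sami", u"si", u"", u"am", u"", (()), (()), False)

original, root, infix0, infix1, infix2, prefixes, suffixes, lenited = entry
print(u"%s = root %s + <%s> infix" % (original, root, infix1))
# sami = root si + <am> infix   (<am> is glossed PAST. in translate.py)
```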
61,7 → 61,7 |
foundins = [u"", u"", u""] |
if len(wordin) < len(splitword): |
foundit = False |
next |
continue |
for wor in range(len(splitword)): |
if not foundit: |
break |
68,35 → 68,35 |
foundprefs.append([]) |
foundposts.append([]) |
center = u"" |
pre = [] |
post = [] |
if u"<1>" in splitword[wor]: |
tempin1 = [] |
tempin2 = [] |
tempin3 = [] |
for in1 in infixes1: |
for in1 in INFIXES1: |
if in1 in wordin[wor]: |
tempin1.append(in1) |
for in2 in infixes2: |
for in2 in INFIXES2: |
if in2 in wordin[wor]: |
tempin2.append(in2) |
for in3 in infixes3: |
for in3 in INFIXES3: |
if in3 in wordin[wor]: |
tempin3.append(in3) |
for in1 in tempin1: |
for in2 in tempin2: |
for in3 in tempin3: |
if splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3).replace(u"lll",u"l").replace(u"rrr",u"r") in wordin[wor]: |
center = splitword[wor].replace(u"<1><2>",in1+in2).replace(u"<3>",in3).replace(u"lll",u"l").replace(u"rrr",u"r") |
if splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") in wordin[wor]: |
center = splitword[wor].replace(u"<1><2>", in1 + in2).replace(u"<3>", in3).replace(u"lll", u"l").replace(u"rrr", u"r") |
foundins = [in1, in2, in3] |
break |
if center != u"": break |
if center != u"": break |
if center != u"": |
break |
if center != u"": |
break |
else: |
if splitword[wor] in wordin[wor]: |
center = splitword[wor] |
if center == u"" and (wordin[wor] == u"paya" or splitword[wor] != u"pxay"): # XXX HACK - workaround to fix pay being lenited pxay. Maybe fixable without hardcoding? |
for i in lenit: |
for i in LENIT: |
temp = u"" |
if splitword[wor].startswith(i[0]): |
temp = i[1] + splitword[wor][len(i[0]):] |
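The matching loop works on dictionary entries whose infix field is a template carrying `<1><2>` and `<3>` markers: each combination of candidate infixes is substituted in (followed by the lll/rrr clean-ups) and the result is tested for containment in the input word; only if nothing matches is the lenition table tried. A standalone sketch of the substitution test with a hypothetical verb template and tiny infix subsets:

```python
# Standalone sketch of the <1><2>/<3> template substitution used in parseword().
template = u"t<1><2>ar<3>on"        # hypothetical entry for a verb like "taron"
infixes12 = (u"er", u"am", u"")     # slot 1+2 candidates (aspect/tense), subset
infixes3 = (u"ei", u"")             # slot 3 candidates (attitude), subset


def match(template, wordin):
    for in12 in infixes12:
        for in3 in infixes3:
            candidate = template.replace(u"<1><2>", in12).replace(u"<3>", in3)
            if candidate in wordin:
                return candidate, (in12, in3)
    return None


assert match(template, u"teraron") == (u"teraron", (u"er", u""))
assert match(template, u"tareion") == (u"tareion", (u"", u"ei"))
```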
123,7 → 123,7 |
last = u"" |
while last != pref: |
last = pref |
for pre in prefixes: |
for pre in PREFIXES: |
if pref != u"": |
if pref.endswith(pre): |
if pre in foundprefs[wor]: |
133,11 → 133,12 |
break |
if pref != u"": |
foundit = False |
foundprefs = [] |
break |
last = u"" |
while last != posf: |
last = posf |
for pos in postfixes: |
for pos in POSTFIXES: |
if posf != u"": |
if posf.startswith(pos): |
if pos in foundposts[wor]: |
150,6 → 151,7 |
break |
if posf != u"": |
foundit = False |
foundposts = [] |
break |
if foundit == True: |
foundword = word |
164,13 → 166,13 |
def parsesent(sent): |
sent = sent.strip().lower().replace(u"’", u"'") |
sent = re.sub(ur"[^\wìä' ]",u"",sent) |
sent = re.sub(ur"\ +",u" ",sent) |
sent = re.sub(ur"[^\wìä' ]", u"", sent) |
sent = re.sub(ur"\ +", u" ", sent) |
sent = sent.split(u" ") |
ret = [] |
left = len(sent) |
while left: |
word = parsenum.parse(sent[len(sent)-left]) |
word = parsenum.parse(sent[len(sent) - left]) |
if word == None: |
word = parseword(sent[-left:]) |
left -= len(word["word"]["navi"].split(" ")) |
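For reference, `parsesent()` is the entry point the bot and webapp call: it normalises the sentence, then consumes it token by token, letting `parseword()` swallow more than one token when a multi-word dictionary entry matches. A hedged usage sketch; it assumes the package is importable, the word database behind dbconnector is reachable, and the printed fields are the ones read elsewhere in this changeset:

```python
# Hypothetical interactive use of the parser.
from tsimapiak import parse

for word in parse.parsesent(u"oel ngati kameie"):
    print(u"root=%s infixes=%s prefixes=%s postfixes=%s" % (
        word["word"]["navi"], word["inf"], word["pref"], word["post"]))
```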
/tsimapiak/parsenum.py |
---|
20,7 → 20,7 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
num = [u"kew", |
NUM = [u"kew", |
u"'aw", |
u"mune", |
u"pxey", |
29,7 → 29,7 |
u"pukap", |
u"kinä"] |
numord = [u"kew", |
NUMORD = [u"kew", |
u"'aw", |
u"mu", |
u"pxey", |
38,7 → 38,7 |
u"pu", |
u"ki"] |
rem = [u"aw", |
REM = [u"aw", |
u"mun", |
u"pey", |
u"sìng", |
46,7 → 46,7 |
u"fu", |
u"hin"] |
remord = [u"aw", |
REMORD = [u"aw", |
u"mu", |
u"pey", |
u"sì", |
54,7 → 54,7 |
u"fu", |
u"hi"] |
base = [u"", |
BASE = [u"", |
u"me", |
u"pxe", |
u"tsì", |
65,7 → 65,7 |
def parse(numin): |
if u"mm" in numin: |
return None |
if (numin == u"") or ((numin[0] == u"a") and (numin[len(numin)-1] == u"a")): |
if (numin == u"") or ((numin[0] == u"a") and (numin[len(numin) - 1] == u"a")): |
return None |
prefs = [] |
posts = [] |
75,17 → 75,17 |
if numin[0] == u"a": |
prefs.append(u"a") |
numin = numin[1:] |
if numin[len(numin)-1] == u"a": |
if numin[len(numin) - 1] == u"a": |
posts.append(u"a") |
numin = numin[:-1] |
if numin[-2:] == u"ve": |
posts.append(u"ve") |
numin = numin[:-2] |
#base numbers |
for n in range(len(num)): |
#BASE numbers |
for n in range(len(NUM)): |
if u"ve" in posts: |
if numin == numord[n]: |
if numin == NUMORD[n]: |
outoct = n |
outdec = n |
ret["word"]["navi"] = unicode(outdec) + u"." |
93,7 → 93,7 |
ret["oct"] = outoct |
return ret |
else: |
if numin == num[n]: |
if numin == NUM[n]: |
outoct = n |
outdec = n |
ret["word"]["navi"] = unicode(outdec) |
102,53 → 102,53 |
return ret |
#other numbers |
notbase = False |
for n in range(len(base)): |
if numin.startswith(base[n] + u"zazam"): |
outoct += (n+1) * (10**4) |
outdec += (n+1) * (8**4) |
if numin[len(base[n]) + 4:].startswith(u"mrr") or numin[len(base[n]) + 4:].startswith(u"me"): |
numin = numin[len(base[n]) + 4:] |
for n in range(len(BASE)): |
if numin.startswith(BASE[n] + u"zazam"): |
outoct += (n + 1) * (10 ** 4) |
outdec += (n + 1) * (8 ** 4) |
if numin[len(BASE[n]) + 4:].startswith(u"mrr") or numin[len(BASE[n]) + 4:].startswith(u"me"): |
numin = numin[len(BASE[n]) + 4:] |
else: |
numin = numin[len(base[n]) + 5:] |
numin = numin[len(BASE[n]) + 5:] |
notbase = True |
for n in range(len(base)): |
if numin.startswith(base[n] + u"vozam"): |
outoct += (n+1) * (10**3) |
outdec += (n+1) * (8**3) |
if numin[len(base[n]) + 4:].startswith(u"mrr") or numin[len(base[n]) + 4:].startswith(u"me"): |
numin = numin[len(base[n]) + 4:] |
for n in range(len(BASE)): |
if numin.startswith(BASE[n] + u"vozam"): |
outoct += (n + 1) * (10 ** 3) |
outdec += (n + 1) * (8 ** 3) |
if numin[len(BASE[n]) + 4:].startswith(u"mrr") or numin[len(BASE[n]) + 4:].startswith(u"me"): |
numin = numin[len(BASE[n]) + 4:] |
else: |
numin = numin[len(base[n]) + 5:] |
numin = numin[len(BASE[n]) + 5:] |
notbase = True |
for n in range(len(base)): |
if numin.startswith(base[n] + u"zam"): |
outoct += (n+1) * (10**2) |
outdec += (n+1) * (8**2) |
if numin[len(base[n]) + 2:].startswith(u"mrr") or numin[len(base[n]) + 2:].startswith(u"me"): |
numin = numin[len(base[n]) + 2:] |
for n in range(len(BASE)): |
if numin.startswith(BASE[n] + u"zam"): |
outoct += (n + 1) * (10 ** 2) |
outdec += (n + 1) * (8 ** 2) |
if numin[len(BASE[n]) + 2:].startswith(u"mrr") or numin[len(BASE[n]) + 2:].startswith(u"me"): |
numin = numin[len(BASE[n]) + 2:] |
else: |
numin = numin[len(base[n]) + 3:] |
numin = numin[len(BASE[n]) + 3:] |
notbase = True |
for n in range(len(base)): |
if numin.startswith(base[n] + u"vol"): |
outoct += (n+1) * 10 |
outdec += (n+1) * 8 |
numin = numin[len(base[n]) + 3:] |
for n in range(len(BASE)): |
if numin.startswith(BASE[n] + u"vol"): |
outoct += (n + 1) * 10 |
outdec += (n + 1) * 8 |
numin = numin[len(BASE[n]) + 3:] |
notbase = True |
if numin.startswith(base[n] + u"vo"): |
outoct += (n+1) * 10 |
outdec += (n+1) * 8 |
numin = numin[len(base[n]) + 2:] |
if numin.startswith(BASE[n] + u"vo"): |
outoct += (n + 1) * 10 |
outdec += (n + 1) * 8 |
numin = numin[len(BASE[n]) + 2:] |
notbase = True |
if notbase: |
for n in range(len(rem)): |
for n in range(len(REM)): |
if u"ve" in posts: |
if numin == remord[n]: |
if numin == REMORD[n]: |
outoct += n + 1 |
outdec += n + 1 |
numin = u"" |
else: |
if numin == rem[n]: |
if numin == REM[n]: |
outoct += n + 1 |
outdec += n + 1 |
numin = u"" |
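Na'vi numerals are octal, which is why each branch adds `(n + 1) * 10 ** k` to outoct (the number as written in octal digits) and `(n + 1) * 8 ** k` to outdec (its decimal value). A worked example under that reading; the call at the end is hedged on the return dict carrying the same "dec"/"oct" keys as the single-digit branch:

```python
# "mevolaw" = me- (2) x vol (8) + 'aw (1):
#   vol branch:  n = 1 ("me")  ->  outoct += 2 * 10 = 20,  outdec += 2 * 8 = 16
#   rem branch:  "aw"          ->  outoct += 1,            outdec += 1
# i.e. octal 21, decimal 17.
from tsimapiak import parsenum

result = parsenum.parse(u"mevolaw")
print(result)   # expected to carry "dec": 17 and "oct": 21
```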
/tsimapiak/translate.py |
---|
20,49 → 20,49 |
# You should have received a copy of the GNU General Public License |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
import dbconnector |
import parse |
import dbconnector |
brokenwords = [[u"to", u"superlative marker than", u"Komparativ Marker", u"võrdlemise märkija", u"középfokot jelez mint", u"palavre que"]] #, (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # XXX HACK - words not in EE |
infixes1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL.")) |
infixes2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF.")) |
infixes3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON.")) |
prefixes = ((u"tsay", u"those"), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"a", u"ADJ.POST")) |
adpositions = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and")) |
postfixes = adpositions + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT.")) |
#BROKENWORDS = [[u"", u"", u"", u"", u"", u"", u""]] #, (u"tawtute", u"skyperson"), (u"na'vi", u"The People")) # XXX HACK - words not in EE |
INFIXES1 = ((u"awn", u"P.PART"), (u"eyk", u"CAUS"), (u"us", u"A.PART"), (u"äp", u"REFL.")) |
INFIXES2 = ((u"ìyev", u"FUT.SUBJ"), (u"iyev", u"FUT.SUBJ"), (u"ìmìy", u"REC.PAST.REC.FUT"), (u"arm", u"IMPF.PAST"), (u"asy", u"FUT.D"), (u"ilv", u"PRES.PER.SUBJ"), (u"ìmv", u"REC.PAST.SUBJ"), (u"imv", u"PAST.SUBJ"), (u"ìrm", u"IMPF.REC.PAST"), (u"irv", u"PRES.IMPF.SUBJ"), (u"ìsy", u"IMM.FUT.D"), (u"aly", u"PERF.FUT"), (u"ary", u"IMPF.FUT"), (u"ìly", u"PERF.IMM.FUT"), (u"ìry", u"IMPF.IMM.FUT"), (u"ìlm", u"PERF.REC.PAST"), (u"alm", u"PERF.PAST"), (u"am", u"PAST."), (u"ay", u"FUT."), (u"er", u"IMPF."), (u"ìm", u"REC.PAST"), (u"iv", u"SUBJ."), (u"ìy", u"IMM.FUT"), (u"ol", u"PERF.")) |
INFIXES3 = ((u"äng", u"PEJ."), (u"ats", u"INFR."), (u"eiy", u"LAUD."), (u"ei", u"LAUD."), (u"uy", u"HON.")) |
PREFIXES = ((u"tsay", u"those"), (u"say", u"those-LENTD"), (u"fay", u"these"), (u"fra", u"every"), (u"pxe", u"TRI."), (u"fne", u"type"), (u"tsa", u"that"), (u"sa", u"that-LENTD"), (u"pe", u"what"), (u"fe", u"what-LENTD"), (u"le", u"ADJD."), (u"nì", u"ADVD."), (u"sä", u"INSTD."), (u"tì", u"NOUND."), (u"sì", u"NOUND.-LENTD"), (u"ay", u"PL."), (u"me", u"DU."), (u"fì", u"this"), (u"ke", u"not"), (u"he", u"not-LENTD"), (u"a", u"ADJ.POST")) |
ADPOSITIONS = ((u"mungwrr", u"except"), (u"kxamlä", u"through"), (u"pximaw", u"right.after"), (u"pxisre", u"right.before"), (u"tafkip", u"from.up.among"), (u"nemfa", u"into.inside"), (u"takip", u"from among"), (u"mìkam", u"between"), (u"teri", u"about.concerning"), (u"fkip", u"up.among"), (u"luke", u"without"), (u"pxel", u"like.as"), (u"pxaw", u"around"), (u"rofa", u"beside.alongside"), (u"ìlä", u"by.via.following"), (u"fpi", u"for.the.sake/benefit.of"), (u"ftu", u"from.direction"), (u"kip", u"among"), (u"lok", u"close.to"), (u"maw", u"after.time"), (u"sre", u"before.time"), (u"sìn", u"on.onto"), (u"vay", u"up.to"), (u"eo", u"before.in.front.of"), (u"fa", u"with.by.means.of"), (u"hu", u"with.accompaniment"), (u"io", u"above"), (u"ka", u"across"), (u"mì", u"in.on"), (u"na", u"like.as"), (u"ne", u"to.towards"), (u"ro", u"at.locative"), (u"ta", u"from"), (u"uo", u"behind"), (u"wä", u"against.opposition"), (u"äo", u"below"), (u"to", u"than"), (u"sì", u"and")) |
POSTFIXES = ADPOSITIONS + ((u"tsyìp", u"DIM."), (u"eyä", u"GEN."), (u"ìri", u"TOP."), (u"ìl", u"ERG."), (u"it", u"ACC"), (u"lo", u"MULT."), (u"ri", u"TOP."), (u"ru", u"DAT."), (u"ti", u"ACC."), (u"ur", u"DAT."), (u"ve", u"ORD."), (u"yä", u"GEN."), (u"ya", u"VOC."), (u"tu", u"OBJD."), (u"vi", u"PART."), (u"yu", u"AGENTD."), (u"an", u"MASC."), (u"ng", u"INCL."), (u"ke", u"not"), (u"e", u"FEM."), (u"o", u"INDEF."), (u"l", u"ERG."), (u"t", u"ACC."), (u"y", u"GEN."), (u"a", u"ADJ.PRE"), (u"ä", u"GEN."), (u"r", u"DAT.")) |
def translatesent(sent, lang): |
sent = parse.parsesent(sent) |
for word in sent: |
word["translated"] = "" |
if word["word"]["id"] != 0: |
word["translated"] = dbconnector.translate(word["word"]["id"],lang) |
if word["word"]["id"] > 0: |
word["translated"] = dbconnector.translate(word["word"]["id"], lang) |
else: |
for brokenword in brokenwords: |
if brokenword[0] == word["word"]["navi"]: |
word["translated"] = brokenword[1] |
# for brokenword in BROKENWORDS: |
# if brokenword[0] == word["word"]["navi"]: |
# word["translated"] = brokenword[1] |
if word["translated"] == u"": |
word["translated"] = word["word"]["navi"] |
if word["inf"][0] != u"": |
for fix in infixes1: |
for fix in INFIXES1: |
if fix[0] == word["inf"][0]: |
word["translated"] += '-' + fix[1] |
if word["inf"][1] != u"": |
for fix in infixes2: |
for fix in INFIXES2: |
if fix[0] == word["inf"][1]: |
word["translated"] += '-' + fix[1] |
if word["inf"][2] != u"": |
for fix in infixes3: |
for fix in INFIXES3: |
if fix[0] == word["inf"][2]: |
word["translated"] += '-' + fix[1] |
for temp in word["pref"]: |
for navf in temp: |
for fix in prefixes: |
for fix in PREFIXES: |
if fix[0] == navf: |
word["translated"] += '-' + fix[1] |
for temp in word["post"]: |
for navf in temp: |
for fix in postfixes: |
for fix in POSTFIXES: |
if fix[0] == navf: |
word["translated"] += '-' + fix[1] |
if word["len"]: |
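`translatesent()` decorates each parsed word with a gloss: the database translation of the root via `dbconnector.translate`, then one "-GLOSS" tag per recognised infix, prefix and postfix from the tables above. A hedged usage sketch, assuming the MySQL word database is reachable; the exact glosses depend on its contents:

```python
# Hypothetical call showing the shape of the output.
from tsimapiak import translate

for word in translate.translatesent(u"oel ngati kameie", "eng"):
    print(u"%s -> %s" % (word["word"]["navi"], word["translated"]))
# a root carrying the <ei> infix, for instance, would get "-LAUD." appended
```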
/tsimapiak/__init__.py |
---|
0,0 → 1,0 |
/tsimapiak/dbconnector.py |
---|
22,11 → 22,9 |
import tornado.database |
import re |
def getnavilist(): |
ret = [] |
current = u"" |
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi") |
for row in db.query(""" |
SELECT * |
40,13 → 38,13 |
db.close() |
return ret |
def translate(wid,language): |
def translate(wid, language): |
ret = None |
db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi") |
for row in db.query(""" |
SELECT * |
FROM `localizedWords` |
WHERE id = %s AND languageCode = %s""",wid,language): |
WHERE id = %s AND languageCode = %s""", wid, language): |
ret = row["localized"] |
break |
if ret == None: |
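Both queries go through tornado.database, the thin MySQLdb wrapper bundled with Tornado 1.x/2.x, and pass `wid` and `language` as query parameters so the driver handles the escaping. A hedged sketch of the same pattern; the connection details and the id value are placeholders:

```python
# Sketch only: assumes the legacy tornado.database module and a reachable MySQL
# server with the localizedWords table used above.
import tornado.database

db = tornado.database.Connection("127.0.0.1", "navi", user="navi", password="navi")
try:
    # %s placeholders are filled in by the driver, not by string formatting.
    for row in db.query("SELECT localized FROM `localizedWords` "
                        "WHERE id = %s AND languageCode = %s", 42, "hu"):
        print(row["localized"])
finally:
    db.close()
```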
/webapp/main.py |
---|
21,19 → 21,14 |
# along with Tsim Apiak. If not, see <http://www.gnu.org/licenses/>. |
from tsimapiak import parse, parsenum, translate |
import httplib |
import os |
import tornado.httpserver |
import tornado.ioloop |
import tornado.web |
import tornado.autoreload |
import os |
import re |
from tsimapiak import parsenum |
from tsimapiak import dbconnector |
from tsimapiak import parse |
from tsimapiak import translate |
class Index(tornado.web.RequestHandler): |
def get(self): |
self.render("templates/index.html") |
47,7 → 42,7 |
num = self.get_argument("num").strip() |
except: |
self.redirect("/number") |
numout = parsenum.parse(num.replace(" ","")) |
numout = parsenum.parse(num.replace(" ", "")) |
if numout == None: |
numoutt = -1 |
else: |
61,7 → 56,7 |
class Parse(tornado.web.RequestHandler): |
def get(self): |
self.render("templates/parse.html", last="", out=None) |
def post(self): |
try: |
word = self.get_argument("word") |
73,7 → 68,7 |
class Translate(tornado.web.RequestHandler): |
def get(self): |
self.render("templates/translate.html", last="", out=None, lang="eng") |
def post(self): |
try: |
word = self.get_argument("word") |
82,6 → 77,20 |
self.redirect("/translate") |
out = translate.translatesent(word, lang) |
self.render("templates/translate.html", last=word, out=out, lang=lang) |
class Errors(tornado.web.RequestHandler): |
def get_error_html(self, status_code, **kwargs): |
if status_code == 500: |
return "<html><title>%(code)d: %(message)s</title>" \ |
"<body>%(code)d: %(message)s<br /><br />Either we are currently working on the server, or you uncovered a bug. Please check back later on. If you still get this error, please report this bug to us in the forum thread or on IRC.</body></html>" % { |
"code": status_code, |
"message": httplib.responses[status_code], |
} |
else: |
return "<html><title>%(code)d: %(message)s</title>" \ |
"<body>%(code)d: %(message)s</body></html>" % { |
"code": status_code, |
"message": httplib.responses[status_code], |
} |
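`get_error_html` is the pre-Tornado-2.1 hook for custom error pages, so the Errors class above only takes effect for handlers that inherit from it (or if the hook is wired up in some way this hunk does not show). A hypothetical sketch of that wiring, reusing the parse page's render call:

```python
# Hypothetical: a handler picks up the custom error page by subclassing Errors.
class ParseWithErrors(Errors):
    def get(self):
        self.render("templates/parse.html", last="", out=None)
```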
settings = { |
"static_path": os.path.join(os.path.dirname(__file__), "static") |
98,5 → 107,4 |
if __name__ == "__main__": |
http_server = tornado.httpserver.HTTPServer(application) |
http_server.listen(1337) |
#tornado.autoreload.start() |
tornado.ioloop.IOLoop.instance().start() |