changeset 73:c20912354d1d

merge work
author hychen@mluna
date Thu, 23 Oct 2008 00:33:22 +0800
parents a4c364888197 (diff) d26eea95c52d (current diff)
children f2a7de8ac5c1 fe5bbf852498 a344c54f15c7
files
diffstat 12 files changed, 491 insertions(+), 3 deletions(-)
--- a/agent.pl	Tue Oct 21 01:36:28 2008 +0800
+++ b/agent.pl	Thu Oct 23 00:33:22 2008 +0800
@@ -43,8 +43,7 @@
     return 0;
 }
 
-sub is_happiness
-{
+sub is_happiness {
     my ($self, $city) = @_;
     # TODO: use fuzzy logic to pick a reasonable happiness value
     return ($city->{happiness} >= 2 ?  1 : 0)
@@ -94,6 +93,26 @@
     return 0;
 }
 
+sub is_expansion_researched {
+    my ($self, $city) = @_;
+    return (defined($city->{research}->{1030}) ?  1 : 0);
+}
+
+sub is_wealth_researched {
+    my ($self, $city) = @_;
+    return (defined($city->{research}->{2030}) ?  1 : 0);
+}
+
+sub is_professionalarmy_researched {
+    my ($self, $city) = @_;
+    return (defined($city->{research}->{4030}) ?  1 : 0);
+}
+
+sub is_paper_researched {
+    my ($self, $city) = @_;
+    return (defined($city->{research}->{3020}) ?  1 : 0);
+}
+
 sub is_drydock_researched {
     my ($self, $city) = @_;
     return (defined($city->{research}->{4010}) ?  1 : 0);
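Note on the new predicates above: each one simply checks whether a numeric research ID is present in the city's research table (1030 expansion, 2030 wealth, 3020 paper, 4030 professional army, 4010 drydock, per the subroutine names). A table-driven sketch of the same idea in Python, for the pyikriam side of this changeset (not part of the patch; names are illustrative):

    # research IDs taken from the Perl predicates above
    RESEARCH_IDS = {
        'expansion':        1030,
        'wealth':           2030,
        'paper':            3020,
        'drydock':          4010,
        'professionalarmy': 4030,
    }

    def is_researched(city, name):
        # mirrors agent.pl: defined($city->{research}->{$id}) ? 1 : 0
        return int(RESEARCH_IDS[name] in city.get('research', {}))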
--- a/building.yaml	Tue Oct 21 01:36:28 2008 +0800
+++ b/building.yaml	Thu Oct 23 00:33:22 2008 +0800
@@ -8,6 +8,27 @@
          # already building something.
          1:
            # adjust human resources
+           #
+           # related basic research (technology tree)
+           # Wealth
+            - is_wealth_researched:
+               0: research_economy
+               1:
+                  # Expansion
+                  - is_expansion_researched:
+                     0: research_seafaring
+                     1:
+                        # Paper
+                        - is_paper_researched:
+                           0: research_knowledge
+                           1:
+                              # Wine Press
+                              - is_winepress_researched:
+                                 0: research_economy
+                                 1:
+                                    # Professional Army
+                                    - is_professionalarmy_researched:
+                                       0: resaerch_military
          # let's find something to build up
          0:
            # check whether this is the capital city
@@ -30,7 +51,7 @@
                   - is_shipyard:
                      0: 
                         - is_drydock_researched: 
-                           0: resaerch_drydock
+                           0: resaerch_military
                            1: build_shipyard
             - is_happiness:
                0: 
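For readers of building.yaml: each `is_*` key names a predicate on the agent that returns 0 or 1, and the value under that result is either an action name (such as research_economy) or a nested list of further checks, so the block above walks Wealth -> Expansion -> Paper -> Wine Press -> Professional Army. A minimal evaluator sketch in Python (not part of the patch; the agent object and the top-level layout of the file are assumed):

    import yaml  # PyYAML, assumed available

    def evaluate(agent, node, city):
        # a leaf is an action name such as 'research_economy'
        if isinstance(node, basestring):
            return node
        # otherwise the node is a list of {predicate: {0: subtree, 1: subtree}}
        for check in node:
            for predicate, branches in check.items():
                result = getattr(agent, predicate)(city)  # e.g. is_wealth_researched
                if branches.get(result) is not None:
                    return evaluate(agent, branches[result], city)
        return None

Under these assumptions, evaluate() returns the first action whose branch matches the city's current state.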
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikb/ikariam.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import os,sys,re,string
+import cookielib,urllib2,urllib # for urlencode
+import time
+from sgmllib import SGMLParser  
+
+class ContentParser(SGMLParser):
+    def __init__(self):
+        SGMLParser.__init__(self)
+        self.anchor = {'link':'', 'title':''}
+        self.anchorlist = []
+        self.liattr = {}
+        self.liattcl = ''
+        self.inside_elements = ['site']
+        self.pat = re.compile('\r|\t|\n')
+
+    def start_a(self, attributes):
+        """For each anchor tag, pay attention to the href and title attributes."""
+        href, title = '', ''
+        for name, value in attributes:
+            if name.lower() == 'href': href = value
+            if name.lower() == 'title': title = value
+        self.anchor['link'] = href
+        self.anchor['title'] = title
+        self.inside_elements.append('anchor')
+
+    def end_a(self):
+        self.anchorlist.append(self.anchor) # store the anchor in a list
+        self.anchor = {'link':'', 'title':''}   # reset the dictionary
+        self.inside_elements.pop()
+
+    def handle_data(self, text):
+        if self.inside_elements[-1] == 'anchor':
+            self.anchor['title'] = text
+        if self.inside_elements[-1] == 'li':
+            text = self.pat.sub(' ', text)
+            text = string.split(text, " ")
+            if self.liattcl in self.liattr:
+                self.liattr[self.liattcl] = self.liattr[self.liattcl] + text
+            else:
+                self.liattr[self.liattcl] = text
+
+    def start_li(self, attributes):
+        self.liattcl = ''
+        attrs = dict(attributes)
+        if attrs.has_key('class'):
+            self.liattcl = attrs['class']
+            self.inside_elements.append('li')
+
+    def end_li(self):
+        if self.inside_elements[-1] == 'li':
+            self.inside_elements.pop()
+
+class connection(object):
+    def __init__(self):
+        self.page = ''
+        self.server = 's2.ikariam.tw'
+        self.baseurl = 'http://' + self.server
+        self.COOKIEFILE = '/tmp/ikcookies.lwp'
+        self.cj = cookielib.LWPCookieJar()
+        if os.path.isfile(self.COOKIEFILE):
+            self.cj.load(self.COOKIEFILE)
+        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
+        opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW; rv:1.8.1.12pre) Gecko/20071220 BonEcho/2.0.0.12pre')]
+        urllib2.install_opener(opener)
+
+    def login(self):
+        if not os.path.isfile(self.COOKIEFILE):
+            print "create cookie file " + self.COOKIEFILE
+            # /index.php?action=loginAvatar&function=login
+            params = {"universe":self.server, "name":'username', "password":'passwd'}
+            data = urllib.urlencode(params)
+            self.page = urllib2.urlopen(self.baseurl + '/index.php?action=loginAvatar&function=login', data).read()
+        self.cj.save(self.COOKIEFILE)
+        return 1
+
+    def parser(self):
+        parser = ContentParser()
+        parser.feed(self.page)
+        parser.close()
+        for x in parser.liattr.keys():
+            print x, parser.liattr[x]
+        #parser.anchorlist:
+
+    def logout(self):
+        logout = urllib2.urlopen(self.baseurl + '/index.php?action=loginAvatar&function=logout').read()
+        os.remove(self.COOKIEFILE)
+        return 1
+
+    def plunder(self):
+        '/index.php?view=plunder&destinationCityId=1978'
+
+
+    def upgradetest(self):
+        urllib2.urlopen(self.baseurl + '/index.php?view=academy&id=117257&position=9').read()
+        params = {"action":'CityScreen',
+                  "function":'upgradeBuilding',
+                  "id":'117257',
+                  "position":'9',
+                  "level":'7',
+                  "oldView":'academy'}
+        print urllib2.urlopen(self.baseurl + '/index.php?view=townHall&id=117257&position=0#upgrade', urllib.urlencode(params)).read()
+        return 1
+
+def help():
+    print ("Usage: %s [Option] [Channel] [second]") % os.path.basename(sys.argv[0])
+    print ("Option: ")
+    helplist = [
+        ("-h", "--help", "show this usage message."),
+        ("-g", "--game", "Login to the game")
+    ]
+    helplist.sort()
+    for x in helplist:
+        print ("\t%2s, %-25s %s" % x)
+
+if __name__ == '__main__':
+    if len(sys.argv) == 1:
+        help()
+        sys.exit(2) # common exit code for syntax errors
+    else:
+        arglist = sys.argv[1:]
+        if arglist[0] in ('--game', '-g'):
+            gc = connection()
+            gc.login()
+            gc.parser()
+            gc.logout()
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/__init__.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,57 @@
+from lazy.www import c
+import cookielib
+import os
+import urllib2
+import urllib
+class Ikariam:
+
+    cities = {}
+
+    def __init__(self, server, username, password):
+        self.COOKIEFILE = '/tmp/ikariam.lwp'
+        self.server=server
+        self.baseurl='http://'+self.server
+
+        self.cj = cookielib.LWPCookieJar()
+        if os.path.isfile(self.COOKIEFILE):
+            self.cj.load(self.COOKIEFILE)
+ 
+        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
+        opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW; rv:1.8.1.12pre) Gecko/20071220 BonEcho/2.0.0.12pre')]
+        urllib2.install_opener(opener)
+
+        self.login(username, password)
+
+    def login(self, username, password):
+        print "login to %s...." % self.server
+        params = {"universe":self.server, "name":username, "password":password}
+        ret = c(self.baseurl+'/index.php?action=loginAvatar&function=login').get(params).get_content()
+        self.cj.save(self.COOKIEFILE)
+
+    def logout(self):
+        print "logout from %s...." % self.server
+        c(self.baseurl+'/index.php?action=loginAvatar&function=logout').get()
+        os.remove(self.COOKIEFILE)
+
+    def city(self, id):
+        # cache city instances so repeated lookups share state
+        return self.cities.setdefault(id, IkariamCity(id=id, core=self))
+    
+class IkariamCity:
+    
+    def __init__(self, id, core ):
+        self.core = core
+        self.id = id
+        self.params = {'view':'city','id':id}
+        
+    def sync(self):
+        print "pull data for city %s" % self.id
+        xpath_globalinfo = "/html/body[@id='city']/div[@id='container']/div[@id='container2']/div[@id='globalResources']/ul"
+
+        xpath_gold = xpath_globalinfo + "/li[2]/a/span[@id='value_gold']/text()"
+        self.gold = c(self.core.baseurl).get(self.params).find(xpath_gold).get_content()[0]
+        
+if __name__ == '__main__':
+    i = Ikariam('s1.ikariam.tw', 'hychen', 'pwdyouknow')
+    city = i.city(117261)
+    city.sync()
+    print city.gold
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/example.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,6 @@
+from __init__ import Ikariam
+
+i = Ikariam(server='s1.ikariam.tw', username='hychen', password='')
+city = i.city(117261)
+city.sync()
+print 'gold is ' + city.gold
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/README	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,18 @@
Requirements:

	lxml - Python bindings for libxml2

	The following packages need to be installed before installing lxml:

    *  libxml2 2.6.21 or later. It can be found here: http://xmlsoft.org/downloads.html
    *  libxslt 1.1.15 or later. It can be found here: http://xmlsoft.org/XSLT/downloads.html

	If you use Ubuntu, here is what you need to do:

	$ apt-get install libxml2-dev libxslt1-dev
	$ easy_install lxml

Example:

	product = c('http://www.google.com.tw').find("/foo/bar/").working_product
	print product.content
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/__init__.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2008 Hsin Yi, Chen
+"""
+    [Note] the project is not available yet.
+
+    A web-page fetching tool chain with a jQuery-like selector and support
+    for chained calls.
+
+    Here is an example that shows the main idea: retrieve the content you
+    want from a div box in a web page, build POST parameters from it, then
+    retrieve the next piece of content from another page with those
+    parameters. Finally, store the product.
+
+    def func(s):
+        return {'msg':s}
+
+    try:
+        c("http://example.tw/").get().find("#id > div") \
+            .build_param( func ).post_to("http://example2.com") \
+            .save_as('hello.html')
+    except:
+        pass
+
+    A more complex example:
+
+    try:
+        c("http://example.tw/").retry(4, '5m').get() \
+            .find("#id > div") \
+            .build_param( func ).post_to("http://example2.com") \
+            .save_as('hello.html') \
+            .end().find("#id2 > img").download('pretty-%s.jpg') \
+            .tar_and_zip("pretty_girl.tar.gz")
+    except NotFound:
+        print "the web page was not found."
+    except NoPermissionToSave:
+        print "the files cannot be saved due to insufficient permissions."
+    except:
+        print "unknown error."
+"""
+from lazy.www.work import WorkFlow
+from lazy.www.work.fetch import Fetcher, install_opener
+from lazy.www.core import SemiProduct
+
+def c(url):
+    """
+    connect to a web apge
+    
+    >>> c('http://localhost:8080').get().worker.working_product.content
+    'It works!!\\n'
+    
+    >>> c('http://localhost:8080').get().find('//text()')
+    'It works!!\\n'    
+    """
+    s= SemiProduct(source=url)    
+    w = WorkFlow(Fetcher(s))
+    return w
+
+def lz_install(**kwds):
+    if('opener' == kwds.get('name')):
+       install_opener(kwds.get('cookiefile'))
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/core/__init__.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,12 @@
+
+class SemiProduct:
+
+    last_work = None
+    source = None
+    content = None
+    
+    def __init__(self, **kwds):
+        self.source = kwds.get('source','')        
+        
+    def __str__(self):        
+        return self.content
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/core/utils.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,4 @@
+
+def mix_in(py_class, mixin_class):
+    if mixin_class not in py_class.__bases__:
+        py_class.__bases__ += (mixin_class,)
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/work/__init__.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,52 @@
+from lazy.www.work.fetch import Fetcher
+from lazy.www.work.find import Finder
+from lazy.www.core import SemiProduct
+class WorkFlow:
+    
+    serial_number = 0
+    working_product = None
+    worker = None
+    
+    def __init__(self, worker):
+        self.set_worker(worker)
+
+    def set_worker(self, worker):
+        self.worker = worker
+        if self.worker.working_product is None:
+            self.working_product = SemiProduct()
+        else:
+            self.working_product = self.worker.working_product
+    
+    def get_content(self):
+        return self.working_product.content
+     
+    def change_worker(self, new_worker):
+        self.serial_number += 1
+        self.worker = new_worker
+        
+    def is_fetcher(self, obj):
+        return isinstance(obj, Fetcher)
+    
+    def get(self, data = {} ):
+        if not self.is_fetcher(self.worker) :
+            self.change_worker( Fetcher(self.working_product) )
+        
+        self.working_product.content = self.worker.get(data)
+        return self
+            
+    def post(self, data = {} ):
+        if not self.is_fetcher(self.worker):
+            self.change_worker( Fetcher(self.working_product) )
+        
+        self.working_product.content = self.worker.post(data)
+        return self
+    
+    def is_finder(self, obj):
+        return isinstance(obj, Finder)
+    
+    def find(self, express):
+        #if not self.is_finder(self.worker):
+        self.worker = Finder(self.working_product)
+        self.working_product.content = self.worker.find(express)
+        
+        return self
\ No newline at end of file
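A short usage sketch (not part of the patch) of how the pieces compose, assuming the same local test server as the doctests in fetch.py: c() wraps a Fetcher in a WorkFlow, get() stores the response body on the shared SemiProduct, and find() swaps in a Finder that runs an XPath query over that body:

    from lazy.www import c

    flow = c('http://localhost:8080').get()       # fetch the page
    print flow.find('//text()').get_content()     # -> ['It works!!\n']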
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/work/fetch.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,83 @@
+import urllib2
+import urllib
+import cookielib
+import os
+from lazy.www.core import SemiProduct  # used by the doctests below
+
+def install_opener(cookiefile):
+    COOKIEFILE = cookiefile
+    cj = cookielib.LWPCookieJar()
+    if os.path.isfile(COOKIEFILE):
+        cj.load(COOKIEFILE)
+    else:
+        cj.save(COOKIEFILE)
+    # install the cookie-aware opener whether or not the cookie file existed
+    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
+    opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-TW; rv:1.8.1.12pre) Gecko/20071220 BonEcho/2.0.0.12pre')]
+    urllib2.install_opener(opener)
+
+class Fetcher:
+    """
+    A semi-product decorator for content fetching.
+
+    Handles content retrieval.
+
+    >>> o = Fetcher( SemiProduct(source="http://localhost:8080") )
+    >>> o.get()
+    'It works!!\\n'
+    """
+
+    opener = None
+    working_product = None
+
+    def __init__(self, working_product):
+        self.working_product = working_product
+
+    def get(self, data = None):
+        """
+        send data via an http request.
+
+        note: urllib2 switches to POST whenever a body is supplied,
+        so no body is sent unless data is given.
+        """
+        if data:
+            res = urllib2.urlopen(self.working_product.source, urllib.urlencode(data))
+        else:
+            res = urllib2.urlopen(self.working_product.source)
+        return res.read()
+
+    def post(self, data = None):
+        """
+        send data via the http post method.
+
+        >>> o = Fetcher( SemiProduct(source="http://localhost:8080") )
+        >>> o.post({'a':'b'})
+        'It works!!\\n'
+        """
+        res = urllib2.urlopen(self.working_product.source, urllib.urlencode(data or {}))
+        return res.read()
+
+    def refer(self, refer_url):
+        """
+        referer getter/setter.
+
+        >>> o = Fetcher( SemiProduct(source="http://localhost:8080") )
+        >>> o.refer('http://www.example.com')        
+        """
+        raise NotImplementedError
+
+    def retry(self, count = 0, intval = 0, timeout = 0):
+        """
+        retry to fetch the content.
+
+        >>> o = Fetcher( SemiProduct(source="http://localhost:8080") )
+        >>> o.retry(4)        
+        """        
+        raise NotImplementedError
+    
+class Retry:
+    
+    """
+     A Fetcher Decoration for retry goal.
+     
+     
+    """
+    def __init__(self, fetcher):
+        raise NotImplementedError
+    
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/pyikriam/lazy/www/work/find.py	Thu Oct 23 00:33:22 2008 +0800
@@ -0,0 +1,23 @@
+from lxml import etree
+from cStringIO import StringIO
+
+class Finder:
+
+    dom_tree = None
+    xpath = None
+
+    def __init__(self, working_product):
+        self.working_product = working_product
+
+        self.encoding = 'utf8'
+        parser = etree.HTMLParser(encoding=self.encoding)
+        self.dom_tree = etree.parse(StringIO(self.working_product.content), parser)
+
+    def find(self, express, callback = None):
+        xpath = self.dom_tree.xpath(express)
+
+        if callback is None:
+            ret = xpath
+        else:
+            ret = callback(xpath)
+        return ret
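A Finder usage sketch (not part of the patch; the HTML snippet is made up): it parses the fetched content once with lxml's HTML parser and then answers XPath queries against the resulting tree:

    from lazy.www.core import SemiProduct
    from lazy.www.work.find import Finder

    page = SemiProduct(source='http://localhost:8080')
    page.content = '<html><body><h1>It works!!</h1></body></html>'
    print Finder(page).find('//h1/text()')   # -> ['It works!!']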