# -- dataset-export residue (table header), kept inert as comments --
# code stringlengths 1 1.72M | language stringclasses 1
# value |
# |---|---|
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from suning.suningparser import parserDict,sunningRoot
from spider import main
if __name__ == '__main__':
    # crawl suning.com with its category-level parser table
    # (fixed: stray " | Python" export residue made this line raise NameError)
    main(sunningRoot, parserDict)
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from efeihu.efeihupageparser import parserDict,efeihuRoot
from spider import main
if __name__ == '__main__':
    # crawl efeihu.com with its category-level parser table
    # (fixed: stray " | Python" export residue made this line raise NameError)
    main(efeihuRoot, parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-22
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import urlparse
from spiderconfigparser import SpiderConfig
# root URL summary seeding the efeihu.com crawl (category level 0)
efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
class EfeihuAllSortParser(RootCatagoryPageParser):
    '''Parses the efeihu "all categories" root page into level-3 URL summaries.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk the three-level category tree and collect the leaf (level-3) summaries.'''
        leaves = []
        siteSort = self.soup.find(attrs={'id':'sitesort'})
        # first-level categories live in <div id="sort_hd_N"> blocks
        for topDiv in siteSort.findAll(name='div', attrs={'id':re.compile(r'sort_hd_[0-9]*')}):
            topName = topDiv.h3.a.contents[0]
            topUrl = topDiv.h3.a['href']
            topSum = self.buildSort_N(topUrl, topName, self.rootUrlSummary, isCrawle=False)
            subItem = topDiv.find(attrs={'class':'subitem'})
            for dlTag in subItem(name='dl'):  # second-level categories
                midName, midUrl = ParserUtils.parserTag_A(dlTag.dt.a)
                midSum = self.buildSort_N(midUrl, midName, topSum, isCrawle=False)
                for emTag in dlTag.dd(name='em'):  # third-level (leaf) categories
                    leafName, leafUrl = ParserUtils.parserTag_A(emTag.a)
                    leaves.append(self.buildSort_N(leafUrl, leafName, midSum, firstFinalPage=True))
        return leaves
class EfeihuSort3PageParser(Sort3PageParser):
    '''
    Level-3 (category listing) page parser for efeihu.com: computes the
    page count, next-page URL pattern and the product records on a page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''URL template for sibling pages: the trailing '--1' page marker becomes '--{}'.'''
        return self.rootUrlSummary.url.replace('--1', '--{}')
    def getTotal(self):
        '''Return the number of listing pages, capped at SpiderConfig.getMaxPage().'''
        pager = self.soup.find(name='div', attrs={'id': 'ctl00_ContentPlaceHolder1_ucProductItemWithPager1_AspNetPager_down'})
        if pager is None:
            pageNum = 1  # no pager widget -> single page
        else:
            # the last 'btn_next' anchor links to the final page; its URL
            # ends in '...-<lastPage>.html'
            lastNext = pager(name='a', attrs={'class': 'btn_next'})[-1]
            name, url = ParserUtils.parserTag_A(lastNext)
            pageNum = int(url.split('/')[-1].split('.')[0].split('-')[-1])
        maxPage = SpiderConfig.getMaxPage()
        if pageNum > maxPage:
            pageNum = maxPage
        return pageNum
    def __getSingleProdDetail(self, prod):
        '''Build a ProductDetails from one <li class="m_pro"> product node.'''
        infoSeg = prod.find(attrs={'class': 'infor'})
        pNameHref = infoSeg.find(name='a', attrs={'class': 'name'})
        pName, url = ParserUtils.parserTag_A(pNameHref)
        url = ''.join(('http://www.efeihu.com', url))
        pid = url.split('/')[-1].split('.')[0]  # product id is the URL basename
        adwords = infoSeg.find(name='p', attrs={'class': 'promtn'}).getText()
        t = infoSeg.find(name='span', attrs={'class': 'price_e'})
        if t is not None:  # fixed: identity test against None instead of '!='
            currentPrice = ParserUtils.getPrice(t.getText())
        else:
            currentPrice = 0.00
        t = infoSeg.find(name='span', attrs={'class': 'price_del'})
        if t is not None:
            pastPrice = ParserUtils.getPrice(t.getText())
        else:
            pastPrice = 0.00
        evalNum = ParserUtils.getDigit(infoSeg.find(name='div', attrs={'class': 'comment'}).a.getText())
        imgUrlSeg = prod.find(name='a', attrs={'class': 'img'})
        imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
        prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice,
                                    pubPrice=pastPrice, name=pName, adWords=adwords, evaluateNum=evalNum)
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Return a ProductDetails list for every product on this listing page.'''
        plist = self.soup.find(name='ul', attrs={'id': 'prolist'})
        resultList = []
        for prod in plist(name='li', attrs={'class': 'm_pro'}):
            resultList.append(self.__getSingleProdDetail(prod))
        return resultList
class EfeihuSort4PageParser(EfeihuSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product details are
    extracted, no further sub-URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # deliberately returns None: stops the crawl recursion at this depth
        pass
# crawl depth -> parser class; consumed by spider.main
parserDict = {0:EfeihuAllSortParser, 3:EfeihuSort3PageParser, 4:EfeihuSort4PageParser}
''' test '''
# manual-test helpers: fixture pages live in test_resources/ next to this module
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
    '''Manual check: parse a saved copy of the root page and print the category tree.'''
    fileName = os.path.join(testFilePath,'efeihu.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
    firstPage = EfeihuAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Manual check: fetch one live listing page, print pager URLs and products.'''
    # NOTE(review): parentPath=[('test')] is a list holding the plain string
    # 'test' (the parentheses do not make a tuple) -- confirm intended.
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.efeihu.com/Products/89-0-0-0-0-0-40--1.html',
                                   parentPath=[('test')], catagoryLevel=3)
    content = getContentFromUrlSum(sort_3_urlsum)
    sort3Page = EfeihuSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    # ad-hoc manual test entry point
    # (fixed: dropped trailing " | Python" export residue)
    testSort3Page()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from efeihu.efeihupageparser import parserDict,efeihuRoot
from spider import main
if __name__ == '__main__':
    # crawl efeihu.com with its category-level parser table
    # (fixed: stray " | Python" export residue made this line raise NameError)
    main(efeihuRoot, parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-22
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import urlparse
from spiderconfigparser import SpiderConfig
# root URL summary seeding the efeihu.com crawl (category level 0)
efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
class EfeihuAllSortParser(RootCatagoryPageParser):
    '''Root-page parser: extracts every third-level category of efeihu.com.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Return ObuyUrlSummary objects for all level-3 (leaf) categories.'''
        finalUrlList = []
        allSort = self.soup.find(attrs={'id':'sitesort'})
        for t in allSort.findAll(name='div',attrs={'id':re.compile(r'sort_hd_[0-9]*')}):  # first-level categories
            name = t.h3.a.contents[0]
            url = t.h3.a['href']
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs={'class':'subitem'})
            for tt in sort_2(name='dl'):  # second-level categories
                name, url = ParserUtils.parserTag_A(tt.dt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd(name='em'):  # third-level (leaf) categories
                    name, url = ParserUtils.parserTag_A(ttt.a)
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class EfeihuSort3PageParser(Sort3PageParser):
    '''
    Level-3 (category listing) page parser for efeihu.com: computes the
    page count, next-page URL pattern and the product records on a page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''URL template for sibling pages: the trailing '--1' page marker becomes '--{}'.'''
        return self.rootUrlSummary.url.replace('--1', '--{}')
    def getTotal(self):
        '''Return the number of listing pages, capped at SpiderConfig.getMaxPage().'''
        pager = self.soup.find(name='div', attrs={'id': 'ctl00_ContentPlaceHolder1_ucProductItemWithPager1_AspNetPager_down'})
        if pager is None:
            pageNum = 1  # no pager widget -> single page
        else:
            # the last 'btn_next' anchor links to the final page; its URL
            # ends in '...-<lastPage>.html'
            lastNext = pager(name='a', attrs={'class': 'btn_next'})[-1]
            name, url = ParserUtils.parserTag_A(lastNext)
            pageNum = int(url.split('/')[-1].split('.')[0].split('-')[-1])
        maxPage = SpiderConfig.getMaxPage()
        if pageNum > maxPage:
            pageNum = maxPage
        return pageNum
    def __getSingleProdDetail(self, prod):
        '''Build a ProductDetails from one <li class="m_pro"> product node.'''
        infoSeg = prod.find(attrs={'class': 'infor'})
        pNameHref = infoSeg.find(name='a', attrs={'class': 'name'})
        pName, url = ParserUtils.parserTag_A(pNameHref)
        url = ''.join(('http://www.efeihu.com', url))
        pid = url.split('/')[-1].split('.')[0]  # product id is the URL basename
        adwords = infoSeg.find(name='p', attrs={'class': 'promtn'}).getText()
        t = infoSeg.find(name='span', attrs={'class': 'price_e'})
        if t is not None:  # fixed: identity test against None instead of '!='
            currentPrice = ParserUtils.getPrice(t.getText())
        else:
            currentPrice = 0.00
        t = infoSeg.find(name='span', attrs={'class': 'price_del'})
        if t is not None:
            pastPrice = ParserUtils.getPrice(t.getText())
        else:
            pastPrice = 0.00
        evalNum = ParserUtils.getDigit(infoSeg.find(name='div', attrs={'class': 'comment'}).a.getText())
        imgUrlSeg = prod.find(name='a', attrs={'class': 'img'})
        imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
        prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice,
                                    pubPrice=pastPrice, name=pName, adWords=adwords, evaluateNum=evalNum)
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Return a ProductDetails list for every product on this listing page.'''
        plist = self.soup.find(name='ul', attrs={'id': 'prolist'})
        resultList = []
        for prod in plist(name='li', attrs={'class': 'm_pro'}):
            resultList.append(self.__getSingleProdDetail(prod))
        return resultList
class EfeihuSort4PageParser(EfeihuSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product details are
    extracted, no further sub-URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EfeihuSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # deliberately returns None: stops the crawl recursion at this depth
        pass
# crawl depth -> parser class; consumed by spider.main
parserDict = {0:EfeihuAllSortParser, 3:EfeihuSort3PageParser, 4:EfeihuSort4PageParser}
''' test '''
# manual-test helpers: fixture pages live in test_resources/ next to this module
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
    '''Manual check: parse a saved copy of the root page and print the category tree.'''
    fileName = os.path.join(testFilePath,'efeihu.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
    firstPage = EfeihuAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Manual check: fetch one live listing page, print pager URLs and products.'''
    # NOTE(review): parentPath=[('test')] is a list holding the plain string
    # 'test' (the parentheses do not make a tuple) -- confirm intended.
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.efeihu.com/Products/89-0-0-0-0-0-40--1.html',
                                   parentPath=[('test')], catagoryLevel=3)
    content = getContentFromUrlSum(sort_3_urlsum)
    sort3Page = EfeihuSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    # ad-hoc manual test entry point
    # (fixed: dropped trailing " | Python" export residue)
    testSort3Page()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-28
@author: zhongfeng
'''
from crawlerhttp import crawle
from logfacade import LoggerFactory
from threadpool import ThreadPool, WorkRequest
from urlparse import urlparse
import os, sys
import time
import threading
from threading import stack_size
stack_size(32768 * 32)  # per-thread stack = 1 MiB; applies to threads created after this call
class ObuySpider(object):
    '''Thread-pooled crawler: fetches pages, dispatches them to the parser
    registered for their category level, re-enqueues failures and logs the
    extracted records.'''
    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
                 procDetails=True, include=None, exclude=None, rootPageResult=None):
        self.rootUrlSummary = rootUrlSummary
        self.parserDict = parserDict           # {catagoryLevel: ParserClass}
        self.procDetails = procDetails         # whether to extract per-page detail records
        self.rootUrlSummary.include = include  # URL whitelist pushed down to the root summary
        self.rootUrlSummary.exclude = exclude  # URL blacklist
        self.pool = ThreadPool(threadNum)
        self.stat = dict()                     # per-category extraction counters
        self.rootPageResult = rootPageResult   # optional pre-fetched root page (file import mode)
    def init_urls(self):
        '''Seed the work queue with the root URL.'''
        return self.putSpideRequest(self.rootUrlSummary)
    def spide(self):
        '''Run the crawl to completion, then print the stats.'''
        self.init_urls()
        self.pool.wait()
        self.__printStatResult()
    def procSubUrlRequests(self, parser, result):
        '''Enqueue every sub-URL the parser discovered on this page.'''
        parserResult = parser.parserSubUrlSums()
        if parserResult is not None:
            for subUrlSum in parserResult:
                logger = LoggerFactory.getLogger()
                # NOTE(review): isEnabledFor normally takes an int level --
                # confirm LoggerFactory's loggers accept the string form
                if logger.isEnabledFor('DEBUG'):
                    logger.debug('SubUrlSum put Q: %s ,level: %s'
                                 % (subUrlSum.url, subUrlSum.catagoryLevel))
                self.putSpideRequest(subUrlSum)
    def procPageInfos(self, parser, urlsum):
        '''Extract detail records (e.g. products) and log one line per record.'''
        resultList = parser.parserPageInfos()
        if resultList is not None:
            siteName = urlparse(urlsum.url).hostname.split('.')[1]
            logger = LoggerFactory.getLogger(logName=siteName)
            for parserResult in resultList:
                if logger.isEnabledFor('INFO'):
                    logger.info(parserResult.logstr())
    def putSpideRequest(self, urlsum):
        '''Wrap a URL summary in a WorkRequest and queue it on the pool.'''
        req = WorkRequest(self.main_spide, [urlsum], None,
                          callback=None, exc_callback=self.handle_exception)
        self.pool.putRequest(req)
    def saveErrorPage(self, url, content):
        '''Dump a page that failed to parse under error_page/<site>/<date>/.'''
        curModDir = os.path.abspath(os.path.dirname(sys.argv[0]))
        siteName = urlparse(url).hostname.split('.')[1]
        curtDate = time.strftime("%Y-%m-%d")
        errorFilePath = os.path.join(curModDir, 'error_page', siteName, curtDate)
        if not os.path.exists(errorFilePath):
            os.makedirs(errorFilePath)
        curtime = time.strftime("%Y-%m-%d_%H-%M-%S")
        fileName = '%s_%s.html' % (siteName, curtime)
        fullPath = os.path.join(errorFilePath, fileName)
        with open(fullPath, 'w') as output:
            output.write(content)
    def handle_exception(self, request, exc_info):
        '''Pool callback: log any exception raised inside a work request.'''
        logger = LoggerFactory.getLogger()
        logger.error("**** Exception occured in request #%s: %s,%s" %
                     (request.requestID, exc_info, request))
    def proc_result(self, request, result):
        pass
    def reinqueue_proc(self, urlsum, result):
        '''Re-queue a failed URL while retries remain; otherwise record the failure.'''
        logger = LoggerFactory.getLogger()
        if urlsum.retries > 0:
            urlsum.retries = urlsum.retries - 1
            logger.error("urlsum reinqueue:%s" % urlsum.url)
            self.putSpideRequest(urlsum)
        else:
            urlsum.stat = result.code
            logger.error("Failed %s:%d" % (urlsum.url, result.code))
    def writeStat(self, stat, urlsum, retSize):
        '''Accumulate the number of records extracted for one category.'''
        if retSize > 0:
            stat[urlsum] = stat.get(urlsum, 0) + retSize
    def procParserResult(self, result, urlsum, parser):
        '''Recurse into sub-URLs and/or extract details, per the summary's flags.'''
        if urlsum.isRecursed:
            self.procSubUrlRequests(parser, result)
        if self.procDetails:
            self.procPageInfos(parser, urlsum)
    def proc_normal_result(self, reqArgs, result):
        '''Handle a fetched page: parse on HTTP 200, otherwise retry.'''
        urlsum = reqArgs[0]
        logger = LoggerFactory.getLogger()
        if result.content == '':
            self.reinqueue_proc(urlsum, result)
            return
        if result.code == 200:
            ParserClass = self.parserDict.get(urlsum.catagoryLevel, None)
            if ParserClass is None:
                return  # no parser registered for this level
            parser = ParserClass(result.content, urlsum, urlsum.include,
                                 urlsum.exclude)
            try:
                self.procParserResult(result, urlsum, parser)
            except Exception as e:
                logger.error('ParserException.Reason:%s,URL:%s' % (e, urlsum.url))
                self.saveErrorPage(urlsum.url, result.content)
        else:
            # BUG FIX: the format string was missing the %d placeholder for
            # result.code, so this line itself raised TypeError instead of
            # logging the failure.
            logger.error('Get From URL:%s Error code:%d' % (urlsum.url, result.code))
            self.reinqueue_proc(urlsum, result)
    def main_spide(self, *req):
        '''Worker entry point: fetch (or reuse the preloaded root page) and process.'''
        urlsum = req[0]
        logger = LoggerFactory.getLogger()
        logger.info("Q Size: %d|Name:%s |URL: %s"
                    % (self.pool._requests_queue.qsize(), urlsum.name, urlsum.url))
        # the root page may be injected from a local file instead of fetched
        if urlsum.catagoryLevel == 0 and self.rootPageResult is not None:
            self.proc_normal_result(req, self.rootPageResult)
            return self.rootPageResult
        result = crawle(urlsum)
        self.proc_normal_result(req, result)
        return result
    def __printStatResult(self):
        for k, v in self.stat.items():
            print('Catagory:%s,Num:%d' % (k.name, v))
from spiderconfigparser import getExcludeUrlSums,getIncludeUrlSums,SpiderConfig
from upload import fileUpload
def main(root, parserDict,SpiderClass = ObuySpider):
    '''Run a full crawl for one site, then optionally upload the logs.

    root: the site's root ObuyUrlSummary; parserDict: level -> parser class;
    SpiderClass: spider implementation to instantiate (default ObuySpider).
    '''
    if SpiderConfig.isStartSpider():
        includes = getIncludeUrlSums()
        excludes = getExcludeUrlSums()
        spider = SpiderClass(rootUrlSummary=root, parserDict=parserDict, include=includes,
                             exclude=excludes, threadNum = SpiderConfig.getThreadNum())
        spider.spide()
        LoggerFactory.shutdown()  # flush log files before uploading them
    if SpiderConfig.isUpload():
        fileUpload()
if __name__ == '__main__':
    # NOTE(review): only creates a logger; presumably a manual smoke test
    # (fixed: dropped trailing " | Python" export residue)
    logger = LoggerFactory.getLogger()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-9-24
@author: zhongfeng
'''
import re,os,sys
import chardet
from pageparser import ObuyUrlSummary
from utils import Singleton
from ConfigParser import ConfigParser, NoOptionError
def __getUrlSumsFromSection(section):
    '''Read urls.cfg and return the ObuyUrlSummary list declared under [section].

    Each non-empty line inside the section is "url, name, catagoryLevel".
    Returns an empty list when the file is missing.
    '''
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    fileName = os.path.join(curPath, 'urls.cfg')
    # BUG FIX: the message used to claim 'spider.cfg' but this function reads urls.cfg
    print('urls.cfg full path:%s' % fileName)
    urls = list()
    if not os.path.exists(fileName):
        return urls
    includeSecRegx = re.compile(r'\[%s\]' % section)
    otherSecRegx = re.compile(r'\[.*\]')
    flag = False  # True while scanning inside the wanted section
    with open(fileName) as inputFile:
        for line in inputFile:
            # lines may be written in any encoding; sniff each one
            encoding = chardet.detect(line)['encoding']
            line = line.decode(encoding, 'ignore')
            if (not flag) and includeSecRegx.match(line):
                flag = True
            elif flag:
                if otherSecRegx.match(line):
                    break  # the next section starts -> done
                if line.strip() != '':
                    line = ' '.join(line.split())
                    ret = [it.strip() for it in line.split(',')]
                    urls.append(ObuyUrlSummary(name=ret[1], url=ret[0],
                                               catagoryLevel=int(ret[2])))
    return urls
def getIncludeUrlSums():
    '''URL summaries listed under [include] in urls.cfg.'''
    return __getUrlSumsFromSection('include')
def getExcludeUrlSums():
    '''URL summaries listed under [exclude] in urls.cfg.'''
    return __getUrlSumsFromSection('exclude')
class SpiderConfig(Singleton):
    '''Lazy singleton view over spider.conf ([conf] section).'''
    @classmethod
    def _init(cls):
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        fileName = os.path.join(curPath, 'spider.conf')
        cls.cf = ConfigParser()
        cls.cf.read(fileName)  # ConfigParser.read silently ignores a missing file
    @classmethod
    def getConfig(cls, option):
        '''Return the raw string for option, or None when missing/unreadable.'''
        if not hasattr(cls, 'cf'):
            cls._init()
        try:
            return cls.cf.get('conf', option)
        except Exception:
            return None
    @classmethod
    def getMaxPage(cls):
        '''Max listing pages crawled per category (default 50).'''
        ret = cls.getConfig('max_page')
        if ret:
            return int(ret)
        return 50
    @classmethod
    def getThreadNum(cls):
        '''Worker thread count (default 10).'''
        threadNum = cls.getConfig('thread_num')
        if threadNum:
            return int(threadNum)
        return 10
    @classmethod
    def getProxy(cls):
        '''Proxy address "host:port", or None when unconfigured.'''
        return cls.getConfig('ftp_proxy')
    @classmethod
    def isStartSpider(cls):
        '''Whether crawling is enabled (default True).

        BUG FIX: int(cls.getConfig(...)) used to raise TypeError when the
        option was absent (getConfig returns None), so the default below
        was unreachable.
        '''
        flag = cls.getConfig('is_spider')
        if flag is not None:
            return int(flag)
        return True
    @classmethod
    def isUpload(cls):
        '''Whether log upload is enabled (default True). Same fix as isStartSpider.'''
        flag = cls.getConfig('is_upload')
        if flag is not None:
            return int(flag)
        return True
if __name__ == '__main__':
    # smoke test: print the effective configuration values
    # (fixed: dropped trailing " | Python" export residue; print() form
    # behaves identically with a single argument)
    print(SpiderConfig.getMaxPage())
    print(SpiderConfig.getThreadNum())
    print(SpiderConfig.getProxy())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-29
@author: zhongfeng
'''
import os, time
import zipfile
from ftplib import FTP,error_perm
import sys,glob
from proxysock import setup_http_proxy
from spiderconfigparser import SpiderConfig
# FTP endpoint used for log upload.
# NOTE(review): credentials are hard-coded in source -- move them to config.
ip = '58.64.204.70'
port = '21'  # NOTE(review): kept as a string; confirm ftplib accepts it
username = '55bigo'
passwd = '55bigoadmin'
def rename(fFullName):
    '''Rename a file to "<name>.<YYYY-MM-DD>" (mtime date, GMT) and return the new path.

    An existing file at the target path is overwritten. Raises whatever the
    underlying os call raised on failure.
    '''
    stamp = time.strftime("%Y-%m-%d", time.gmtime(os.path.getmtime(fFullName)))
    print('old file:%s' % fFullName)
    new = '.'.join((fFullName, stamp))
    print('new file:%s' % new)
    try:
        if os.path.exists(new):
            os.remove(new)
        os.rename(fFullName, new)
    except Exception as e:
        print(e)
        # BUG FIX: bare `raise` keeps the original traceback; `raise e`
        # discarded it under Python 2.
        raise
    return new
def fileUpload(remoteErrPath = r'/opt/errlog',remoteLogPath = r'/opt/log'):
    '''Upload the site log (and error report, if present) over FTP, then
    move both into local log/ and errlog/ backup directories.

    Uses the configured HTTP proxy for the socket when one is set.
    '''
    proxy = SpiderConfig.getProxy()
    if proxy :
        proxyIp = proxy.split(':')[0]
        proxyPort = proxy.split(':')[1]
        setup_http_proxy(proxyIp,proxyPort)
    curpath = os.path.abspath(os.path.dirname(sys.argv[0]))
    # the crawl log is named '<site>_spider.log' next to the script
    ret = glob.glob1(curpath,r'*_spider.log')
    siteName = None
    if ret:
        logFileName = ret[0]
        siteName = logFileName.split('_')[0]
        fullLogF = os.path.join(curpath,logFileName)
        newLogFile = rename(fullLogF)  # adds the date suffix
        rlogPath = '/'.join((remoteLogPath,siteName))
        ftpupload(newLogFile,rlogPath)
        print 'upload success '
        # move the uploaded log into the local log/ backup directory
        print 'backup >>>>>'
        logDir = os.path.join(curpath,'log')
        if not os.path.exists(logDir):
            os.mkdir(logDir)
        backUpLogPath = os.path.join(logDir,os.path.split(newLogFile)[-1])
        print 'move %s to %s' % (newLogFile,backUpLogPath)
        if os.path.exists(backUpLogPath):
            os.remove(backUpLogPath)
        os.rename(newLogFile, backUpLogPath)
    errReport = os.path.join(curpath,r'err_report.log')
    if siteName and os.path.exists(errReport):
        newErrLogName = rename(errReport)
        rerrLogPath = '/'.join((remoteErrPath,siteName))
        ftpupload(newErrLogName,rerrLogPath)
        # move the uploaded error report into the local errlog/ directory
        errlogDir = os.path.join(curpath,'errlog')
        if not os.path.exists(errlogDir):
            os.mkdir(errlogDir)
        backUpErrLogPath = os.path.join(errlogDir,os.path.split(newErrLogName)[-1])
        print 'move %s to %s' % (newErrLogName,backUpErrLogPath)
        if os.path.exists(backUpErrLogPath):
            os.remove(backUpErrLogPath)
        os.rename(newErrLogName, backUpErrLogPath)
def ftpupload(filename, remotePath):
    '''Upload filename into remotePath on the configured FTP server,
    creating the remote directory when it does not exist.
    '''
    ftp = FTP()
    ftp.set_debuglevel(2)
    ftp.connect(ip, port)
    ftp.login(username, passwd)
    try:
        ftp.cwd(remotePath)
    except error_perm:  # narrowed from a bare except: only permission/no-such-dir errors
        try:
            ftp.mkd(remotePath)
            # BUG FIX: after creating the directory we must enter it,
            # otherwise the file was stored in the login directory.
            ftp.cwd(remotePath)
        except error_perm:
            print('U have no authority to make dir')
    bufsize = 8192
    file_handler = open(filename, 'rb')
    try:
        ftp.storbinary('STOR %s' % os.path.basename(filename), file_handler, bufsize)
    finally:
        # always release the connection and the file, even on upload failure
        ftp.set_debuglevel(0)
        file_handler.close()
        ftp.quit()
#===============================================================================
#
# class ZFile(object):
# def __init__(self, filename, mode='r', basedir=''):
# self.filename = filename
# self.mode = mode
# if self.mode in ('w', 'a'):
# self.zfile = zipfile.ZipFile(filename, self.mode, compression=zipfile.ZIP_DEFLATED)
# print self.zfile.filename
# else:
# self.zfile = zipfile.ZipFile(filename, self.mode)
# self.basedir = basedir
# if not self.basedir:
# self.basedir = os.path.dirname(filename)
#
# def addfile(self, path, arcname=None):
# path = path.replace('\\', '/')
# if not arcname:
# if path.startswith(self.basedir):
# arcname = path[len(self.basedir):]
# else:
# arcname = ''
# self.zfile.write(path, arcname)
#
# def addfiles(self, paths):
# for path in paths:
# if isinstance(path, tuple):
# self.addfile(*path)
# else:
# self.addfile(path)
#
# def close(self):
# self.zfile.close()
#
# def extract_to(self, path):
# for p in self.zfile.namelist():
# self.extract(p, path)
#
# def extract(self, filename, path):
# if not filename.endswith(''):
# f = os.path.join(path, filename)
# dir = os.path.dirname(f)
# if not os.path.exists(dir):
# os.makedirs(dir)
# file(f, 'wb').write(self.zfile.read(filename))
#
#
# def create(zfile, files):
# z = ZFile(zfile, 'w')
# z.addfiles(files)
# z.close()
#
# def extract(zfile, path):
# z = ZFile(zfile)
# z.extract_to(path)
# z.close()
#===============================================================================
if __name__ == '__main__':
    # manual upload run (fixed: dropped trailing " | Python" export residue)
    fileUpload()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re,itertools
import time
# best effort: enable the psyco JIT when installed, ignore otherwise
try:
    import psyco
    psyco.full()
except ImportError:
    pass
class CaptchaAlgorithm(object):
    '''Sequence-distance helpers used when matching captcha glyphs.'''
    def LevenshteinDistance(self, m, n):
        '''Return the Levenshtein edit distance between sequences m and n.'''
        rows = len(m) + 1
        cols = len(n) + 1
        # full DP table: table[i][j] = distance between m[:i] and n[:j]
        table = [[0] * cols for _ in range(rows)]
        for i in range(rows):
            table[i][0] = i
        for j in range(cols):
            table[0][j] = j
        for i in range(1, rows):
            for j in range(1, cols):
                cost = 0 if m[i - 1] == n[j - 1] else 1
                table[i][j] = min(table[i - 1][j] + 1,      # deletion
                                  table[i][j - 1] + 1,      # insertion
                                  table[i - 1][j - 1] + cost)  # substitution
        return table[-1][-1]
class CaptchaImageAlgorithm(object):
    '''Pixel-scanning helpers for segmenting a binarised (0 = ink, 255 = background) image.'''
    @staticmethod
    def GetPixelsXEdges(im):
        '''Return (start, end) x-ranges of columns that contain ink pixels.'''
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: scanning background, +1: inside an ink run
        edges = []
        for x in xrange(xsize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for y in xrange(ysize))  # ink pixels in this column
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(x)
        # NOTE(review): assumes edges arrive in start/end pairs; an ink run
        # touching the right border leaves an odd count and the pairing below
        # would raise IndexError -- confirm the expected inputs.
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def GetPixelsYEdges(im):
        '''Row-wise counterpart of GetPixelsXEdges: (start, end) y-ranges of ink rows.'''
        pixels = im.load()
        xsize, ysize = im.size
        state = -1
        edges = []
        for y in xrange(ysize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for x in xrange(xsize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(y)
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def StripYEdge(im):
        '''Crop vertically to the span between the first and last ink rows.'''
        yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
        y1, y2 = yedges[0][0], yedges[-1][1]
        return im.crop((0, y1, im.size[0], y2))
    @staticmethod
    def GetBinaryMap(im):
        '''Render the image as an ASCII map: '#' for ink (0), '_' otherwise.'''
        xsize, ysize = im.size
        pixels = im.load()
        return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in xrange(xsize)) for y in xrange(ysize))
    @staticmethod
    def getBitMapIn(im):
        '''Flatten the image into a tuple of 0 (ink) / 255 (background), column by column.'''
        xsize, ysize = im.size
        pixels = im.load()
        return tuple( 0 if pixels[x, y] == 0 else 255 for x in xrange(xsize) for y in xrange(ysize))
class CaptchaProfile(object):
    '''Singleton glyph matcher: features_map maps an ASCII-art feature to the
    recognised string for that glyph.'''
    def __init__(self,features_map):
        self.features_map = features_map
    def __new__(cls,features_map):
        '''
        Singleton: built once per class; the feature table is converted to
        pixel tuples a single time and cached on the class.
        '''
        if '_inst' not in vars(cls):
            cls.catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in features_map.iteritems()])
            cls._inst = super(CaptchaProfile, cls).__new__(cls)
        return cls._inst
    def match(self, im):
        '''Recognise one glyph image.

        Exact pixel-tuple cache hit first; otherwise pick the nearest feature
        by Levenshtein distance and cache the answer for next time.
        '''
        imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
        result = self.catagory_FEATURES_MAP__.get(imageData,None)
        if result != None:
            return result
        # cache miss: dump the unknown glyph so the feature table can be extended
        print CaptchaImageAlgorithm.GetBinaryMap(im),'\n'
        source = im.getdata()
        algorithm = CaptchaAlgorithm()
        minimal = min(self.features_map, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
        result = self.features_map[minimal]
        self.catagory_FEATURES_MAP__[imageData] = result
        return result
    def filter(self, im):
        '''Binarise the captcha: edge-enhance, greyscale, then 1-bit.'''
        return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    def splitAgorim(self, im, top,bottom):
        '''Compute glyph crop boxes by locating runs of fully-white columns.

        Returns (left, top, right, bottom) boxes spanning the gaps between
        consecutive blank-column runs.
        '''
        xsize, ysize = im.size
        pixels = im.load()
        zeroArr = []  # x positions of completely white columns (x == 0 always included)
        for x in xrange(xsize):
            flag = True
            for y in xrange(ysize):
                if pixels[x,y] != 255:
                    flag = False
                    break
            if flag or x == 0:
                zeroArr.append(x)
        # group consecutive x values: (value - index) is constant within one run
        zeroArr = [(value - index ,value) for index,value in enumerate(zeroArr)]
        retd = []
        for key, group in itertools.groupby(zeroArr, lambda x: x[0]):
            ret = [t[1] for t in group]
            retd.append((ret[0],ret[-1]))  # (first, last) column of each blank run
        l = len(retd)
        i = 0
        dd = []
        while i < l - 1 :
            pre = retd[i][1] + 1   # glyph starts right after this blank run
            next = retd[i + 1][0]  # ... and ends right before the next one
            dd.append((pre,top,next,bottom))
            i = i + 1
        return dd
    def split(self, im, top,bottom):
        '''Yield cropped glyph images (the first crop box is skipped).'''
        ddArr = self.splitAgorim(im, top, bottom)
        return (im.crop(idt) for idt in ddArr[1:])
def feature_to_data(feature):
    '''Convert an ASCII-art glyph feature into a flat pixel tuple.

    All whitespace is discarded; '#' maps to 0 (ink) and every other
    character to 255 (background).
    '''
    flat = re.sub(r'\s', '', feature)
    return tuple(0 if mark == '#' else 255 for mark in flat)
def captcha(filename, profile):
    '''Recognise a captcha image file using the given CaptchaProfile:
    binarise, split into glyphs, match each glyph and join the results.'''
    im = Image.open(filename)
    im = profile.filter(im)
    # NOTE(review): CaptchaProfile.split is declared as split(self, im, top,
    # bottom) but only im is passed here, which would raise TypeError --
    # confirm against the profile implementation actually in use.
    im_list = profile.split(im)
    result = ''.join(profile.match(im) for im in im_list)
    return result
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType, crawleRetries
from time import strftime
import chardet, re
from urlparse import urlparse
from threadpool import WorkRequest
from crawlerhttp import crawle
from cStringIO import StringIO
from itertools import chain
# site -> page encoding; sites not listed here are sniffed with chardet on
# the first page and then cached in this dict (see convertToUnicode)
encodingDict = {'360buy':'gb2312', 'newegg':'gb2312', 'dangdang':'gb2312', 'gome':'utf-8',
                'amazon':'utf-8', 'coo8':'gb2312', 'suning':'utf-8','egou':'GBK',}#'efeihu':'utf-8'}
def reinqueue_proc(req, result):
    '''Retry hook for price-image fetches: re-queue exactly once.

    req layout: (urlsum, prodDetail, resultList, pool, captcha).
    '''
    urlsum = req[0]
    pool = req[3]
    if urlsum.stat == 0:  # first failure only: record the code and re-queue
        urlsum.stat = result.code
        req = WorkRequest(getProductPrice, req, None,
                          callback=None)
        pool.putRequest(req)
    else:
        print "Failed %s:%d" % (urlsum.url, result.code)
def getProductPrice(*req):
    '''Worker entry point: download a price image (with retries) and hand the
    response to proc_normal_result. req layout matches reinqueue_proc.'''
    pimgUrlSumm = req[0]
    result = crawleRetries(pimgUrlSumm)
    proc_normal_result(req, result)
    return result
def proc_normal_result(req, result):
    '''On HTTP 200: OCR the price image via the captcha callable (req[4]) and
    append the finished product record; otherwise retry via reinqueue_proc.'''
    args = req
    captcha = req[4]
    if result.code == 200:
        prodDetail = args[1]
        resultList = args[2]
        prodDetail.privPrice = captcha(StringIO(result.content))
        resultList.append(prodDetail)
    else:
        reinqueue_proc(req, result)
class ObuyUrlSummary(UrlSummary):
    '''
    Crawl-queue entry: a URL plus its category metadata and crawl flags.
    '''
    def __init__(self, url='', data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
                 isCrawle=True, isRecursed=True, catagoryLevel=0, retries=4, parentPath=None, parent=None,
                 stat=0, errReason='', include=None, exclude=None):
        super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType, retries)
        self.name = name                    # category name
        self.catagoryLevel = catagoryLevel  # category depth (0 = site root)
        self.parentPath = [] if parentPath is None else parentPath  # ancestor summaries, root first
        self.parent = parent
        self.isCrawle = isCrawle            # fetch this URL at all?
        self.isRecursed = isRecursed        # follow sub-URLs found on the page?
        self.stat = stat                    # final crawl status code
        self.errReason = errReason
        # BUG FIX: the include/exclude constructor parameters were accepted
        # but discarded (both attributes were hard-wired to None).
        self.include = include              # sub-URL whitelist
        self.exclude = exclude              # sub-URL blacklist; include wins when both are set
    def getUrlSumAbstract(self):
        return self.name, self.url, self.catagoryLevel
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
class ParserResult(object):
    '''Base type for extracted records; logstr() renders one log line.'''
    def logstr(self):
        # override: return the record serialised as a single log line
        pass
def convertToUnicode(dataStr, siteName):
    '''Decode raw page bytes to unicode using the site's known encoding.

    Unknown sites are sniffed with chardet once and the result is cached in
    encodingDict. Input that is already unicode is returned unchanged.
    '''
    if isinstance(dataStr, str):
        encoding = encodingDict.get(siteName, None)
        if encoding is None:
            encoding = chardet.detect(dataStr)['encoding']
            encodingDict[siteName] = encoding  # cache the sniffed encoding
        dataStr = dataStr.decode(encoding, 'ignore')
    return dataStr
class Parser(object):
    '''Base page parser: decodes the page, builds the soup and applies
    include/exclude filtering to the URL summaries a subclass extracts.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        self.rootUrlSummary = rootUrlSummary
        self.include = include  # whitelist of summary filters
        self.exclude = exclude  # blacklist; include takes priority when both are set
        siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
        self.dataStr = convertToUnicode(dataStr, siteName)
        self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES)  # BeautifulSoup is the default parser
    @staticmethod
    def compareUrlSumm(urla, urlb):
        '''Summaries match by url when urla carries one, else by name.'''
        if urla.url != None and len(urla.url) > 0:
            return urla.url == urlb.url
        elif urla.name != None and len(urla.name) > 0:
            return urla.name == urlb.name
        else:
            return False
    @staticmethod
    def urlSummContain(filterArr, finalUrlSum):
        '''True when finalUrlSum (or any of its ancestors) matches a filter entry.'''
        for urlsumm in filterArr:
            if Parser.compareUrlSumm(urlsumm, finalUrlSum):
                return True
            else:
                for parent in finalUrlSum.parentPath:
                    if Parser.compareUrlSumm(urlsumm, parent):
                        return True
        return False
    def filterUrlList(self, finalUrlList):
        '''Apply include (preferred) or exclude filtering to a summary list.'''
        filterResult = finalUrlList
        if self.include != None and len(self.include) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                             if Parser.urlSummContain(self.include, finalUrlSum)]
        elif self.exclude != None and len(self.exclude) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                             if not Parser.urlSummContain(self.exclude, finalUrlSum)]
        return filterResult
    def parserPageInfos(self):
        '''
        Override: return the list of ParserResult records on this page.
        '''
        pass
    def parserSubUrlSums(self):
        # override: return the sub-URL summaries found on this page
        pass
def getParser(level, parserDict):
    '''Return the parser class registered for a category level, or None.'''
    try:
        return parserDict[level]
    except KeyError:
        return None
class ParserUtils(object):
    '''
    Small HTML/text extraction helpers shared by the page parsers.
    '''
    @staticmethod
    def parserTag_A(a):
        '''Return (text, href) of an <a> tag, both stripped.'''
        return a.getText().strip(), a['href'].strip()
    @staticmethod
    def getPrice(sPrice):
        '''Extract a price string, e.g. u'\uffe54,899.00' -> '4899.00' ('0.00' when absent).'''
        if not sPrice:
            return '0.00'
        sPrice = sPrice.replace(u',', '')
        # BUG FIX: the dot was unescaped, so '.' matched ANY character
        # (e.g. '49x99' -> '49x99'); an integer alternative keeps whole-number
        # prices working as before.
        regx = u'[0-9]+\\.[0-9]+|[0-9]+'
        p = re.compile(regx)
        ret = p.search(sPrice)
        if ret is None:
            return '0.00'
        return ret.group()
    @staticmethod
    def getDigit(s):
        '''Extract the first decimal or integer number in s as a string (0 when none).'''
        s = s.replace(u',', '')
        # same escaping fix as getPrice
        regx = u'[0-9]+\\.[0-9]+|[0-9]+'
        p = re.compile(regx)
        sd = p.search(s)
        if sd is None:
            return 0
        return sd.group()
    @staticmethod
    def getImgUrl(imgTag):
        '''Return the src of the <img> inside imgTag ('' when imgTag is None).'''
        if imgTag is None:
            return ''
        return imgTag.img['src']
class RootCatagoryPageParser(Parser):
    '''
    Base parser for a site's root category page: yields the ObuyUrlSummary
    of every third-level category.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_N(self, url, name, parent, isCrawle=True,firstFinalPage = False):
        '''
        Build the summary for one category node beneath parent.

        firstFinalPage=True marks a leaf listing page: the node becomes its
        own parent, which follow-up level-4 pages later inherit (see
        Sort3PageParser.buildSort_4).
        '''
        sort_n_urlsum = ObuyUrlSummary(url=url, name=name, isCrawle=isCrawle)
        sort_n_urlsum.parentPath = []
        sort_n_urlsum.catagoryLevel = parent.catagoryLevel + 1
        sort_n_urlsum.parentPath.extend(parent.parentPath)
        sort_n_urlsum.parentPath.append(parent)
        if firstFinalPage:
            sort_n_urlsum.parent = sort_n_urlsum  # leaf points at itself
        else:
            sort_n_urlsum.parent = parent
        return sort_n_urlsum
    def getBaseSort3UrlSums(self):
        # override: collect the site's level-3 category summaries
        pass
    def parserSubUrlSums(self):
        result = self.getBaseSort3UrlSums()
        return self.filterUrlList(result)
class Sort3PageParser(Parser):
    '''
    Level-3 (listing) page parser base:
    a. builds UrlSummary objects for every follow-up (level-4) page
    b. extracts the product records from the page (subclass duty)
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_4(self, url):
        '''Clone this page's category context onto a follow-up page URL (level 4).'''
        sort4_urlsum = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
                                      catagoryLevel=4)
        sort4_urlsum.parentPath = []
        sort4_urlsum.parentPath.extend(self.rootUrlSummary.parentPath)
        sort4_urlsum.parentPath.append(self.rootUrlSummary)
        sort4_urlsum.parent = self.rootUrlSummary.parent
        return sort4_urlsum
    def getTotal(self):
        # override: total number of pages in this listing
        pass
    def nextPageUrlPattern(self):
        # override: URL template containing a page-number placeholder
        pass
    def buildSort_4UrlSums(self):
        '''Build summaries for pages 2..total using nextPageUrlPattern().'''
        finalUrlList = []
        totalPage = self.getTotal()
        if totalPage > 1:
            for pageNum in range(2, totalPage + 1):
                url = self.nextPageUrlPattern().format(str(pageNum))
                finalUrlList.append(self.buildSort_4(url))
        return finalUrlList
    def getSort4PageUrlSums(self):
        return self.buildSort_4UrlSums()
    def parserSubUrlSums(self):
        result = self.getSort4PageUrlSums()
        return self.filterUrlList(result)
def seEncode(ustr,encoding='gb18030'):
    """Encode *ustr* into a byte string.

    None maps to ''; unicode objects are encoded with the given codec
    (unencodable characters dropped); everything else goes through str().
    """
    if ustr is None:
        return ''
    return ustr.encode(encoding,'ignore') if isinstance(ustr,unicode) else str(ustr)
class ProductDetails(ParserResult):
    '''
    Detailed information about a single product scraped from a listing page.
    '''
    def __init__(self, name='', imageUrl='', productId='', catagory=None, fullUrl='', pubPrice='0.00',
                 privPrice='0.00', adWords='', reputation='0', evaluateNum='0', updateTime=None):
        self.name = name #product name
        self.imageUrl = imageUrl #product image URL
        self.productId = productId #product id on the source site
        self.catagory = catagory #category the product belongs to
        self.fullUrl = fullUrl #original product page link
        self.pubPrice = pubPrice #list (original) price quoted by the site
        self.privPrice = privPrice #selling price, before ad/coupon discounts
        self.adWords = adWords #promotion text, e.g. instant discount, coupons
        self.reputation = reputation #positive-feedback rating
        self.evaluateNum = evaluateNum #number of customer reviews
        self.updateTime = strftime("%Y-%m-%d %H:%M:%S") if updateTime is None else updateTime #scrape timestamp
    def __getCatagoryAbs(self):
        # Abbreviated category reference: "(url, level)" of the category's
        # parent anchor, or '' when no ObuyUrlSummary parent is attached.
        cat = self.catagory.parent
        if isinstance(cat, ObuyUrlSummary):
            return str((seEncode(cat.url), cat.catagoryLevel))
        else:
            return ''
        #return ','.join([str((seEncode(cat.url), cat.catagoryLevel)) for cat in chain(self.catagory.parentPath, (self.catagory,))])
    def __filterStr(self,s):
        # Collapse whitespace and strip '|' so each field stays safe inside
        # the '|'-separated log record.
        return ' '.join(seEncode(s).replace('|', ' ').split())
    def logstr(self):
        # One '|'-separated log line carrying all product fields.
        return '|'.join(map(self.__filterStr, (self.productId, self.privPrice, self.updateTime, self.name, self.evaluateNum, self.reputation,
                                               self.adWords,self.fullUrl, self.imageUrl, self.__getCatagoryAbs())))
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
| Python |
#!/usr/bin/python
import socket
# Class that wraps a real socket and changes it to a HTTP tunnel whenever a connection is asked via the "connect" method
class ProxySock :
def __init__(self, socket, proxy_host, proxy_port) :
# First, use the socket, without any change
self.socket = socket
# Create socket (use real one)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
# Copy attributes
self.family = socket.family
self.type = socket.type
self.proto = socket.proto
def connect(self, address) :
# Store the real remote adress
(self.host, self.port) = address
# Try to connect to the proxy
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(
self.proxy_host,
self.proxy_port,
0, 0, socket.SOL_TCP) :
try:
# Replace the socket by a connection to the proxy
self.socket = socket.socket_formal(family, socktype, proto)
self.socket.connect(sockaddr)
except socket.error, msg:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket :
raise socket.error, ms
# Ask him to create a tunnel connection to the target host/port
self.socket.send(
("CONNECT %s:%d HTTP/1.1\r\n" +
"Host: %s:%d\r\n\r\n") % (self.host, self.port, self.host, self.port));
# Get the response
resp = self.socket.recv(4096)
# Parse the response
parts = resp.split()
# Not 200 ?
if parts[1] != "200" :
raise Exception("Error response from Proxy server : %s" % resp)
def __getattr__(self, name):
'''Automatically wrap methods and attributes for socket object.'''
return getattr(self.socket, name)
# Return the (host, port) of the actual target, not the proxy gateway
def getpeername(self) :
return (self.host, self.port)
# Install a proxy, by changing the method socket.socket()
def setup_http_proxy(proxy_host, proxy_port) :
    '''Globally install an HTTP CONNECT proxy by replacing socket.socket.

    BUG FIX: the original unconditionally did
    ``socket.socket_formal = socket.socket`` on every call, so a second call
    saved the wrapper itself and every later connect recursed forever. The
    real constructor is now saved only once, making the call idempotent.
    '''
    def socket_proxy(af, socktype, proto) :
        # Build a real socket with the saved constructor, then wrap it
        sock = socket.socket_formal(af, socktype, proto)
        return ProxySock(
            sock,
            proxy_host,
            proxy_port)
    # Save the original constructor exactly once
    if not hasattr(socket, 'socket_formal') :
        socket.socket_formal = socket.socket
    socket.socket = socket_proxy
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
GOME_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
##
##
'''
:
'.',
'''
______
_####_
##__##
##__##
##__##
##__##
##__##
##__##
##__##
_####_ '''
:
'0',
'''
__###__
_##_##_
##___##
##___##
##___##
##___##
##___##
##___##
_##_##_
__###__ '''
:
'0',
'''
__##__
####__
__##__
__##__
__##__
__##__
__##__
__##__
__##__
######
'''
:
'1',
'''
_####_
#___##
____##
____##
____##
___##_
__##__
_##___
##____
######
'''
:
'2',
'''
_#####_
#____##
_____##
____##_
_####__
____##_
_____##
_____##
#___##_
_####__
'''
:
'3',
'''
____##_
___###_
___###_
__#_##_
_#__##_
#___##_
#######
____##_
____##_
____##_
'''
:
'4',
'''
#######
#______
#______
#####__
____##_
_____##
_____##
_____##
#___##_
_####__
'''
:
'5',
'''
__####_
_##___#
_#_____
##_____
##_###_
###__##
##___##
##___##
_##__##
__####_
'''
:
'6',
'''
#######
_____##
____##_
____##_
___##__
___##__
__##___
__##___
_##____
_##____
'''
:
'7',
'''
_#####_
##___##
##___##
###_##_
_####__
_#####_
##__###
##___##
##___##
_#####_
'''
:
'8',
'''
_####__
##__##_
##___##
##___##
##__###
_###_##
_____##
_____##
#___##_
_####__
'''
:
'9',
} | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from gome.gome_feature import GOME_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_gome(CaptchaProfile):
    '''Captcha profile for Gome price images: binarizes the raw RGBA image
    and delegates glyph matching to CaptchaProfile via GOME_FEATURES_MAP.'''
    def __init__(self,features_map = GOME_FEATURES_MAP):
        super(CaptchaProfile_gome,self).__init__(features_map)
    def __new__(cls,features_map = GOME_FEATURES_MAP):
        return super(CaptchaProfile_gome, cls).__new__(cls,features_map)
    def filter(self,im_raw):
        # Convert the source image to 1-bit: fully transparent black pixels
        # (0,0,0,0) become white background (255), everything else ink (0).
        pixdata_raw = im_raw.load()
        imge_size = im_raw.size
        im = Image.new('1',imge_size)
        xsize,ysize = imge_size
        pixdata = im.load()
        for x in xrange(xsize):
            for y in xrange(ysize):
                if pixdata_raw[x,y] == (0,0,0,0):
                    pixdata[x,y] = 255
                else:
                    pixdata[x,y] = 0
        return im
    def split(self, im,top = 6,bottom = 16):
        # Glyph rows for Gome price images live between y=6 and y=16
        return super(CaptchaProfile_gome,self).split(im,top,bottom)
def captcha_gome(filename):
    '''Run captcha recognition on the price image *filename* using the Gome profile.'''
    return captcha(filename, CaptchaProfile_gome())
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
    # Ad-hoc debugging: OCR a sample image, then re-binarize it by hand and
    # dump the resulting bitmap for feature-table inspection.
    fileName = os.path.join(testFilePath, "0.png")
    print captcha_gome(fileName)
    im_raw = Image.open(fileName)
    pixdata_raw = im_raw.load()
    #r,g,b,a = im.split()
    im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    # NOTE(review): the enhanced image above is immediately discarded by the
    # re-assignment below -- looks like leftover experimentation.
    im = Image.new('1',im_raw.size)
    xsize,ysize = im.size
    pixdata = im.load()
    for x in xrange(xsize):
        for y in xrange(ysize):
            if pixdata_raw[x,y] == (0,0,0,0):
                pixdata[x,y] = 255
            else:
                pixdata[x,y] = 0
    print CaptchaImageAlgorithm.GetBinaryMap(im)
#    it1 = im.crop((3, 4, 13, 16))
#    print cia.GetBinaryMap(it1),'\n'
#    it2 = im.crop((15,4,24,16))
#    print cia.GetBinaryMap(it2)
#    print '+++++++++'
#    it2 = im.crop((25, 4, 34, 16))
#    it3 = im.crop ((36,4,45,16))
#    #it3 = im.crop((35, 4, 37, 16))
#    it4 = im.crop((38, 4, 47, 16))
#    it5 = im.crop((48, 4, 57, 16))
#    #it6 = im.crop((51, 3, 57, 11))
#    #it7 = im.crop((59, 3, 65, 11))
#    multilist = [[0 for col in range(5)] for row in range(3)]
#    print '\n'.join(( str(t) for t in multilist))
    #profile = CaptchaProfile_360Buy()
    #print captcha_360buy(r'c:\6.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-20
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from threadpool import ThreadPool, WorkRequest
from cStringIO import StringIO
from gome.image_price import captcha_gome
from spiderconfigparser import SpiderConfig
gomeRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/allSort.html',name='gome',
isRecursed = True,catagoryLevel = 0)
class GomeAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.gome.com.cn/allSort.html and turn every category link
    into an ObuyUrlSummary (all treated as level-3 leaf categories).
    '''
    mainHost = r'http://www.gome.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs = {'class':'alcatBox'})
        for t in allSort.findAll(name = 'div',attrs = {'class':'listBox'}):#top-level category box
            for aSeg in t.findAll(name = 'a'):
                try:
                    name,url = ParserUtils.parserTag_A(aSeg)
                    #url = ''.join((self.mainHost,url))
                except Exception,e:
                    # anchors that don't parse as category links are skipped
                    pass
                    #print e
                else:
                    #print name,url
                    sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
                    sort_3_urlsum.catagoryLevel = 3
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class GomeSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
pricePageNum = 10
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(GomeSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-{}-4-1-sc_'
return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
def nextPageUrlPattern1(self):
nextPat = 'http://search.gome.com.cn/product.do?topCtgyId=%s&order=%s&ctgyId=%s&p={}&ctgLevel=3&scopes='
urlSegs = self.rootUrlSummary.url.rsplit('/',1)[-1].split('.')[0].split('-')
#urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
#pageSeg = '&order=3&scopes=&p={}'
#return '%s%s' % (self.rootUrlSummary.url,pageSeg)
return nextPat % (urlSegs[0],urlSegs[2],urlSegs[1])
def getTotal(self):
toolSeg = self.soup.find(name='div',attrs={'class':'listNav'})
if toolSeg is None:
return 0
pageSeg = toolSeg.find(attrs={'class':'fr'}).getText()
totalPage = int(pageSeg.split('/')[-1])
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def getAdwordsDict(self):
regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
p = re.compile(regx,re.DOTALL)
idAdDict = {}
for match in p.finditer(self.dataStr):
idAdDict[match.group(2)] = match.group(1)
return idAdDict
def parserPageInfos(self):
pTipsSeg = self.soup.find(name='div', attrs={'class':'listNav'}).find(name='div',attrs={'class':'tips'})
resultList = []
if pTipsSeg is None:
raise Exception("Page Error")
return resultList
try:
pool = ThreadPool(self.pricePageNum)
plist = pTipsSeg.findNextSibling(name='ul')
idAdDict = self.getAdwordsDict()
for li in plist(name='li'):
pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'title'}).a)
imgUrl = li.find(name='div',attrs={'class':'pic'}).img['gome-src']
#repuSeg = li.find(name='div',attrs={'class':'extra'}).div['class']
#reputation = ParserUtils.getDigit(repuSeg)
#adWordsSeg = li.find(name='span',attrs={'id':re.compile(r'promImg_[0-9]+')}).find(name = 'img')
#if adWordsSeg:
# adWords = adWordsSeg['title']
# print adWords
pid = url.rsplit('/',1)[-1].split('.')[0]
if url and not url.startswith('http'):
url = ''.join((r'http://www.gome.com.cn',url))
priceImgUrlSeg = li.find(name='span',attrs={'class':'price'})
priceImgUrl = ParserUtils.getImgUrl(priceImgUrlSeg)
adWords = idAdDict.get(pid,'')
prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords,
imageUrl=imgUrl,fullUrl=url)
prodDetail.catagory = self.rootUrlSummary
pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_gome], None,
callback=None)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=self.pricePageNum)
return resultList
class GomeSort4PageParser(GomeSort3PageParser):
    '''Level-4 (pagination) listing page: products are extracted exactly like
    on a level-3 page, but no further sub-URLs are discovered.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # pagination pages never spawn more pages
        return None
parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,4:GomeSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import crawle, crawleRetries
def testAllSortPage():
    # Debug helper: fetch the live all-sort page and print every category.
    fileName = os.path.join(testFilePath,'AllSort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # NOTE(review): the file content read above is immediately overwritten by
    # the live crawl result below -- the read is dead code.
    rootUrlSum = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome')
    result = crawle(urlSum = rootUrlSum)
    content = result.content
    firstPage = GomeAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Debug helper: parse a saved level-3 page and print its pagination URLs.
    fileName = os.path.join(testFilePath,'gome_2011-12-24_22-37-57.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000000-10000073-3.html',
                                   parentPath=[('test')], catagoryLevel=3)
    #http://search.gome.com.cn/product.do?topCtgyId=10000000&order=3&ctgyId=10000070&p=2&ctgLevel=3&scopes=
    #result = crawle(urlSum = sort_3_urlsum)
    #content = result.content
    sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Debug helper: crawl a live level-3 page, cache the HTML to disk, then
    # print the parsed product log lines.
    fileName = os.path.join(testFilePath,'gome_test.html')
    #with open(fileName, 'r') as fInput:
    #    content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000004-10000057-3-1-4-2-sc_.html',
                                   parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(urlSum = sort_3_urlsum)
    content = result.content
    with open(fileName, 'w') as fInput:
        fInput.write(content)
    sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
def testRegx():
    # Debug helper: check the promotion-text regex against a saved page.
    fileName = os.path.join(testFilePath,'gome_test.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    import chardet,re
    content = content.decode('utf-8')
    #print content
    regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
    p = re.compile(regx,re.DOTALL)
    for match in p.finditer(content):
        print match.group(2),match.group(1)
if __name__ == '__main__':
#testAllSortPage()
#testSort3Page()
testSort3Details()
testRegx()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gome.gomepageparser import parserDict,gomeRoot
from spider import main
if __name__ == '__main__':
main(gomeRoot,parserDict) | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from gome.gome_feature import GOME_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_gome(CaptchaProfile):
def __init__(self,features_map = GOME_FEATURES_MAP):
super(CaptchaProfile_gome,self).__init__(features_map)
def __new__(cls,features_map = GOME_FEATURES_MAP):
return super(CaptchaProfile_gome, cls).__new__(cls,features_map)
def filter(self,im_raw):
pixdata_raw = im_raw.load()
imge_size = im_raw.size
im = Image.new('1',imge_size)
xsize,ysize = imge_size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (0,0,0,0):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
return im
def split(self, im,top = 6,bottom = 16):
return super(CaptchaProfile_gome,self).split(im,top,bottom)
def captcha_gome(filename):
return captcha(filename, CaptchaProfile_gome())
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "0.png")
print captcha_gome(fileName)
im_raw = Image.open(fileName)
pixdata_raw = im_raw.load()
#r,g,b,a = im.split()
im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
im = Image.new('1',im_raw.size)
xsize,ysize = im.size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (0,0,0,0):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
print CaptchaImageAlgorithm.GetBinaryMap(im)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-20
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from threadpool import ThreadPool, WorkRequest
from cStringIO import StringIO
from gome.image_price import captcha_gome
from spiderconfigparser import SpiderConfig
gomeRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/allSort.html',name='gome',
isRecursed = True,catagoryLevel = 0)
class GomeAllSortParser(RootCatagoryPageParser):
'''
从http://www.gome.com.cn/allSort.html获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = r'http://www.gome.com.cn'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(GomeAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs = {'class':'alcatBox'})
for t in allSort.findAll(name = 'div',attrs = {'class':'listBox'}):#一级
for aSeg in t.findAll(name = 'a'):
try:
name,url = ParserUtils.parserTag_A(aSeg)
#url = ''.join((self.mainHost,url))
except Exception,e:
pass
#print e
else:
#print name,url
sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
sort_3_urlsum.catagoryLevel = 3
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class GomeSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
pricePageNum = 10
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(GomeSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-{}-4-1-sc_'
return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
def nextPageUrlPattern1(self):
nextPat = 'http://search.gome.com.cn/product.do?topCtgyId=%s&order=%s&ctgyId=%s&p={}&ctgLevel=3&scopes='
urlSegs = self.rootUrlSummary.url.rsplit('/',1)[-1].split('.')[0].split('-')
#urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
#pageSeg = '&order=3&scopes=&p={}'
#return '%s%s' % (self.rootUrlSummary.url,pageSeg)
return nextPat % (urlSegs[0],urlSegs[2],urlSegs[1])
def getTotal(self):
toolSeg = self.soup.find(name='div',attrs={'class':'listNav'})
if toolSeg is None:
return 0
pageSeg = toolSeg.find(attrs={'class':'fr'}).getText()
totalPage = int(pageSeg.split('/')[-1])
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def getAdwordsDict(self):
regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
p = re.compile(regx,re.DOTALL)
idAdDict = {}
for match in p.finditer(self.dataStr):
idAdDict[match.group(2)] = match.group(1)
return idAdDict
def parserPageInfos(self):
pTipsSeg = self.soup.find(name='div', attrs={'class':'listNav'}).find(name='div',attrs={'class':'tips'})
resultList = []
if pTipsSeg is None:
raise Exception("Page Error")
return resultList
try:
pool = ThreadPool(self.pricePageNum)
plist = pTipsSeg.findNextSibling(name='ul')
idAdDict = self.getAdwordsDict()
for li in plist(name='li'):
pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'title'}).a)
imgUrl = li.find(name='div',attrs={'class':'pic'}).img['gome-src']
#repuSeg = li.find(name='div',attrs={'class':'extra'}).div['class']
#reputation = ParserUtils.getDigit(repuSeg)
#adWordsSeg = li.find(name='span',attrs={'id':re.compile(r'promImg_[0-9]+')}).find(name = 'img')
#if adWordsSeg:
# adWords = adWordsSeg['title']
# print adWords
pid = url.rsplit('/',1)[-1].split('.')[0]
if url and not url.startswith('http'):
url = ''.join((r'http://www.gome.com.cn',url))
priceImgUrlSeg = li.find(name='span',attrs={'class':'price'})
priceImgUrl = ParserUtils.getImgUrl(priceImgUrlSeg)
adWords = idAdDict.get(pid,'')
prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords,
imageUrl=imgUrl,fullUrl=url)
prodDetail.catagory = self.rootUrlSummary
pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_gome], None,
callback=None)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=self.pricePageNum)
return resultList
class GomeSort4PageParser(GomeSort3PageParser):
    '''Level-4 (pagination) listing page: products are extracted exactly like
    on a level-3 page, but no further sub-URLs are discovered.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # pagination pages never spawn more pages
        return None
parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,4:GomeSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import crawle, crawleRetries
def testAllSortPage():
fileName = os.path.join(testFilePath,'AllSort.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome')
result = crawle(urlSum = rootUrlSum)
content = result.content
firstPage = GomeAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'gome_2011-12-24_22-37-57.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000000-10000073-3.html',
parentPath=[('test')], catagoryLevel=3)
#http://search.gome.com.cn/product.do?topCtgyId=10000000&order=3&ctgyId=10000070&p=2&ctgLevel=3&scopes=
#result = crawle(urlSum = sort_3_urlsum)
#content = result.content
sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'gome_test.html')
#with open(fileName, 'r') as fInput:
# content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://search.gome.com.cn/product/10000004-10000057-3-1-4-2-sc_.html',
parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(urlSum = sort_3_urlsum)
content = result.content
with open(fileName, 'w') as fInput:
fInput.write(content)
sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
def testRegx():
fileName = os.path.join(testFilePath,'gome_test.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
import chardet,re
content = content.decode('utf-8')
#print content
regx =ur'''var d =\[\{name0:'(.*?)',.*?\$\('#promImg_([0-9]+)'\)'''
p = re.compile(regx,re.DOTALL)
for match in p.finditer(content):
print match.group(2),match.group(1)
if __name__ == '__main__':
#testAllSortPage()
#testSort3Page()
testSort3Details()
testRegx()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gome.gomepageparser import parserDict,gomeRoot
from spider import main
if __name__ == '__main__':
main(gomeRoot,parserDict) | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
GOME_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
##
##
'''
:
'.',
'''
______
_####_
##__##
##__##
##__##
##__##
##__##
##__##
##__##
_####_ '''
:
'0',
'''
__###__
_##_##_
##___##
##___##
##___##
##___##
##___##
##___##
_##_##_
__###__ '''
:
'0',
'''
__##__
####__
__##__
__##__
__##__
__##__
__##__
__##__
__##__
######
'''
:
'1',
'''
_####_
#___##
____##
____##
____##
___##_
__##__
_##___
##____
######
'''
:
'2',
'''
_#####_
#____##
_____##
____##_
_####__
____##_
_____##
_____##
#___##_
_####__
'''
:
'3',
'''
____##_
___###_
___###_
__#_##_
_#__##_
#___##_
#######
____##_
____##_
____##_
'''
:
'4',
'''
#######
#______
#______
#####__
____##_
_____##
_____##
_____##
#___##_
_####__
'''
:
'5',
'''
__####_
_##___#
_#_____
##_____
##_###_
###__##
##___##
##___##
_##__##
__####_
'''
:
'6',
'''
#######
_____##
____##_
____##_
___##__
___##__
__##___
__##___
_##____
_##____
'''
:
'7',
'''
_#####_
##___##
##___##
###_##_
_####__
_#####_
##__###
##___##
##___##
_#####_
'''
:
'8',
'''
_####__
##__##_
##___##
##___##
##__###
_###_##
_____##
_____##
#___##_
_####__
'''
:
'9',
} | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-25
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')
class LusenAllSortParser(RootCatagoryPageParser):
    '''Parse lusen.com's front page: every 'depth-1' link is treated as a
    level-3 leaf category.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        for t in self.soup.findAll(name='a',attrs={'class':'depth-1'}):#top-level category links
            name,url = ParserUtils.parserTag_A(t)
            sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
            sort_3_urlsum.catagoryLevel = 3
            finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class LusenSort3PageParser(Sort3PageParser):
    '''
    Parser for a lusen.com level-3 listing page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # '<base>--0--{page}--index.html' -- {} is filled with the page number
        pageSeg = '--0--{}--index.html'
        urlSeg = self.rootUrlSummary.url.rsplit('.',1)[0]
        return '%s%s' % (urlSeg,pageSeg)
    def getTotal(self):
        # Page count from the 'pageall' span (1 when absent), capped by config
        s = self.soup.find(name='span',attrs = {'class':'pageall'})
        if s is None:
            pageNum = 1
        else:
            pageNum = s.getText()
        totalPage = int(pageNum)
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    def __getSingleProdDetail(self, prod):
        # Build a ProductDetails from one 'pdt-*' product <div>
        pNameSeg = prod.find(attrs={'class':'goodinfo'})
        pName, url = ParserUtils.parserTag_A(pNameSeg.a)
        pid = prod['product']
        t = prod.find(name='td', attrs={'class':'price_button'})
        if t != None:
            currentPrice = ParserUtils.getPrice(t.getText())
        else:
            currentPrice = 0.00
        pastPrice = 0.00 # no list price is parsed from this layout
        imgUrlSeg = prod.find(name='td', attrs={'class':'goodpic'})
        imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
        prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice, pubPrice=pastPrice,
                                    name=pName, adWords='')
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        # One ProductDetails per product <div> on the page
        resultList = []
        listSeg = self.soup.findAll(name='div',attrs={'id':re.compile(r'pdt-[0-9]*')})
        for prod in listSeg:
            prodDetail = self.__getSingleProdDetail(prod)
            resultList.append(prodDetail)
        return resultList
class LusenSort4PageParser(LusenSort3PageParser):
    '''Level-4 (pagination) listing page: products are extracted exactly like
    on a level-3 page, but no further sub-URLs are discovered.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # pagination pages never spawn more pages
        return None
parserDict = {0:LusenAllSortParser, 3:LusenSort3PageParser, 4:LusenSort4PageParser}
''' test '''
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
rootUrlSum = ObuyUrlSummary(url=r'http://www.lusen.com', name='lusen')
content = getContentFromUrlSum(rootUrlSum)
firstPage = LusenAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.lusen.com/gallery-175.html',
parentPath=[('test')], catagoryLevel=3)
content = getContentFromUrlSum(sort_3_urlsum)
sort3Page = LusenSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
for product in sort3Page.parserPageInfos():
print product.name
if __name__ == '__main__':
#testAllSortPage()
testSort3Page()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-25
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')
class LusenAllSortParser(RootCatagoryPageParser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(LusenAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
for t in self.soup.findAll(name='a',attrs={'class':'depth-1'}):#一级分类
name,url = ParserUtils.parserTag_A(t)
sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
sort_3_urlsum.catagoryLevel = 3
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class LusenSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(LusenSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
pageSeg = '--0--{}--index.html'
urlSeg = self.rootUrlSummary.url.rsplit('.',1)[0]
return '%s%s' % (urlSeg,pageSeg)
def getTotal(self):
s = self.soup.find(name='span',attrs = {'class':'pageall'})
if s is None:
pageNum = 1
else:
pageNum = s.getText()
totalPage = int(pageNum)
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def __getSingleProdDetail(self, prod):
pNameSeg = prod.find(attrs={'class':'goodinfo'})
pName, url = ParserUtils.parserTag_A(pNameSeg.a)
pid = prod['product']
t = prod.find(name='td', attrs={'class':'price_button'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
pastPrice = 0.00
imgUrlSeg = prod.find(name='td', attrs={'class':'goodpic'})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice, pubPrice=pastPrice,
name=pName, adWords='')
prodDetail.catagory = self.rootUrlSummary
return prodDetail
def parserPageInfos(self):
resultList = []
listSeg = self.soup.findAll(name='div',attrs={'id':re.compile(r'pdt-[0-9]*')})
for prod in listSeg:
prodDetail = self.__getSingleProdDetail(prod)
resultList.append(prodDetail)
return resultList
class LusenSort4PageParser(LusenSort3PageParser):
    '''Level-4 (pagination) listing page: products are extracted exactly like
    on a level-3 page, but no further sub-URLs are discovered.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # pagination pages never spawn more pages
        return None
parserDict = {0:LusenAllSortParser, 3:LusenSort3PageParser, 4:LusenSort4PageParser}
''' test '''
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
rootUrlSum = ObuyUrlSummary(url=r'http://www.lusen.com', name='lusen')
content = getContentFromUrlSum(rootUrlSum)
firstPage = LusenAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.lusen.com/gallery-175.html',
parentPath=[('test')], catagoryLevel=3)
content = getContentFromUrlSum(sort_3_urlsum)
sort3Page = LusenSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
for product in sort3Page.parserPageInfos():
print product.name
if __name__ == '__main__':
#testAllSortPage()
testSort3Page()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from lusen.lusenpageparser import parserDict,lusenRoot
from spider import main
if __name__ == '__main__':
main(lusenRoot,parserDict) | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from lusen.lusenpageparser import parserDict,lusenRoot
from spider import main
if __name__ == '__main__':
    # Crawl lusen.com from its root URL using the depth->parser map.
    main(lusenRoot,parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import os
import hashlib
#DB parameter
def update360buyRepu():
conn = getConnect()
qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)
sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'
for result in qPage.queryForList(sql=sqlStr):
prodList = []
for prod in result:
id = prod[0]
repu = prod[1]
repu = repu * 5 / 100
print repu, id
prodList.append((repu, id))
print '+++'
batchUpdateProdBaseInfo(conn, prodList)
conn.close()
# MySQL connection defaults used by getConnect().
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''
def seEncode(ustr, encoding='utf-8'):
    '''Coerce a value into a utf-8 byte string before it goes to the DB.

    None -> '', unicode -> encoded bytes, list/tuple/set -> '[a,b,...]' with
    each element encoded recursively, everything else -> str().
    '''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list,tuple,set)):
        return '[%s]' % ','.join(seEncode(item, encoding) for item in ustr)
    return str(ustr)
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a MySQL connection.

    Bug fix: the parameters were previously ignored and the module-level
    defaults were always used, so callers could never target another
    host/db/user.  The defaults keep every existing call site behaving
    exactly as before.
    '''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
    '''Force the client character set to utf8 and return a fresh cursor.'''
    cursor = conn.cursor()
    cursor.execute("SET NAMES utf8")
    conn.commit()
    return cursor
class MySQLQueryPagination(object):
    '''Server-side pagination over an arbitrary SELECT statement.

    The caller's SQL is wrapped as a sub-select
    (``select * from (...) total_table``) both to count the total rows and
    to fetch each LIMIT-ed page, so any SELECT can be paginated unchanged.
    '''
    def __init__(self,conn,numPerPage = 20):
        self.conn = conn
        self.numPerPage = numPerPage
    def queryForList(self,sql,param = None):
        # Generator: yields one tuple-of-rows per page until exhausted.
        totalPageNum = self.__calTotalPages(sql,param)
        for pageIndex in xrange(totalPageNum):
            yield self.__queryEachPage(sql,pageIndex,param)
    def __createPaginaionQuerySql(self,sql,currentPageIndex):
        # Wrap the SQL and append "limit offset,count" for this page.
        startIndex = self.__calStartIndex(currentPageIndex)
        qSql = r'select * from (%s) total_table limit %s,%s' % (sql,startIndex,self.numPerPage)
        return qSql
    def __queryEachPage(self,sql,currentPageIndex,param = None):
        # Fetch a single page; param is forwarded to the driver when given.
        curs = initClientEncode(self.conn)
        qSql = self.__createPaginaionQuerySql(sql, currentPageIndex)
        if param is None:
            curs.execute(qSql)
        else:
            curs.execute(qSql,param)
        result = curs.fetchall()
        curs.close()
        return result
    def __calStartIndex(self,currentPageIndex):
        # Row offset of the first row on the given (0-based) page.
        startIndex = currentPageIndex * self.numPerPage;
        return startIndex;
    def __calTotalRowsNum(self,sql,param = None):
        ''' Count the total rows the wrapped query yields. '''
        tSql = r'select count(*) from (%s) total_table' % sql
        curs = initClientEncode(self.conn)
        if param is None:
            curs.execute(tSql)
        else:
            curs.execute(tSql,param)
        result = curs.fetchone()
        curs.close()
        totalRowsNum = 0
        if result != None:
            totalRowsNum = int(result[0])
        return totalRowsNum
    def __calTotalPages(self,sql,param):
        ''' Total page count: ceiling of rows / page size (Py2 int division). '''
        totalRowsNum = self.__calTotalRowsNum(sql,param)
        totalPages = 0;
        if (totalRowsNum % self.numPerPage) == 0:
            totalPages = totalRowsNum / self.numPerPage;
        else:
            totalPages = (totalRowsNum / self.numPerPage) + 1
        return totalPages
    def __calLastIndex(self, totalRows, totalPages,currentPageIndex):
        '''Index one past the current page's last row (currently unused).'''
        lastIndex = 0;
        if totalRows < self.numPerPage:
            lastIndex = totalRows;
        elif ((totalRows % self.numPerPage == 0)
              or (totalRows % self.numPerPage != 0 and currentPageIndex < totalPages)) :
            lastIndex = currentPageIndex * self.numPerPage
        elif (totalRows % self.numPerPage != 0 and currentPageIndex == totalPages): # last page
            lastIndex = totalRows
        return lastIndex
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
    '''Return (en_name, id) pairs for every row of `websit_base_info`.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT en_name,id FROM `websit_base_info` ')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 表 `cat_base_config` db 操作
#===============================================================================
def getAllCatBaseConfig():
    '''Return every `cat_base_config` row: (main_cat_id, baseinfo table,
    current-price table, historic-price table, en_name).'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
    en_name FROM `cat_base_config`''')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def saveProdCat(rawCatId, siteId, parentId, url, name, parentPath,
level, self_cat_id = 0,cat_base_id = 0 ):
''' 保存各站点的分类信息 '''
conn = getConnect()
curs = initClientEncode(conn)
if parentId == '':
print url
sqlStr = '''INSERT INTO `prod_catagory` (`raw_cat_id` ,`site_id` ,`parent_id` ,`url` ,`name` ,`parent_path` ,`level` , self_cat_id, cat_base_id,`update_time` )
VALUES ( %s, %s, %s, %s, %s, %s, %s, now()) '''
param = [seEncode(pt) for pt in (rawCatId, siteId, parentId, url, name, parentPath, level,self_cat_id, cat_base_id,)]
curs.execute(sqlStr, param)
curs.close()
conn.close()
return int(curs.lastrowid)
def match55bigoCats(site_id,name):
    '''Fuzzy-match level-3 category names for the given site (or the shared
    site 9); returns (id, site_id, self_cat_id, name, url, cat_base_id) rows.

    Security fix: ``name`` used to be interpolated into the SQL via
    str.format, allowing SQL injection; it is now passed as a bound LIKE
    parameter with the wildcards added in Python.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = ('SELECT id,site_id,self_cat_id,name,url,cat_base_id FROM `prod_catagory` '
              "where (site_id =9 or site_id=%s) AND LEVEL =3 AND name LIKE %s")
    param = [site_id, '%' + name + '%']
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Look up (id, self_cat_id, cat_base_id) for a raw category key; None if absent.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s',
                 (raw_cat_id, site_id))
    row = curs.fetchone()
    curs.close()
    conn.close()
    if row is not None:
        return row
def getCatBySiteIdAndLevel(site_id,level):
    '''All (id, name, parent_path) category rows of one site at one level.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT id ,name,parent_path FROM `prod_catagory` where site_id = %s and level = %s',
                 (site_id, level))
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 表 `prod_base_info` db proc
#===============================================================================
def saveProdBaseInfo(conn,site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time):
    '''Insert one product row into prod_base_info; returns the new row id.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
    VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    values = (site_id, raw_id, name, url, img_url, repu, eval_num, cat_id, u_time)
    curs.execute(sqlStr, [seEncode(v) for v in values])
    conn.commit()
    newId = curs.lastrowid
    curs.close()
    return int(newId)
def batchSaveProdBaseInfo(conn, params):
    '''Batch-insert product tuples into prod_base_info in one executemany.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
    VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    encodedRows = [[seEncode(field) for field in row] for row in params]
    curs.executemany(sqlStr, encodedRows)
    conn.commit()
    curs.close()
def getProdId(conn, site_id, raw_id):
    ''' Internal product id for (site_id, raw_id), or None when unknown. '''
    curs = initClientEncode(conn)
    curs.execute('SELECT id FROM `prod_base_info` where site_id = %s and raw_id = %s',
                 (site_id, raw_id))
    row = curs.fetchone()
    curs.close()
    if row is not None:
        return row[0]
def getAllRawProdIdsBySite(site_id):
    '''Return (raw_id, id) pairs for every product of one site.

    Fix: the execute() parameter was a parenthesised scalar ``(site_id)``
    rather than a tuple, which only works by accident with some driver
    versions; it is now a proper one-element tuple.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT raw_id,id FROM `prod_base_info` where site_id = %s'
    curs.execute(sqlStr, (site_id,))
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_price_info` db proc
#===============================================================================
def saveProdPriceInfo(prod_id, real_price, cur_price, diff_price, adwords, coupon=0.00, ex_gift='',
                      order_cut=0.00, crash_cut=0.00, m_price=0.00, trans_price=0.00, other_dis=0.00, u_time = None):
    '''Insert one full price record into prod_price_info using its own
    connection; returns the new row id.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    values = (prod_id, real_price, cur_price, m_price, diff_price, trans_price, other_dis,
              adwords, coupon, ex_gift, order_cut, crash_cut, u_time)
    curs.execute(sqlStr, [seEncode(v) for v in values])
    conn.commit()
    newId = curs.lastrowid
    curs.close()
    conn.close()
    return int(newId)
def batchSaveProdPriceInfo(conn, params):
    '''Batch-insert price tuples into prod_price_info in one executemany.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    encodedRows = [[seEncode(field) for field in row] for row in params]
    curs.executemany(sqlStr, encodedRows)
    conn.commit()
    curs.close()
def getProdPriceInfoFromProdId(conn, prod_id):
    '''All (real_price, cur_price, u_time, id) rows for a product, newest
    first; returns None when the product has no price history.

    Fix: the execute() parameter was a parenthesised scalar, not a tuple;
    it is now a proper one-element tuple.
    '''
    curs = initClientEncode(conn)
    sqlStr = '''select real_price,cur_price,u_time,id from `prod_price_info` where prod_id=%s order by u_time DESC'''
    curs.execute(sqlStr, (prod_id,))
    result = curs.fetchall()
    curs.close()
    if len(result) > 0:
        return result
def getMd5Key(src):
    '''MD5 digest of *src* interpreted as a (large) base-16 integer.'''
    digest = hashlib.md5(src).hexdigest()
    return int(digest, 16)
def getCatKey(url):
    '''First 16 decimal digits of the MD5 of a category URL, used as a raw key.'''
    return str(getMd5Key(url))[0:16]
class SiteNameIDDict(object):
    '''Singleton cache mapping site en_name -> id (table websit_base_info).'''
    def __init__(self):
        pass
    def __new__(cls):
        '''Create-once: the mapping is loaded from the DB on first construction.'''
        if '_inst' not in vars(cls):
            cls.t_site_dict = dict(getAllWebsiteBaseInfo())
            cls._inst = super(SiteNameIDDict, cls).__new__(cls)
        return cls._inst
    def getSiteIdByName(self, siteName):
        # Raises KeyError for unknown site names, like a plain dict.
        return self.t_site_dict[siteName]
def getSiteIdByName(siteName):
    '''Module-level convenience wrapper around the SiteNameIDDict singleton.'''
    return SiteNameIDDict().getSiteIdByName(siteName)
class ProdCatIdDict(object):
    '''Singleton cache: "siteId_rawCatId" -> (id, self_cat_id, cat_base_id).'''
    def __init__(self):
        pass
    def __new__(cls):
        '''Create-once; the cache dict lives on the class.'''
        if '_inst' not in vars(cls):
            cls.t_cat_dict = dict()
            cls._inst = super(ProdCatIdDict, cls).__new__(cls)
        return cls._inst
    def __getKey(self, siteId, rawCatId):
        # Cache key format: "<siteId>_<rawCatId>".
        return '%s_%s' % (siteId, rawCatId)
    def getProdCatId(self, siteId, catUrl):
        # Memoised DB lookup keyed on the URL's MD5-derived raw id.
        rawCatId = getCatKey(catUrl)
        cacheKey = self.__getKey(siteId, rawCatId)
        cached = self.t_cat_dict.get(cacheKey)
        if cached is None:
            cached = getCatIdFromRawCatID(rawCatId, siteId)
            self.t_cat_dict[cacheKey] = cached
        return cached
def getCatIdFromRawInfo(siteId, catUrl):
    '''Just the category id for (site, category URL); None when unmatched.'''
    match = ProdCatIdDict().getProdCatId(siteId, catUrl)
    if match:
        return match[0]
def getProdInfoRawIDMapId(siteId):
    '''raw_id -> id mapping for every product of one site.'''
    return dict(getAllRawProdIdsBySite(siteId))
# Fixture directory (next to this module) used by test() below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test(siteName, encoding='gb2312'):
    '''Ad-hoc loader: parse a saved 360buy spider log and bulk-insert new products.

    Reads '|'-separated lines, skips products already present in the DB,
    and flushes inserts in batches of 100.
    NOTE(review): the last field is parsed with eval() — only ever feed this
    function trusted, locally produced log files.
    '''
    site_id = getSiteIdByName(siteName)
    fileName = os.path.join(testFilePath, '360buy_spider.log')
    conn = getConnect()
    params = list()
    i = 0
    # Seed the duplicate filter with everything already stored for the site.
    prodIds = set([ t[0] for t in getAllRawProdIdsBySite(site_id)])
    print len(prodIds)
    import re
    p = re.compile('[0-9]+')
    p1 = re.compile(u'下单')
    fOutput = open('c:t360buy_jian.log', 'w')
    with open(fileName, 'r') as fInput:
        for line in fInput:
            line = line.strip().decode(encoding, 'ignore')
            ret = line.split('|')
            raw_id = ret[0]
            # Side channel: dump adwords mentioning an order-cut promotion.
            if p.search(ret[6]) != None and p1.search(ret[6]) != None:
                fOutput.write(ret[0] + ' ' + ret[6] + '\n')
            #if getProdId(site_id,raw_id) != None:
            if raw_id in prodIds:
                #print '++++++++++++++++++++++++++'
                continue
            prodIds.add(raw_id)
            name = ret[3]
            repu = ret[4]
            eval_num = ret[5]
            url = ret[7]
            img_url = ret[8]
            # NOTE(review): eval() on log content — trusted input only.
            catUrl = eval(ret[-1])[0]
            cat_id = getCatIdFromRawInfo(site_id, catUrl)
            u_time = ret[2]
            #print raw_id , name , url , repu, eval_num, img_url ,cat_id
            param = (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)
            if cat_id == '':
                print param
            params.append(param)
            i = i + 1
            # Flush every 100 rows to bound memory and transaction size.
            if i == 100:
                batchSaveProdBaseInfo(conn, params)
                params = list()
                i = 0
    if i > 0:
        batchSaveProdBaseInfo(conn, params)
    del params
    conn.close()
    fOutput.close()
def batchUpdateProdBaseInfo(conn, prodList):
    ''' Batch-update repu in prod_base_info_3c; prodList holds (repu, id) pairs. '''
    curs = initClientEncode(conn)
    sqlPattern = '''update prod_base_info_3c set repu=%s where id = %s'''
    encodedRows = [[seEncode(field) for field in row] for row in prodList]
    curs.executemany(sqlPattern, encodedRows)
    conn.commit()
    curs.close()
if __name__ == '__main__':
    # NOTE(review): match55bigoCats is declared as (site_id, name) but is
    # called here with a single argument — as written this raises TypeError.
    # TODO: supply the intended site_id.
    for t in match55bigoCats('笔记本'):
        print seEncode(t)
| Python |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import os,re,sys
import hashlib
import operator
import shutil
from ConfigParser import ConfigParser
from decimal import Decimal
import MySQLdb
from pageparser import ParserUtils
#DB parameter
# MySQL connection defaults used by getConnect().
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''
def seEncode(ustr, encoding='utf-8'):
    '''Coerce a value into a utf-8 byte string before it goes to the DB.

    None -> '', unicode -> encoded bytes, list/tuple/set -> '[a,b,...]' with
    each element encoded recursively, everything else -> str().
    '''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list,tuple,set)):
        return '[%s]' % ','.join(seEncode(item, encoding) for item in ustr)
    return str(ustr)
def getDigit(s):
    '''Extract the first decimal number in *s* as a string; 0 when absent.

    Commas are stripped first so '1,234' parses as '1234'.  Bug fix: the
    decimal alternative used an unescaped '.', which matched ANY character
    and could glue unrelated digit runs together (e.g. '3a5' -> '3a5');
    the dot is now escaped so only real decimals ('12.5') or plain
    integers match.
    '''
    s = s.replace(u',', '')
    match = re.search(u'[0-9]+\\.[0-9]+|[0-9]+', s)
    if match is None:
        return 0
    return match.group()
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a MySQL connection.

    Bug fix: the parameters were previously ignored and the module-level
    defaults were always used; the defaults keep every existing call site
    behaving exactly as before.
    '''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
    '''Force the client character set to utf8 and return a fresh cursor.'''
    cursor = conn.cursor()
    cursor.execute("SET NAMES utf8")
    conn.commit()
    return cursor
#===============================================================================
# 基本vo类定义
#===============================================================================
class CatBaseConfig(object):
    ''' Value object for one `cat_base_config` row: id plus the three
    per-category table names (base info, current price, historic price). '''
    def __init__(self,id,tProdInfo,tProdPriceCur,tProdPriceHis):
        # Explicit assignments (equivalent to attrsFromDict(locals())).
        self.id = id
        self.tProdInfo = tProdInfo
        self.tProdPriceCur = tProdPriceCur
        self.tProdPriceHis = tProdPriceHis
    def __str__(self):
        return str(vars(self))
    __repr__ =__str__
class ProdBaseInfo(object):
    '''Product base-info record mirroring the prod_base_info_XX tables.

    __slots__ keeps the many instances lightweight; attributes are filled
    in field by field by ProdBaseInfoBuilder.
    '''
    __slots__ = ('site_id','raw_id','name','url','img_url','repu','eval_num',
                 'cat_id','self_cat_id','cat_base_id','u_time')
    def __init__(self):
        pass
    def __str__(self):
        lines = [seEncode((field, getattr(self, field))) for field in self.__slots__]
        return os.linesep.join(lines)
    __repr__ =__str__
class ProdPriceInfo(object):
    '''Price record mirroring the prod_price_info_* tables.

    Attributes are assigned externally by PriceInfoBuilder subclasses.
    '''
    __slots__ = ('id','prod_id','real_price','cur_price','m_price','diff_price','trans_price',
                 'other_dis','adwords','coupon','ex_gift','order_cut','crash_cut','u_time')
    def __str__(self):
        lines = [seEncode((field, getattr(self, field))) for field in self.__slots__]
        return os.linesep.join(lines)
    __repr__ =__str__
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
    '''Return (en_name, id) pairs for every row of `websit_base_info`.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT en_name,id FROM `websit_base_info` ')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 表 `prod_db_stat` db 操作
#===============================================================================
def getProdStatInfo(site_id,cat_base_id):
    '''Last update time in prod_db_stat for (site, base category); None if absent.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT u_time FROM `prod_db_stat` where website_id=%s and cat_base_id=%s ',
                 (site_id, cat_base_id))
    row = curs.fetchone()
    curs.close()
    conn.close()
    if row:
        return row[0]
def saveProdStat(site_id,cat_base_id,u_time):
    '''Record the update time for (site, base category) in prod_db_stat.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('INSERT INTO `prod_db_stat`(`website_id`,`cat_base_id`,`u_time`) VALUES (%s,%s,%s)',
                 (site_id, cat_base_id, u_time))
    conn.commit()
    curs.close()
def updateProdStat(site_id,cat_base_id,u_time):
    '''Refresh the stored update time for (site, base category).'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('update `prod_db_stat` set u_time=%s where website_id=%s and cat_base_id=%s',
                 (u_time, site_id, cat_base_id))
    conn.commit()
    curs.close()
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Look up (id, self_cat_id, cat_base_id) for a raw category key; None if absent.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s',
                 (raw_cat_id, site_id))
    row = curs.fetchone()
    curs.close()
    conn.close()
    if row is not None:
        return row
#===============================================================================
# 表 `cat_base_config` db 操作
#===============================================================================
def getAllCatBaseConfig():
    '''Return every `cat_base_config` row: (main_cat_id, baseinfo table,
    current-price table, historic-price table, en_name).'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
    en_name FROM `cat_base_config`''')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 商品基础信息入库函数,根据不同的类别保存进不同的表中
#===============================================================================
def saveProdBaseInfo(conn, table_name, pBaseInfo):
    '''Insert one ProdBaseInfo row into *table_name*; returns the new row id.'''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`,
    `cat_id` , `self_cat_id`,`u_time` ) VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s, %s)'''
    fields = (pBaseInfo.site_id, pBaseInfo.raw_id, pBaseInfo.name, pBaseInfo.url,
              pBaseInfo.img_url, pBaseInfo.repu, pBaseInfo.eval_num, pBaseInfo.cat_id,
              pBaseInfo.self_cat_id, pBaseInfo.u_time)
    curs.execute(sqlPattern.format(table_name=table_name), [seEncode(v) for v in fields])
    conn.commit()
    newId = curs.lastrowid
    curs.close()
    return int(newId)
def batchSaveProdBaseInfo(conn, table_name, prodList):
    ''' Batch-insert ProdBaseInfo objects into *table_name*. '''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`,
    `cat_id` , `self_cat_id`,`u_time` ) VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s, %s)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    encodedRows = []
    for info in prodList:
        fields = (info.site_id, info.raw_id, info.name, info.url, info.img_url,
                  info.repu, info.eval_num, info.cat_id, info.self_cat_id, info.u_time)
        encodedRows.append([seEncode(v) for v in fields])
    curs.executemany(sqlStr, encodedRows)
    conn.commit()
    curs.close()
def updateProdBaseInfo(conn, table_name, pBaseInfo):
    '''Refresh repu/eval_num/url/img_url of one product in *table_name*.'''
    curs = initClientEncode(conn)
    sqlPattern = '''update {table_name} set `repu`=%s, `eval_num`=%s, url=%s,img_url=%s where site_id=%s and raw_id=%s'''
    fields = (pBaseInfo.repu, pBaseInfo.eval_num, pBaseInfo.url,
              pBaseInfo.img_url, pBaseInfo.site_id, pBaseInfo.raw_id)
    curs.execute(sqlPattern.format(table_name=table_name), [seEncode(v) for v in fields])
    conn.commit()
    curs.close()
def batchUpdateProdBaseInfo(conn, table_name, prodList):
    ''' Batch-refresh repu/eval_num/url/img_url of products in *table_name*. '''
    curs = initClientEncode(conn)
    sqlPattern = '''update {table_name} set `repu`=%s, `eval_num`=%s, url=%s,img_url=%s where site_id=%s and raw_id=%s'''
    sqlStr = sqlPattern.format(table_name=table_name)
    encodedRows = []
    for info in prodList:
        fields = (info.repu, info.eval_num, info.url, info.img_url, info.site_id, info.raw_id)
        encodedRows.append([seEncode(v) for v in fields])
    curs.executemany(sqlStr, encodedRows)
    conn.commit()
    curs.close()
def getAllRawProdIds(table_name, site_id):
    '''Return (raw_id, id) pairs for every product of one site in *table_name*.

    Fix: the execute() parameter was a parenthesised scalar ``(site_id)``
    rather than a tuple; it is now a proper one-element tuple.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlPattern = 'SELECT raw_id,id FROM {table_name} where site_id = %s'
    sqlStr = sqlPattern.format(table_name=table_name)
    curs.execute(sqlStr, (site_id,))
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 保存价格数据到各类别的cur_price里
#===============================================================================
def saveProdPriceInfo(conn, table_name, priceInfo):
    '''Insert one price row into *table_name* with u_flag preset to 1;
    returns the new row id.'''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`prod_id` ,`real_price` ,`cur_price` ,`diff_price`,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ,`u_flag`) VALUES
    (%s, %s, %s,%s, %s, %s,%s, %s, %s,%s,1)'''
    fields = (priceInfo.prod_id, priceInfo.real_price, priceInfo.cur_price,
              priceInfo.diff_price, priceInfo.adwords, priceInfo.coupon,
              priceInfo.ex_gift, priceInfo.order_cut, priceInfo.crash_cut,
              priceInfo.u_time)
    curs.execute(sqlPattern.format(table_name=table_name), [seEncode(v) for v in fields])
    conn.commit()
    newId = curs.lastrowid
    curs.close()
    return int(newId)
def updateProdPriceInfo(conn, table_name, priceInfo):
    '''Overwrite the current-price row of priceInfo.prod_id in *table_name*,
    marking it freshly updated (u_flag=1).  Returns cursor.lastrowid — not
    meaningful for an UPDATE, kept for interface compatibility.'''
    curs = initClientEncode(conn)
    sqlPattern = ''' update {table_name} set `real_price`=%s ,`cur_price`=%s ,`diff_price`=%s, `adwords`=%s,`coupon`=%s ,`ex_gift`=%s ,
    `order_cut`=%s ,`crash_cut`=%s ,`u_time`=%s ,`u_flag`=1 where prod_id = %s '''
    fields = (priceInfo.real_price, priceInfo.cur_price, priceInfo.diff_price,
              priceInfo.adwords, priceInfo.coupon, priceInfo.ex_gift,
              priceInfo.order_cut, priceInfo.crash_cut, priceInfo.u_time,
              priceInfo.prod_id)
    curs.execute(sqlPattern.format(table_name=table_name), [seEncode(v) for v in fields])
    conn.commit()
    updatedId = curs.lastrowid
    curs.close()
    return int(updatedId)
def batchSaveProdPriceInfo(conn, table_name , params):
    '''Batch-insert price tuples into *table_name* with u_flag preset to 1.

    Bug fix: the column list contained a doubled comma
    ("`u_time`,,`u_flag`"), which made every batch insert fail with a SQL
    syntax error.
    '''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time`,`u_flag` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s,1)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def savePriceInfo2HisTable(conn,his_table,cur_table,baseinfo_table,site_id):
    '''Archive freshly-updated (u_flag=1) price rows of one site into the
    history table, then clear their u_flag.  Both statements run in one
    transaction; any failure rolls everything back (errors are swallowed
    on purpose — this is a best-effort housekeeping step).'''
    curs = initClientEncode(conn)
    try:
        #>= date_format( %s, '%%Y-%%m-%%d' )
        copySql = ''' insert into {his_price_table} (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price` ,
        `other_dis` ,`adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut` ,`u_time` ) SELECT t1.`prod_id` , t1.`real_price` ,
        t1.`cur_price` , t1.`m_price` , t1.`diff_price` , t1.`trans_price` , t1.`other_dis` , t1.`adwords` , t1.`coupon` ,
        t1.`ex_gift` , t1.`order_cut` , t1.`crash_cut` , t1.`u_time`
        FROM {cur_price_table} AS t1 JOIN {baseinfo_table} AS t2 ON t1.prod_id = t2.id
        WHERE t1.u_flag = 1 AND t2.site_id =%s'''.format(his_price_table=his_table,
            cur_price_table=cur_table, baseinfo_table=baseinfo_table)
        curs.execute(copySql, (site_id))
        resetSql = ''' UPDATE {cur_price_table} SET `u_flag` = '0' WHERE `u_flag` = 1 '''.format(
            cur_price_table=cur_table)
        curs.execute(resetSql)
        conn.commit()
    except Exception:
        conn.rollback()
    finally:
        curs.close()
def getCurPriceByProdId(conn, table_name , prod_id):
    '''(real_price, cur_price) of one product in *table_name*, or None.'''
    curs = initClientEncode(conn)
    sqlStr = ''' select real_price,cur_price from {table_name} where prod_id = %s '''.format(
        table_name=table_name)
    curs.execute(sqlStr, (prod_id))
    row = curs.fetchone()
    curs.close()
    return row
# exceptions
class LogFormatException(Exception):
    """Signals a malformed spider-log line.

    (The previous docstring was copy-pasted from unrelated thread-pool code.)
    """
    pass
def attrsFromDict(d):
    """Set every entry of *d* (except 'self') as an attribute on d['self'].

    Intended to be called as ``attrsFromDict(locals())`` at the top of an
    ``__init__``.  Fix: ``dict.iteritems()`` was replaced with ``items()``,
    which behaves identically here and keeps the helper working on Python 3.
    Note: *d* is mutated — 'self' is popped out of it.
    """
    obj = d.pop('self')
    for key, value in d.items():
        setattr(obj, key, value)
class Singleton(object):
    ''' Pythonic singleton base class: the first instantiation is cached on
    the class and returned by every subsequent call.

    Fix: constructor arguments are no longer forwarded to object.__new__(),
    which rejects extra arguments (TypeError) on modern Pythons; __init__
    still receives *args/**kargs as usual.
    '''
    def __new__(cls,*args,**kargs):
        if '_inst' not in vars(cls):
            cls._inst = super(Singleton,cls).__new__(cls)
        return cls._inst
class SiteNameIDDictFactory(Singleton):
    '''Lazy, cached lookup of a site's id by en_name (table websit_base_info).'''
    _t_site_dict = None
    @classmethod
    def getSiteIdByName(cls, siteName):
        # Load the complete mapping from the DB once, on first use.
        if cls._t_site_dict is None:
            cls._t_site_dict = dict(getAllWebsiteBaseInfo())
        return cls._t_site_dict[siteName]
class CatBaseConfigDictFactory(Singleton):
    '''Cached access to `cat_base_config` rows, keyed by main_cat_id (as str).

    Row layout: (main_cat_id, baseinfo_table_name, priceinfo_cur_table_name,
    priceinfo_his_table_name, en_name).
    '''
    _t_cbconfig_dict = None
    @classmethod
    def _initDict(cls):
        # Populate the cache from the DB.
        for row in getAllCatBaseConfig():
            cls._t_cbconfig_dict[str(row[0])] = row
    @classmethod
    def getConfigById(cls, catBaseId):
        if cls._t_cbconfig_dict is None:
            cls._t_cbconfig_dict = {}
            cls._initDict()
        return cls._t_cbconfig_dict[str(catBaseId)]
    @classmethod
    def getCatBaseConfig(cls,catBaseId):
        # Wrap the raw row in a CatBaseConfig value object.
        row = cls.getConfigById(catBaseId)
        return CatBaseConfig(catBaseId, row[1], row[2], row[3])
def getCatKey(url):
    '''First 16 decimal digits of the MD5 of *url*, used as a raw category key.'''
    digest = hashlib.md5(url).hexdigest()
    return str(int(digest, 16))[0:16]
class ProdCatDictFactory(Singleton):
    '''Cache keyed "siteId_rawCatId" -> (id, self_cat_id, cat_base_id) tuples.'''
    _t_cat_dict = {}
    @classmethod
    def __getKey(cls, siteId, rawCatId):
        # Cache key format: "<siteId>_<rawCatId>".
        return '%s_%s' % (siteId, rawCatId)
    @classmethod
    def getProdCatId(cls, siteId, catUrl):
        # Memoised lookup; misses fall through to the database.
        rawCatId = getCatKey(catUrl)
        cacheKey = cls.__getKey(siteId, rawCatId)
        hit = cls._t_cat_dict.get(cacheKey)
        if hit is None:
            hit = getCatIdFromRawCatID(rawCatId, siteId)
            cls._t_cat_dict[cacheKey] = hit
        return hit
class ProdBaseInfoBuilder(Singleton):
    '''Builds a ProdBaseInfo from one '|'-separated spider-log line.'''
    def _getCatIdFromRawInfo(self,site_id,catUrl):
        # (id, self_cat_id, cat_base_id) for the category URL, or None.
        return ProdCatDictFactory.getProdCatId(site_id, catUrl)
    def getResult(self,linestr,site_id):
        # Returns a populated ProdBaseInfo, or None for malformed lines
        # (field count != 10) or unresolvable categories.
        ret = linestr.split('|')
        if len(ret) != 10:
            return
        prodBaseInfo = ProdBaseInfo()
        prodBaseInfo.site_id = site_id
        prodBaseInfo.raw_id = ret[0]
        prodBaseInfo.u_time = ret[2]
        prodBaseInfo.name = ret[3]
        evalNum = ret[4]
        if evalNum:
            evalNum = evalNum.replace(u',','')
        repu = float(ret[5])
        # Values above 5.1 are percentages (0-100); rescale to 0-5 stars.
        if repu > 5.1:
            repu = repu * 5 / 100
        prodBaseInfo.repu = str(repu)
        prodBaseInfo.eval_num = evalNum
        prodBaseInfo.url = ret[7]
        prodBaseInfo.img_url = ret[8]
        # NOTE(review): eval() on log data — only safe for trusted local logs.
        catUrl = eval(ret[-1])[0]
        ret = self._getCatIdFromRawInfo(site_id,catUrl)
        if ret:
            cat_id,self_cat_id,cat_base_id = ret
        else:
            #self.logger.info(' '.join([str(s) for s in (site_id,catUrl)]))
            return
        prodBaseInfo.cat_id = cat_id
        prodBaseInfo.self_cat_id = self_cat_id
        prodBaseInfo.cat_base_id = cat_base_id
        return prodBaseInfo
#===============================================================================
# Product 基础信息入库处理
#===============================================================================
def __getLine(fileName, encoding='gb18030'):
    ''' Yield each line of *fileName* stripped and decoded (logs are gb18030). '''
    with open(fileName, 'r') as logFile:
        for rawLine in logFile:
            yield rawLine.strip().decode(encoding, 'ignore')
def __judgeDuplicateProd(prodIdsDict, prodBaseInfo):
    '''True when the product is None or already known; otherwise record it.'''
    if prodBaseInfo is None:
        return True
    raw_id = prodBaseInfo.raw_id
    if raw_id in prodIdsDict:
        return True
    # Remember the id so the same product in one file is inserted only once.
    prodIdsDict[raw_id] = ''
    return False
def __j360BuyFilterFunc(prodBaseInfo):
    '''360buy rule: keep only self-operated items (raw_id not starting "100").'''
    rawId = str(getattr(prodBaseInfo, 'raw_id', ''))
    return not rawId.startswith('100')
# Per-site product filters, keyed by site id as a string.
filterFuncDict = {'6':__j360BuyFilterFunc}
def __getProdFilterFunc(site_id):
    '''Filter for the site; sites without a special rule accept everything.'''
    return filterFuncDict.get(str(site_id), lambda x: True)
def __chooseProd(prodBaseInfo,catBaseId):
    '''Accept a product only if it belongs to the target base category AND
    passes the site-specific filter (e.g. 360buy drops non-self-operated
    "100..." ids).  Loading is per-category, so off-category rows are
    rejected here too.'''
    if prodBaseInfo is None:
        return False
    siteFilter = __getProdFilterFunc(prodBaseInfo.site_id)
    inCategory = catBaseId == getattr(prodBaseInfo, 'cat_base_id', None)
    if inCategory and siteFilter(prodBaseInfo):
        return True
    return False
def __getExistProdRawId(table_name,siteId):
    '''raw_id -> id mapping of products already stored for the site.'''
    return dict(getAllRawProdIds(table_name,siteId))
def __singleProdLoad2Db(conn,line,site_id,catBaseId):
    ''' Parse one log line and insert it; returns the new row id or None. '''
    # The base-category id decides which base-info table receives the row.
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    tableName = catBaseConfig.tProdInfo
    prodBaseInfo = ProdBaseInfoBuilder().getResult(line, site_id)
    prodId = None
    if prodBaseInfo and __chooseProd(prodBaseInfo, catBaseId):
        prodId = saveProdBaseInfo(conn, tableName, prodBaseInfo)
    return prodId
def createProdBaseInfo(siteName, catBaseId, fileName):
''' 商品基本信息批量入库 '''
conn = getConnect()
site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
#根据类别id获取表的名字(不同的大分类入不同的表中)
catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
prodBaseInfoTable = catBaseConfig.tProdInfo
prodIdsDict = __getExistProdRawId(prodBaseInfoTable,site_id)
prodList = []
for line in __getLine(fileName):
builder = ProdBaseInfoBuilder()
prodBaseInfo = builder.getResult(line, site_id)
if __chooseProd(prodBaseInfo, catBaseId) and \
not __judgeDuplicateProd(prodIdsDict, prodBaseInfo):
print prodBaseInfo.name,prodBaseInfo.raw_id
prodList.append(prodBaseInfo)
if len(prodList) == 200:
batchSaveProdBaseInfo(conn, prodBaseInfoTable, prodList)
prodList = []
if prodList:
batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
conn.close()
#===============================================================================
# 价格信息处理
#===============================================================================
def wrapDecimal(priceStr):
    '''Strip ALL whitespace from a price string and return it as a Decimal.'''
    compact = ''.join(priceStr.split())
    return Decimal(str(compact))
class PriceInfoBuilder(object):
    '''Builds a ProdPriceInfo from one '|'-separated spider-log line.

    Subclasses override the get*() hooks to parse site-specific promotion
    text (adwords) into coupon / gift / order-cut / cash-back amounts; the
    base class treats every promotion as zero.
    '''
    def __init__(self,line=''):
        self.line = line
    @staticmethod
    def _calRealPrice(curPrice, coupon, orderCut, crashCut):
        # real = cur - coupon - orderCut - crashCut, computed with Decimal
        # then rendered back through float() to a plain numeric string.
        deIt = [wrapDecimal(p) for p in (curPrice, coupon, orderCut, crashCut)]
        return str(float(reduce(operator.sub,deIt)))
    def getAdwords(self):
        return self.adWords
    def setAdwords(self,adwords):
        self.adWords = adwords
    def getCoupon(self):
        '''Voucher/coupon amount (hook; default: none).'''
        return '0.00'
    def getExGift(self):
        '''Free-gift description (hook; default: none).'''
        return ''
    def getOrderCut(self):
        '''Instant order discount (hook; default: none).'''
        return '0.00'
    def getCrashCut(self):
        '''Cash-back amount (hook; default: none).'''
        return '0.00'
    def getSiteDefineFinalPrice(self):
        '''Final price explicitly advertised by the site ('0.00' = absent).'''
        return '0.00'
    def getRealPrice(self,curPrice):
        '''Effective price: the site-advertised final price when present,
        otherwise current price minus all parsed discounts.'''
        fPrice = self.getSiteDefineFinalPrice()
        if fPrice != '0.00':
            return fPrice
        return PriceInfoBuilder._calRealPrice(curPrice,
            self.getCoupon(),self.getOrderCut(),self.getCrashCut())
    def getResult(self,line,prod_id):
        # Parse one log line into a ProdPriceInfo; None for lines without
        # exactly 10 '|'-separated fields.
        ret = line.split('|')
        if len(ret) != 10:
            return
        prodPriceInfo = ProdPriceInfo()
        cur_price = ret[1].replace(' ','')
        prodPriceInfo.cur_price = cur_price
        prodPriceInfo.u_time = ret[2]
        prodPriceInfo.adwords = ret[6]
        # Hooks below read self.adWords, so set it before calling them.
        self.setAdwords(ret[6])
        prodPriceInfo.prod_id = prod_id
        prodPriceInfo.coupon = self.getCoupon()
        prodPriceInfo.crash_cut = self.getCrashCut()
        prodPriceInfo.ex_gift = self.getExGift()
        prodPriceInfo.order_cut = self.getOrderCut()
        # Computed from the raw ret[1], not the stripped cur_price —
        # wrapDecimal removes whitespace anyway.
        prodPriceInfo.real_price = self.getRealPrice(ret[1])
        return prodPriceInfo
class J360PriceInfoBuilder(PriceInfoBuilder):
    '''Extracts 360buy (Jingdong) promotion amounts from ad-word text.'''
    def getCoupon(self):
        '''Coupon amount from phrases like "50元京券" or "京券50".'''
        p1 = re.compile(ur'([0-9]+)元?京?券')
        p2 = re.compile(ur'京?券([0-9]+)')
        ret1 = p1.search(self.adWords)
        ret = ret1 if ret1 != None else p2.search(self.adWords)
        if ret != None:
            return ret.group(1)
        else:
            return '0.00'
    def getExGift(self):
        '''Gift text: from the last "赠" to the first comma/exclamation
        (both normalised to a space), or '' when absent.'''
        i = self.adWords.rfind(u'赠')# related wording: "送" (give)
        if i != -1:
            ret = self.adWords[i:].replace(u',', ' ').replace(u'!', ' ')
            ti = ret.find(' ')
            if ti != -1:
                return ret[0:ti]
            else:
                return ret
        else:
            return ''
    def getOrderCut(self):
        '''Instant discount from "下单...减N" / "下单直降N" phrases.'''
        p1 = re.compile(ur'下单.*减([0-9]+)')
        p2 = re.compile(ur'下单直降([0-9]+)')
        ret1 = p1.search(self.adWords)
        ret = ret1 if ret1 != None else p2.search(self.adWords)
        if ret != None:
            return ret.group(1)
        else:
            return '0.00'
    def getCrashCut(self):
        '''Cash-back amount from "返现N".'''
        p1 = re.compile(ur'返现([0-9]+)')
        ret1 = p1.search(self.adWords)
        if ret1 != None:
            return ret1.group(1)
        else:
            return '0.00'
    def getSiteDefineFinalPrice(self):
        '''Site-stated deal price from "相当于N" / "成交价N" / "N元成交".'''
        p1 = re.compile(ur'相当于([0-9]+)')
        p2 = re.compile(ur'成交价?([0-9]+)')
        p3 = re.compile(ur'([0-9]+)元?成交')
        ret1 = p1.search(self.adWords)
        ret2 = ret1 if ret1 != None else p2.search(self.adWords)
        ret = ret2 if ret2 != None else p3.search(self.adWords)
        if ret != None:
            return ret.group(1)
        else:
            return '0.00'
class Coo8PriceInfoBuilder(PriceInfoBuilder):
    '''Coo8 ad words use the form "<cash-back segment>@<gift segment>".'''
    def getExGift(self):
        # segment after '@'; raises IndexError when no '@' is present
        return self.adWords.split('@')[1]
    def getCrashCut(self):
        # digits taken from the segment before '@' (see getDigit helper)
        crashCutSeg = self.adWords.split('@')[0]
        return getDigit(crashCutSeg)
class GomePriceInfoBuilder(PriceInfoBuilder):
    '''Gome ad words carry the cash-back amount directly in the text.'''
    def getCrashCut(self):
        crashCutSeg = self.adWords
        return getDigit(crashCutSeg)
# icson and newegg reuse the 360buy and coo8 ad-word parsing respectively
IcsonPriceInfoBuilder = J360PriceInfoBuilder
NewEggPriceInfoBuilder = Coo8PriceInfoBuilder
class PriceInfoBuilderFactory(Singleton):
    '''Maps a site name to its PriceInfoBuilder subclass.'''
    _builder_map = {'360buy':J360PriceInfoBuilder,'coo8':Coo8PriceInfoBuilder,
                    'gome':GomePriceInfoBuilder,'icson':IcsonPriceInfoBuilder,
                    'newegg':NewEggPriceInfoBuilder}
    @classmethod
    def getPriceBuilder(cls,websiteName):
        '''Builder class for websiteName; generic PriceInfoBuilder fallback.'''
        return cls._builder_map.get(websiteName,PriceInfoBuilder)
def __getRawId(line):
    '''Return field 0 (the site-local product id) of a 10-field log line,
    or None when the field count is wrong.'''
    fields = line.split('|')
    if len(fields) == 10:
        return fields[0]
    return None
def __getDateStrFromFileName(fName):
    '''Extract the first YYYY-MM-DD substring from a file name, or None.'''
    match = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', fName)
    return match.group() if match else None
class LoaderConfig(object):
    '''Lazy reader of load.conf (section [conf]) for the loader.'''
    @classmethod
    def _init(cls):
        # load.conf is looked up beside the started script, not the cwd
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        fileName = os.path.join(curPath, 'load.conf')
        cls.cf = ConfigParser()
        cls.cf.read(fileName)
    @classmethod
    def getConfig(cls,option):
        '''Return the option's value, or None when missing/unreadable.'''
        if not hasattr(cls, 'cf'):
            cls._init()
        try:
            return cls.cf.get('conf',option)
        except Exception:
            pass
    @classmethod
    def getBaseDir(cls):
        '''Directory scanned for per-site log folders; required.'''
        ret = cls.getConfig('base_dir')
        if ret:
            return ret
        else:
            raise Exception('No base_dir found')
    @classmethod
    def getBackupDir(cls):
        '''Directory processed log files are archived into; required.'''
        ret = cls.getConfig('backup_dir')
        if ret:
            return ret
        else:
            raise Exception('No backup_dir found')
    @classmethod
    def getSitesSet(cls):
        '''Set of enabled site names (comma separated); empty when unset.'''
        sites = cls.getConfig('sites')
        if sites:
            return set([site.strip() for site in sites.split(',')])
        return set()
    @classmethod
    def getCatId(cls):
        '''Configured category id as int, or None when unset.'''
        cat_id = cls.getConfig('cat_id')
        if cat_id:
            return int(cat_id)
def ld2CurPriceInfo(siteName,fileName,catBaseId,newProdSaveFlag = True):
    '''Load one spider log file into the current-price table.

    Unknown products are optionally inserted first (newProdSaveFlag);
    an existing price row is updated only when real/current price
    changed, and rows with implausible values (< 0.1) are skipped.
    '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    # resolve per-main-category table names (big categories live in
    # separate tables)
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    priceCurTable = catBaseConfig.tProdPriceCur
    prodIdDict = __getExistProdRawId(catBaseConfig.tProdInfo,site_id)
    #raw_id_set = set()
    conn = getConnect()
    for line in __getLine(fileName):
        raw_id = __getRawId(line)
        if raw_id is None:# or raw_id in raw_id_set:
            continue
        #raw_id_set.add(raw_id)
        prod_id = prodIdDict.get(raw_id, None)
        if prod_id is None and newProdSaveFlag:
            prod_id = __singleProdLoad2Db(conn,line,site_id,catBaseId)
            #print 'insert new prod %s' % prod_id
            if prod_id:# filtered-out products come back without an id
                prodIdDict[raw_id] = prod_id
        if prod_id is None:
            #print 'break raw_id is %s ' % raw_id
            continue
        PriceInfoBuilderClass = PriceInfoBuilderFactory.getPriceBuilder(siteName)
        priceInfoBuilder = PriceInfoBuilderClass()
        try:
            priceInfo = priceInfoBuilder.getResult(line,prod_id)
        except Exception ,e:
            print e
            continue
        curPriceInfo = getCurPriceByProdId(conn,priceCurTable,prod_id)
        if curPriceInfo:
            real_price,cur_price = curPriceInfo
            if wrapDecimal(real_price) != wrapDecimal(priceInfo.real_price) or \
                wrapDecimal(cur_price) != wrapDecimal(priceInfo.cur_price):
                diff_price = (wrapDecimal(real_price) - wrapDecimal(priceInfo.cur_price))
                if wrapDecimal(priceInfo.real_price) < Decimal('0.1') or diff_price < Decimal('0.1'):
                    continue
                priceInfo.diff_price = str(diff_price)
                print curPriceInfo,priceInfo.prod_id,priceInfo.real_price,priceInfo.cur_price
                updateProdPriceInfo(conn,priceCurTable,priceInfo)
            #else:
                #pass
                #print 'no price change'
        else:
            priceInfo.diff_price = '0.00'
            if wrapDecimal(priceInfo.real_price) > Decimal('0.1'):
                print 'save raw_id : %s price is:%s ' % (raw_id,priceInfo.real_price)
                saveProdPriceInfo(conn,priceCurTable,priceInfo)
    conn.close()
def ld2HisPriceInfo(catBaseId, siteName):
    ''' Copy the freshly loaded rows into the historical price table. '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    priceHisTable = catBaseConfig.tProdPriceHis
    priceCurTable = catBaseConfig.tProdPriceCur
    prodBaseInfoTable = catBaseConfig.tProdInfo
    #raw_id_set = set()
    conn = getConnect()
    savePriceInfo2HisTable(conn,priceHisTable,priceCurTable,
                           prodBaseInfoTable,site_id)
    conn.close()
def __isAlreadyLoadDb(siteName,catBaseId,dateStr):
    '''True when prod_db_stat already records data at least as new as
    dateStr for this site/category; only newer files may be loaded.'''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    db_u_time = getProdStatInfo(site_id,catBaseId)
    if db_u_time and dateStr <= db_u_time.strftime('%Y-%m-%d'):
        return True
    else:
        return False
def updateProdStatLoadDb(siteName,catBaseId,dateStr):
    ''' Record dateStr as the newest loaded date for this site/category:
    insert on first load, update only when dateStr is newer. '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    db_u_time = getProdStatInfo(site_id,catBaseId)
    if db_u_time is None:
        saveProdStat(site_id, catBaseId, dateStr)
    elif dateStr > db_u_time.strftime('%Y-%m-%d'):
        updateProdStat(site_id, catBaseId, dateStr)
def backuplogs(src,siteName):
    '''Move a processed log file into <backup_dir>/<siteName>.'''
    dst_base = LoaderConfig.getBackupDir()
    dst = os.path.join(dst_base,siteName)
    if not os.path.isdir(dst):
        # NOTE(review): os.mkdir assumes dst_base itself exists -- confirm
        os.mkdir(dst)
    print 'moving file from %s to %s' % (src,dst)
    shutil.move(src, dst)
def uProdBaseInfo(siteName, catBaseId, fileName):
    ''' Batch-update existing product base-info rows from a spider log
    (flushed to the database in batches of 200). '''
    conn = getConnect()
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    # resolve the per-main-category table name (big categories live in
    # separate tables)
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    prodBaseInfoTable = catBaseConfig.tProdInfo
    #prodIdsDict = __getExistProdRawId(prodBaseInfoTable,site_id)
    prodList = []
    for line in __getLine(fileName):
        builder = ProdBaseInfoBuilder()
        prodBaseInfo = builder.getResult(line, site_id)
        if prodBaseInfo:
            print prodBaseInfo.name,prodBaseInfo.raw_id
            prodList.append(prodBaseInfo)
        if len(prodList) == 200:
            batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
            prodList = []
    if prodList:
        batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
    conn.close()
def loadDataToDB(files,catBaseId,beginDate = None,endDate = None):
    '''Load every dated log file in *files* (optionally bounded by
    beginDate/endDate, inclusive) for the sites enabled in load.conf,
    then archive each processed file.'''
    for fileName in files:#itertools.chain(files[1:],files[0:1]):
        fName = os.path.split(fileName)[-1]
        dateStr = __getDateStrFromFileName(fName)
        if dateStr is None:
            print 'file :%s ,no date str found.' % fileName
            continue
        if beginDate != None and dateStr != None and cmp(dateStr,beginDate) < 0:
            continue
        elif endDate != None and dateStr != None and cmp(dateStr,endDate) > 0:
            # NOTE(review): break assumes date-sorted input -- confirm
            break
        siteName = fName.split('_')[0]
        if siteName in LoaderConfig.getSitesSet():
            print 'current proc file is :%s' % fileName
            if __isAlreadyLoadDb(siteName, catBaseId, dateStr):
                print '**** file :%s is already load to db *****' % fileName
                backuplogs(fileName,siteName)
                continue
            try:
                ld2CurPriceInfo(siteName, fileName, catBaseId)
                ld2HisPriceInfo(catBaseId,siteName)
                updateProdStatLoadDb(siteName,catBaseId,dateStr)
                backuplogs(fileName,siteName)
            except Exception ,e:
                print e
        else:
            print 'site :%s is not in load set' % siteName
#===============================================================================
# module 测试
#===============================================================================
def testProdBaseInfoBuilder():
    '''Ad-hoc smoke test with a captured 360buy log line.'''
    testStr = '''429958|1199.00|2011-09-21 20:42:47|TCL BCD-176K50 176升 两门 冰箱(银灰色)
    |92|1471|直降400元!冰箱销量冠军!高效压缩机|http://www.360buy.com/product/429958.html||
    ('http://www.360buy.com/products/737-794-878.html', 3)'''
    builder = ProdBaseInfoBuilder()
    ret = builder.getResult(testStr,site_id = 6)
    print ret
def testCatBaseConfigDictFactory():
    '''Ad-hoc smoke test: fetch the same cat-base config repeatedly.'''
    for t in xrange(10):
        print CatBaseConfigDictFactory.getCatBaseConfig(1)
import glob
def load(rootBase):
    '''Walk per-site sub-directories of rootBase and load their *log* files.'''
    for rootDir in os.listdir(rootBase):
        if rootDir.find('.') >= 0:  # skip plain files / dotted entries
            continue
        rootDir = os.path.join(rootBase,rootDir)
        if os.path.isdir(rootDir):
            plog = r'%s%s*log*' % (rootDir,os.sep)
            files = glob.glob(plog)
            print files
            loadDataToDB(files,catBaseId = LoaderConfig.getCatId())
if __name__ == '__main__':
    #testProdBaseInfoBuilder()
    #createProdBaseInfo(siteName = u'amazon', catBaseId = 1, fileName = r'F:\spider_exe-20111222\spider_exe\amazon\amazon_spider.log')
    #uProdBaseInfo(siteName=u'coo8',catBaseId = 1,fileName=r'F:\python_workspace\exe\coo8\log\coo8_spider.log.2011-12-29')
    #rootBase=r'I:/log'
    # entry point: load every configured site's logs found under base_dir
    load(rootBase = LoaderConfig.getBaseDir())
    #ld2HisPriceInfo(1, '2011-11-13',2)
    #createProdBaseInfo(siteName = '360buy',catBaseId = 1,
    #                   fileName = r'I:\log\360buy\360buy_spider.log.2011-10-24')
# | Python | -- file-boundary artifact left by concatenating source files
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import re,os,sys,itertools,glob
from dbproc.basedbproc import *
from decimal import Decimal
def wrapDecimal(priceStr):
    '''Convert any price-like value to Decimal via its string form.'''
    text = str(priceStr)
    return Decimal(text)
def calRealPrice(curPrice, coupon, orderCut, crashCut,extraCut):
    '''Listed price minus coupon, order cut, cash-back and extra cut,
    returned as str(float(...)).'''
    remaining = Decimal(str(curPrice))
    for cut in (coupon, orderCut, crashCut, extraCut):
        remaining -= Decimal(str(cut))
    return str(float(remaining))
class PriceInfoBuilder(object):
    '''Base price extractor: subclasses override the get* hooks to parse
    promotion amounts out of a specific site's ad words.'''
    def __init__(self,adWords,curPrice = 0.00,extraCut = 0.00):
        '''Keep the raw ad text plus the listed price and any extra cut.'''
        self.adWords = adWords
        self.curPrice = curPrice
        self.extraCut = extraCut
    def getCoupon(self):
        '''Coupon rebate; none by default.'''
        return '0.00'
    def getExGift(self):
        '''Free-gift description; none by default.'''
        return ''
    def getOrderCut(self):
        '''Instant order discount; none by default.'''
        return '0.00'
    def getCrashCut(self):
        '''Cash-back amount; none by default.'''
        return '0.00'
    def getCurPrice(self):
        '''Listed price as given to the constructor.'''
        return self.curPrice
    def getSiteDefineFinalPrice(self):
        '''Site-advertised final price; '0.00' means not stated.'''
        return '0.00'
    def getRealPrice(self):
        '''Effective price: the site-stated final price when present,
        otherwise the listed price minus every discount component.'''
        declared = self.getSiteDefineFinalPrice()
        if declared != '0.00':
            return declared
        return calRealPrice(self.curPrice, self.getCoupon(),
                            self.getOrderCut(), self.getCrashCut(),
                            self.extraCut)
class J360PriceInfoBuilder(PriceInfoBuilder):
def __init__(self, adWords, curPrice=0.00, extraCut=0.00):
PriceInfoBuilder.__init__(self, adWords, curPrice=curPrice, extraCut=extraCut)
def getCoupon(self):
p1 = re.compile(ur'([0-9]+)元?京?券')
p2 = re.compile(ur'京?券([0-9]+)')
ret1 = p1.search(self.adWords)
ret = ret1 if ret1 != None else p2.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
def getExGift(self):
i = self.adWords.rfind(u'赠')#//送
if i != -1:
ret = self.adWords[i:].replace(u',', ' ').replace(u'!', ' ')
ti = ret.find(' ')
if ti != -1:
return ret[0:ti]
else:
return ret
else:
return ''
def getOrderCut(self):
p1 = re.compile(ur'下单.*减([0-9]+)')
p2 = re.compile(ur'下单直降([0-9]+)')
ret1 = p1.search(self.adWords)
ret = ret1 if ret1 != None else p2.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
def getCrashCut(self):
p1 = re.compile(ur'返现([0-9]+)')
ret1 = p1.search(self.adWords)
if ret1 != None:
return ret1.group(1)
else:
return '0.00'
def getSiteDefineFinalPrice(self):
p1 = re.compile(ur'相当于([0-9]+)')
p2 = re.compile(ur'成交价?([0-9]+)')
p3 = re.compile(ur'([0-9]+)元?成交')
ret1 = p1.search(self.adWords)
ret2 = ret1 if ret1 != None else p2.search(self.adWords)
ret = ret2 if ret2 != None else p3.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
#===============================================================================
# Product 基础信息入库处理
#===============================================================================
def parserProdBaseInfo(site_id, prodIdsDict, line):
    '''Parse one spider log line into the prod_base_info insert tuple.

    Returns None for malformed lines (field count != 10) and for
    products already present in prodIdsDict; otherwise records the
    raw_id in prodIdsDict (so each product is inserted only once per
    file) and returns the parameter tuple.
    '''
    import ast
    ret = line.split('|')
    if len(ret) != 10: # wrong number of columns
        return
    raw_id = ret[0]
    if prodIdsDict.get(raw_id,None) != None: # already in the database
        return
    prodIdsDict[raw_id] = 'exist' # each product stored only once per file
    # the last field is a repr'd tuple like ('<cat url>', 3); use
    # ast.literal_eval instead of eval -- the log line is untrusted input
    catUrl = ast.literal_eval(ret[-1])[0]
    cat_id = getCatIdFromRawInfo(site_id, catUrl) # TODO: new/unknown categories yield no id
    name = ret[3]
    repu = ret[4]
    eval_num = ret[5]
    url = ret[7]
    img_url = ret[8]
    u_time = ret[2]
    param = (site_id, raw_id, name, url, img_url, repu,
             eval_num, cat_id, u_time)
    return param
def getLine(fileName,encoding='gb18030'):
    '''Yield each line of fileName, stripped and decoded to unicode
    (undecodable bytes are silently dropped).'''
    with open(fileName, 'r') as src:
        for raw in src:
            yield raw.strip().decode(encoding, 'ignore')
def createProdBaseInfo(siteName, fileName):
''' 商品基本信息批量入库 '''
site_id = getSiteIdByName(siteName)
conn = getConnect()
params = list()
i = 0
prodIdsDict = getProdInfoRawIDMapId(site_id)
for line in getLine(fileName):
param = parserProdBaseInfo(site_id, prodIdsDict, line)
if param is None:
continue
print 'create prod base info %s,%s' % (param[1],param[2])
params.append(param)
i = i + 1
if i == 200:
batchSaveProdBaseInfo(conn, params)
params = list()
i = 0
if i > 0:
batchSaveProdBaseInfo(conn, params)
del params
conn.close()
def procDbLatestPriceInfo(conn,prod_id,u_time,real_price,cur_price):
    '''Decide whether a parsed price row should be inserted.

    Returns '0.00' (string) when the product has no price history,
    None when the row must be skipped (same-day duplicate, older than
    the newest stored row, or unchanged prices), otherwise the Decimal
    difference against the newest stored real_price.
    '''
    ret = getProdPriceInfoFromProdId(conn,prod_id=prod_id)
    if ret is None:
        return '0.00'
    cur_date = u_time.split()[0]
    date_list = [it[2].strftime("%Y-%m-%d") for it in ret]
    for v in date_list:
        if cmp(cur_date, v) == 0:# same-day rows are not stored twice; when a day is crawled repeatedly this loses data
            #print prod_id,u_time
            return
#        elif cmp(cur_date,v) < 0:
#            retPrices = ret[i][0:2]
#            break
    if cmp(cur_date,ret[0][2].strftime("%Y-%m-%d")) < 0:
        return
    retPrices = ret[0][0:2]
    if retPrices != None and wrapDecimal(real_price) == wrapDecimal(retPrices[0]) and wrapDecimal(cur_price) == wrapDecimal(retPrices[1]):
        return
    elif retPrices is None:
        diff_price = 0.00
    else:
        diff_price = (wrapDecimal(real_price) - wrapDecimal(retPrices[0]))
    return diff_price
def parserProdPriceInfo(conn,site_id, line, raw_id_set, prodIdsDict, BuilderClass):
    '''Parse one log line into a prod_price_info insert tuple.

    Returns None for malformed / duplicate / zero-price lines and for
    rows procDbLatestPriceInfo() rejects; unknown products are first
    inserted into the base table (which assigns their prod_id).
    '''
    ret = line.split('|')
    if len(ret) != 10:
        return
    raw_id = ret[0]
    if raw_id in raw_id_set:  # only the first occurrence per file counts
        return
    raw_id_set.add(raw_id)
    cur_price = ret[1]
    if str(cur_price) == '0.00':
        #print 'Price is 0.00 %s,' % line
        return
    u_time = ret[2]
    adwords = ret[6]
    builder = BuilderClass(adWords=adwords, curPrice=cur_price)
    coupon = builder.getCoupon()
    ex_gift = builder.getExGift()
    order_cut = builder.getOrderCut()
    crash_cut = builder.getCrashCut()
    real_price = builder.getRealPrice()
    prod_id = prodIdsDict.get(raw_id, None)
    if prod_id is None:
        #return
        # unseen product: create its base-info row on the fly
        param = parserProdBaseInfo(site_id, prodIdsDict, line)
        param_c = list()
        param_c.append(conn)
        param_c.extend(param)
        prod_id = saveProdBaseInfo(*param_c)
        prodIdsDict[raw_id] = prod_id
        print 'new prod raw_id:%s,id:%s,name:%s' % (raw_id,prod_id,param[2])
    diff_price = procDbLatestPriceInfo(conn,prod_id,u_time,real_price,cur_price)
    if diff_price is None :
        return
    param = (prod_id, real_price, cur_price, 0.00, diff_price, 0.00, 0.00, adwords, coupon, ex_gift, order_cut, crash_cut, u_time)
    return param
def createProdPriceInfo(siteName,fileName, PriceInfoBuilderClass = PriceInfoBuilder, encoding='gb2312'):
site_id = getSiteIdByName(siteName)
prodIdsDict = dict(getAllRawProdIdsBySite(site_id))#key:raw_id value:id
if len(prodIdsDict) == 0:
createProdBaseInfo(siteName,fileName,encoding)
prodIdsDict = dict(getAllRawProdIdsBySite(site_id))
i = 0
params = list()
raw_id_set = set()
conn = getConnect()
with open(fileName, 'r') as fInput:
for line in fInput:
line = line.strip().decode(encoding, 'ignore')
param = parserProdPriceInfo(conn,site_id, line, raw_id_set,
prodIdsDict, PriceInfoBuilderClass)
if param is None:
continue
print 'create prod price info %s,%s' % (param[0],param[4])
params.append(param)
i = i + 1
if i == 200:
batchSaveProdPriceInfo(conn, params)
params = list()
i = 0
if i > 0:
batchSaveProdPriceInfo(conn, params)
del params
#saveProdPriceInfo(conn, prod_id=prod_id, real_price=real_price, cur_price=cur_price,diff_price = 0.00, adwords=adwords,
# coupon=coupon, ex_gift=ex_gift,order_cut=order_cut, crash_cut=crash_cut)
conn.close()
def __getDateStrFromFileName(fName):
    '''Extract the first YYYY-MM-DD substring from a file name, or None.'''
    match = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', fName)
    return match.group() if match else None
def loadDataToDB(files,beginDate = None,endDate = None):
    '''Load each dated log file (date bounds inclusive); 360buy gets its
    specialised ad-word parser.'''
    # rotate the list so files[0] is handled last
    for fileName in itertools.chain(files[1:],files[0:1]):
        fName = os.path.split(fileName)[-1]
        dateStr = __getDateStrFromFileName(fName)
        if beginDate != None and dateStr != None and cmp(dateStr,beginDate) < 0:
            continue
        elif endDate != None and dateStr != None and cmp(dateStr,endDate) > 0:
            # NOTE(review): break assumes date-sorted input -- confirm
            break
        siteName = fName.split('_')[0]
        print fileName
        if siteName == '360buy':
            createProdPriceInfo(siteName, fileName, J360PriceInfoBuilder)
        else:
            createProdPriceInfo(siteName, fileName)
if __name__ == '__main__':
    #createProdBaseInfo('amazon',r'I:\log\amazon\amazon_spider.log.2011-09-20')
    # entry point: walk every per-site directory under rootBase and load
    # its *log* files for the configured date
    rootBase = r'I:\log'
    for rootDir in os.listdir(rootBase):
        if rootDir.find('.') >= 0:  # skip plain files / dotted entries
            continue
        rootDir = os.path.join(rootBase,rootDir)
        if os.path.isdir(rootDir):
            plog = r'%s%s*log*' % (rootDir,os.sep)
            files = glob.glob(plog)
            print files
            loadDataToDB(files,beginDate = '2011-10-03',endDate='2011-10-03')
# | Python | -- file-boundary artifact left by concatenating source files
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import os
import hashlib
#DB parameter
def update360buyRepu():
    '''One-off migration: rescale 360buy (site_id 6) reputation values
    from a 0-100 score to 0-5, paging 2000 rows at a time.'''
    conn = getConnect()
    qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)
    sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'
    for result in qPage.queryForList(sql=sqlStr):
        prodList = []
        for prod in result:
            id = prod[0]
            repu = prod[1]
            # NOTE(review): '/' truncates for ints under Python 2 --
            # confirm the repu column type / intended rounding
            repu = repu * 5 / 100
            print repu, id
            prodList.append((repu, id))
        print '+++'
        batchUpdateProdBaseInfo(conn, prodList)
    conn.close()
# MySQL connection defaults consumed by getConnect()
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''  # NOTE(review): empty root password -- confirm dev-only
def seEncode(ustr, encoding='utf-8'):
    '''Encode a value for database storage: unicode is encoded (utf-8 by
    default), containers become '[a,b,...]' with each element encoded
    recursively, None becomes '', anything else is str()'d.'''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list, tuple, set)):
        encoded = [seEncode(item, encoding) for item in ustr]
        return '[%s]' % ','.join(encoded)
    return str(ustr)
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a MySQLdb connection.

    Bug fix: the original ignored every argument and always connected
    with the module-level defaults, so callers could never select a
    different database/host/user/charset.
    '''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
    '''Switch the session's client encoding to utf8 and return a cursor.'''
    curs = conn.cursor()
    curs.execute("SET NAMES utf8")
    conn.commit()
    return curs
class MySQLQueryPagination(object):
    '''MySQL pagination helper: wraps the caller's SQL in
    "select * from (...) limit offset,count" and yields page by page.'''
    def __init__(self,conn,numPerPage = 20):
        self.conn = conn
        self.numPerPage = numPerPage
    def queryForList(self,sql,param = None):
        '''Yield one fetchall() tuple per page of the result set.'''
        totalPageNum = self.__calTotalPages(sql,param)
        for pageIndex in xrange(totalPageNum):
            yield self.__queryEachPage(sql,pageIndex,param)
    def __createPaginaionQuerySql(self,sql,currentPageIndex):
        # wrap the caller's SQL so LIMIT applies to the whole result
        startIndex = self.__calStartIndex(currentPageIndex)
        qSql = r'select * from (%s) total_table limit %s,%s' % (sql,startIndex,self.numPerPage)
        return qSql
    def __queryEachPage(self,sql,currentPageIndex,param = None):
        # fetch a single page with a fresh utf8 cursor
        curs = initClientEncode(self.conn)
        qSql = self.__createPaginaionQuerySql(sql, currentPageIndex)
        if param is None:
            curs.execute(qSql)
        else:
            curs.execute(qSql,param)
        result = curs.fetchall()
        curs.close()
        return result
    def __calStartIndex(self,currentPageIndex):
        startIndex = currentPageIndex * self.numPerPage;
        return startIndex;
    def __calTotalRowsNum(self,sql,param = None):
        ''' Count the total number of rows of the caller's query. '''
        tSql = r'select count(*) from (%s) total_table' % sql
        curs = initClientEncode(self.conn)
        if param is None:
            curs.execute(tSql)
        else:
            curs.execute(tSql,param)
        result = curs.fetchone()
        curs.close()
        totalRowsNum = 0
        if result != None:
            totalRowsNum = int(result[0])
        return totalRowsNum
    def __calTotalPages(self,sql,param):
        ''' Total number of pages (Python 2 integer division). '''
        totalRowsNum = self.__calTotalRowsNum(sql,param)
        totalPages = 0;
        if (totalRowsNum % self.numPerPage) == 0:
            totalPages = totalRowsNum / self.numPerPage;
        else:
            totalPages = (totalRowsNum / self.numPerPage) + 1
        return totalPages
    def __calLastIndex(self, totalRows, totalPages,currentPageIndex):
        '''Index just past the current page; currently unused.'''
        lastIndex = 0;
        if totalRows < self.numPerPage:
            lastIndex = totalRows;
        elif ((totalRows % self.numPerPage == 0)
            or (totalRows % self.numPerPage != 0 and currentPageIndex < totalPages)) :
            lastIndex = currentPageIndex * self.numPerPage
        elif (totalRows % self.numPerPage != 0 and currentPageIndex == totalPages): # last page
            lastIndex = totalRows
        return lastIndex
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
    '''Return (en_name, id) rows for every site in websit_base_info.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT en_name,id FROM `websit_base_info` ' # build the query
    curs.execute(sqlStr)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `cat_base_config` db 操作
#===============================================================================
def getAllCatBaseConfig():
    '''Return every cat_base_config row: (main_cat_id, base-info table,
    current-price table, historical-price table, en_name).'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
    en_name FROM `cat_base_config`''' # build the query
    curs.execute(sqlStr)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def saveProdCat(rawCatId, siteId, parentId, url, name, parentPath,
level, self_cat_id = 0,cat_base_id = 0 ):
''' 保存各站点的分类信息 '''
conn = getConnect()
curs = initClientEncode(conn)
if parentId == '':
print url
sqlStr = '''INSERT INTO `prod_catagory` (`raw_cat_id` ,`site_id` ,`parent_id` ,`url` ,`name` ,`parent_path` ,`level` , self_cat_id, cat_base_id,`update_time` )
VALUES ( %s, %s, %s, %s, %s, %s, %s, now()) '''
param = [seEncode(pt) for pt in (rawCatId, siteId, parentId, url, name, parentPath, level,self_cat_id, cat_base_id,)]
curs.execute(sqlStr, param)
curs.close()
conn.close()
return int(curs.lastrowid)
def match55bigoCats(site_id,name):
    '''Find level-3 categories whose name contains *name*, limited to the
    given site or the shared site (site_id 9).

    Security fix: the category name is now bound as a query parameter
    instead of being interpolated into the SQL text via str.format,
    closing an injection hole.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''SELECT id,site_id,self_cat_id,name,url,cat_base_id FROM `prod_catagory` where (site_id =9 or site_id=%s) AND LEVEL =3 AND name LIKE %s '''
    param = [site_id, '%' + name + '%']
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Return (id, self_cat_id, cat_base_id) for a raw category key, or
    None when the category is unknown.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s'
    param = (raw_cat_id, site_id)
    curs.execute(sqlStr, param)
    result = curs.fetchone()
    curs.close()
    conn.close()
    if result != None:
        return result
def getCatBySiteIdAndLevel(site_id,level):
    '''Return (id, name, parent_path) rows for a site's categories at
    the given level.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id ,name,parent_path FROM `prod_catagory` where site_id = %s and level = %s'
    param = ( site_id,level)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_base_info` db proc
#===============================================================================
def saveProdBaseInfo(conn,site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time):
    '''Insert one row into prod_base_info and return its new id.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
    VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    param = [seEncode(pt) for pt in (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    return int(ret)
def batchSaveProdBaseInfo(conn, params):
    '''Bulk-insert rows into prod_base_info; params is a sequence of
    tuples in saveProdBaseInfo's column order.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
    VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def getProdId(conn, site_id, raw_id):
    ''' Return the prod_base_info id for (site_id, raw_id), or None. '''
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id FROM `prod_base_info` where site_id = %s and raw_id = %s'
    param = (site_id, raw_id)
    curs.execute(sqlStr, param)
    result = curs.fetchone()
    curs.close()
    if result != None:
        return result[0]
def getAllRawProdIdsBySite(site_id):
    '''Return all (raw_id, id) pairs of prod_base_info rows for a site.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT raw_id,id FROM `prod_base_info` where site_id = %s'
    # fix: (site_id) is just site_id, not a tuple -- pass a proper
    # 1-tuple so the driver always receives a parameter sequence
    param = (site_id,)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_price_info` db proc
#===============================================================================
def saveProdPriceInfo(prod_id, real_price, cur_price, diff_price, adwords, coupon=0.00, ex_gift='',
                      order_cut=0.00, crash_cut=0.00, m_price=0.00, trans_price=0.00, other_dis=0.00, u_time = None):
    '''Insert one row into prod_price_info and return its new id.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    #if isinstance(u_time, basestring):
    #    u_time = strptime(u_time,"%Y-%m-%d %H:%M:%S")
    param = [seEncode(pt) for pt in (prod_id, real_price, cur_price, m_price, diff_price, trans_price, other_dis, adwords, coupon, ex_gift, order_cut, crash_cut,u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    conn.close()
    return int(ret)
def batchSaveProdPriceInfo(conn, params):
    '''Bulk-insert rows into prod_price_info; params is a sequence of
    tuples in saveProdPriceInfo's column order.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def getProdPriceInfoFromProdId(conn, prod_id):
    '''Return (real_price, cur_price, u_time, id) rows for a product,
    newest first, or None when the product has no price history.'''
    curs = initClientEncode(conn)
    sqlStr = '''select real_price,cur_price,u_time,id from `prod_price_info` where prod_id=%s order by u_time DESC'''
    # fix: (prod_id) is not a tuple -- use a proper 1-tuple for the driver
    param = (prod_id,)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    if len(result) > 0:
        return result
def getMd5Key(src):
    '''MD5 digest of src interpreted as a (big) integer.'''
    digest = hashlib.md5(src).hexdigest()
    return int(digest, 16)
def getCatKey(url):
    '''First 16 decimal digits of the url's MD5 integer key.'''
    return str(getMd5Key(url))[0:16]
class SiteNameIDDict(object):
    '''Singleton mapping site en_name -> id, loaded from the DB once.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: the name->id table is fetched only on first creation.
        '''
        if '_inst' not in vars(cls):
            cls.t_site_dict = dict(getAllWebsiteBaseInfo())
            cls._inst = super(SiteNameIDDict, cls).__new__(cls)
        return cls._inst
    def getSiteIdByName(self, siteName):
        # raises KeyError for unknown site names
        return self.t_site_dict[siteName]
def getSiteIdByName(siteName):
    '''Module-level convenience wrapper over the SiteNameIDDict singleton.'''
    siteNameIDDict = SiteNameIDDict()
    return siteNameIDDict.getSiteIdByName(siteName)
class ProdCatIdDict(object):
    '''Singleton cache of (site, raw category key) -> category row.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: one shared lookup cache per process.
        '''
        if '_inst' not in vars(cls):
            cls.t_cat_dict = dict()
            cls._inst = super(ProdCatIdDict, cls).__new__(cls)
        return cls._inst
    def __getKey(self, siteId, rawCatId):
        # cache key: "<siteId>_<rawCatId>"
        return '_'.join(map(str, (siteId, rawCatId)))
    def getProdCatId(self, siteId, catUrl):
        '''Category row for a category URL; note that unknown categories
        are re-queried every call (None is stored but treated as a miss).'''
        rawCatId = getCatKey(catUrl)
        key = self.__getKey(siteId, rawCatId)
        value = self.t_cat_dict.get(key, None)
        if value is None:
            value = getCatIdFromRawCatID(rawCatId, siteId)
            self.t_cat_dict[key] = value
        return value
def getCatIdFromRawInfo(siteId, catUrl):
    '''Resolve a category URL to its prod_catagory id (first column of
    the cached row), or None when the category is not in the database.'''
    catIdDict = ProdCatIdDict()
    ret = catIdDict.getProdCatId(siteId, catUrl)
    if ret :
        return ret[0]
def getProdInfoRawIDMapId(siteId):
    '''Dict of raw_id -> id for every product of the given site.'''
    return dict(getAllRawProdIdsBySite(siteId))#key:raw_id value:id
# directory of this module and its bundled test fixtures
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test(siteName, encoding='gb2312'):
    '''Ad-hoc developer test: scan the bundled 360buy log, dump ad words
    mentioning an order discount, and bulk-insert unseen products.'''
    site_id = getSiteIdByName(siteName)
    fileName = os.path.join(testFilePath, '360buy_spider.log')
    conn = getConnect()
    params = list()
    i = 0
    prodIds = set([ t[0] for t in getAllRawProdIdsBySite(site_id)])
    print len(prodIds)
    import re
    p = re.compile('[0-9]+')
    p1 = re.compile(u'下单')
    # NOTE(review): 'c:t360buy_jian.log' looks like a missing path
    # separator after the drive letter -- confirm the intended path
    fOutput = open('c:t360buy_jian.log', 'w')
    with open(fileName, 'r') as fInput:
        for line in fInput:
            line = line.strip().decode(encoding, 'ignore')
            ret = line.split('|')
            raw_id = ret[0]
            if p.search(ret[6]) != None and p1.search(ret[6]) != None:
                fOutput.write(ret[0] + ' ' + ret[6] + '\n')
            #if getProdId(site_id,raw_id) != None:
            if raw_id in prodIds:
                #print '++++++++++++++++++++++++++'
                continue
            prodIds.add(raw_id)
            name = ret[3]
            repu = ret[4]
            eval_num = ret[5]
            url = ret[7]
            img_url = ret[8]
            catUrl = eval(ret[-1])[0]
            cat_id = getCatIdFromRawInfo(site_id, catUrl)
            u_time = ret[2]
            #print raw_id , name , url , repu, eval_num, img_url ,cat_id
            param = (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)
            if cat_id == '':
                print param
            params.append(param)
            i = i + 1
            if i == 100:  # flush in batches of 100
                batchSaveProdBaseInfo(conn, params)
                params = list()
                i = 0
    if i > 0:
        batchSaveProdBaseInfo(conn, params)
    del params
    conn.close()
    fOutput.close()
def batchUpdateProdBaseInfo(conn, prodList):
    ''' Batch-update repu by id; the table name is currently hard-coded
    to prod_base_info_3c (the table_name template is commented out). '''
    curs = initClientEncode(conn)
    sqlPattern = '''update prod_base_info_3c set repu=%s where id = %s'''
    #sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for pBaseInfo in prodList:
        wparams.append([seEncode(pt) for pt in pBaseInfo])
    curs.executemany(sqlPattern, wparams)
    conn.commit()
    curs.close()
if __name__ == '__main__':
    # NOTE(review): match55bigoCats requires (site_id, name); this
    # one-argument call raises TypeError -- confirm the intended site_id
    for t in match55bigoCats('笔记本'):
        print seEncode(t)
# | Python | -- file-boundary artifact left by concatenating source files
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import re,os,sys,itertools,glob
from dbproc.basedbproc import *
from decimal import Decimal
def wrapDecimal(priceStr):
    '''Convert any price-like value to Decimal via its string form.'''
    text = str(priceStr)
    return Decimal(text)
def calRealPrice(curPrice, coupon, orderCut, crashCut,extraCut):
    '''Listed price minus coupon, order cut, cash-back and extra cut,
    returned as str(float(...)).'''
    remaining = Decimal(str(curPrice))
    for cut in (coupon, orderCut, crashCut, extraCut):
        remaining -= Decimal(str(cut))
    return str(float(remaining))
class PriceInfoBuilder(object):
    '''Base price extractor: subclasses override the get* hooks to parse
    promotion amounts out of a specific site's ad words.'''
    def __init__(self,adWords,curPrice = 0.00,extraCut = 0.00):
        '''Keep the raw ad text plus the listed price and any extra cut.'''
        self.adWords = adWords
        self.curPrice = curPrice
        self.extraCut = extraCut
    def getCoupon(self):
        '''Coupon rebate; none by default.'''
        return '0.00'
    def getExGift(self):
        '''Free-gift description; none by default.'''
        return ''
    def getOrderCut(self):
        '''Instant order discount; none by default.'''
        return '0.00'
    def getCrashCut(self):
        '''Cash-back amount; none by default.'''
        return '0.00'
    def getCurPrice(self):
        '''Listed price as given to the constructor.'''
        return self.curPrice
    def getSiteDefineFinalPrice(self):
        '''Site-advertised final price; '0.00' means not stated.'''
        return '0.00'
    def getRealPrice(self):
        '''Effective price: the site-stated final price when present,
        otherwise the listed price minus every discount component.'''
        declared = self.getSiteDefineFinalPrice()
        if declared != '0.00':
            return declared
        return calRealPrice(self.curPrice, self.getCoupon(),
                            self.getOrderCut(), self.getCrashCut(),
                            self.extraCut)
class J360PriceInfoBuilder(PriceInfoBuilder):
def __init__(self, adWords, curPrice=0.00, extraCut=0.00):
PriceInfoBuilder.__init__(self, adWords, curPrice=curPrice, extraCut=extraCut)
def getCoupon(self):
p1 = re.compile(ur'([0-9]+)元?京?券')
p2 = re.compile(ur'京?券([0-9]+)')
ret1 = p1.search(self.adWords)
ret = ret1 if ret1 != None else p2.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
def getExGift(self):
i = self.adWords.rfind(u'赠')#//送
if i != -1:
ret = self.adWords[i:].replace(u',', ' ').replace(u'!', ' ')
ti = ret.find(' ')
if ti != -1:
return ret[0:ti]
else:
return ret
else:
return ''
def getOrderCut(self):
p1 = re.compile(ur'下单.*减([0-9]+)')
p2 = re.compile(ur'下单直降([0-9]+)')
ret1 = p1.search(self.adWords)
ret = ret1 if ret1 != None else p2.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
def getCrashCut(self):
p1 = re.compile(ur'返现([0-9]+)')
ret1 = p1.search(self.adWords)
if ret1 != None:
return ret1.group(1)
else:
return '0.00'
def getSiteDefineFinalPrice(self):
p1 = re.compile(ur'相当于([0-9]+)')
p2 = re.compile(ur'成交价?([0-9]+)')
p3 = re.compile(ur'([0-9]+)元?成交')
ret1 = p1.search(self.adWords)
ret2 = ret1 if ret1 != None else p2.search(self.adWords)
ret = ret2 if ret2 != None else p3.search(self.adWords)
if ret != None:
return ret.group(1)
else:
return '0.00'
#===============================================================================
# Product 基础信息入库处理
#===============================================================================
def parserProdBaseInfo(site_id, prodIdsDict, line):
    '''Parse one "|"-separated log line into the base-info insert tuple.

    Returns None for malformed lines or already-seen products; otherwise a
    tuple matching batchSaveProdBaseInfo's column order.
    '''
    fields = line.split('|')
    if len(fields) != 10:  # malformed record
        return
    raw_id = fields[0]
    if prodIdsDict.get(raw_id) is not None:  # already in DB or this file
        return
    prodIdsDict[raw_id] = 'exist'  # de-dup within one input file
    # SECURITY NOTE: eval() of spider output (a "(url, n)" tuple literal);
    # trusted here, but ast.literal_eval would be safer.
    catUrl = eval(fields[-1])[0]
    # TODO(review): newly-appearing categories are not handled here.
    cat_id = getCatIdFromRawInfo(site_id, catUrl)
    u_time, name, repu, eval_num = fields[2], fields[3], fields[4], fields[5]
    url, img_url = fields[7], fields[8]
    return (site_id, raw_id, name, url, img_url, repu,
            eval_num, cat_id, u_time)
def getLine(fileName, encoding='gb18030'):
    '''Yield each line of fileName, stripped and decoded (errors ignored).'''
    with open(fileName, 'r') as src:
        for raw in src:
            yield raw.strip().decode(encoding, 'ignore')
def createProdBaseInfo(siteName, fileName):
    ''' Batch-insert product base info parsed from one spider log file. '''
    site_id = getSiteIdByName(siteName)
    conn = getConnect()
    params = list()
    i = 0
    # raw_id -> id map of products already stored, to skip duplicates.
    prodIdsDict = getProdInfoRawIDMapId(site_id)
    for line in getLine(fileName):
        param = parserProdBaseInfo(site_id, prodIdsDict, line)
        if param is None:  # malformed or duplicate line
            continue
        print 'create prod base info %s,%s' % (param[1],param[2])
        params.append(param)
        i = i + 1
        if i == 200:  # flush to the DB in batches of 200 rows
            batchSaveProdBaseInfo(conn, params)
            params = list()
            i = 0
    if i > 0:  # flush the final partial batch
        batchSaveProdBaseInfo(conn, params)
    del params
    conn.close()
def procDbLatestPriceInfo(conn,prod_id,u_time,real_price,cur_price):
    '''Decide whether a freshly-parsed price record should be stored.

    Returns the price delta against the newest stored record, the string
    '0.00' when the product has no price history yet, or None when the
    record must be skipped (same-day duplicate, older than the newest
    stored row, or unchanged prices).
    NOTE(review): mixed return types ('0.00' str / Decimal delta / None);
    callers only test for None, but this is fragile.
    '''
    ret = getProdPriceInfoFromProdId(conn,prod_id=prod_id)
    if ret is None:
        return '0.00'
    cur_date = u_time.split()[0]
    date_list = [it[2].strftime("%Y-%m-%d") for it in ret]
    for v in date_list:
        # Same-day records are not re-inserted; multiple crawls in one
        # day therefore collide (known limitation).
        if cmp(cur_date, v) == 0:
            #print prod_id,u_time
            return
#    elif cmp(cur_date,v) < 0:
#        retPrices = ret[i][0:2]
#        break
    # Incoming record older than the newest stored one: ignore it.
    if cmp(cur_date,ret[0][2].strftime("%Y-%m-%d")) < 0:
        return
    retPrices = ret[0][0:2]
    if retPrices != None and wrapDecimal(real_price) == wrapDecimal(retPrices[0]) and wrapDecimal(cur_price) == wrapDecimal(retPrices[1]):
        return
    elif retPrices is None:
        diff_price = 0.00
    else:
        diff_price = (wrapDecimal(real_price) - wrapDecimal(retPrices[0]))
    return diff_price
def parserProdPriceInfo(conn,site_id, line, raw_id_set, prodIdsDict, BuilderClass):
    '''Parse one log line into a price-info insert tuple.

    Returns None when the line is malformed, a duplicate within this file,
    priced at 0.00, or when procDbLatestPriceInfo decides to skip it.
    Side effect: products not yet in the DB are inserted on the fly.
    '''
    ret = line.split('|')
    if len(ret) != 10:
        return
    raw_id = ret[0]
    if raw_id in raw_id_set:  # one price record per product per file
        return
    raw_id_set.add(raw_id)
    cur_price = ret[1]
    if str(cur_price) == '0.00':  # placeholder/unparsed price, skip
        #print 'Price is 0.00 %s,' % line
        return
    u_time = ret[2]
    adwords = ret[6]
    builder = BuilderClass(adWords=adwords, curPrice=cur_price)
    coupon = builder.getCoupon()
    ex_gift = builder.getExGift()
    order_cut = builder.getOrderCut()
    crash_cut = builder.getCrashCut()
    real_price = builder.getRealPrice()
    prod_id = prodIdsDict.get(raw_id, None)
    if prod_id is None:
        #return
        # First sighting: insert the base record so the price row can
        # reference a prod_id.
        param = parserProdBaseInfo(site_id, prodIdsDict, line)
        param_c = list()
        param_c.append(conn)
        param_c.extend(param)
        prod_id = saveProdBaseInfo(*param_c)
        prodIdsDict[raw_id] = prod_id
        print 'new prod raw_id:%s,id:%s,name:%s' % (raw_id,prod_id,param[2])
    diff_price = procDbLatestPriceInfo(conn,prod_id,u_time,real_price,cur_price)
    if diff_price is None :
        return
    param = (prod_id, real_price, cur_price, 0.00, diff_price, 0.00, 0.00, adwords, coupon, ex_gift, order_cut, crash_cut, u_time)
    return param
def createProdPriceInfo(siteName,fileName, PriceInfoBuilderClass = PriceInfoBuilder, encoding='gb2312'):
site_id = getSiteIdByName(siteName)
prodIdsDict = dict(getAllRawProdIdsBySite(site_id))#key:raw_id value:id
if len(prodIdsDict) == 0:
createProdBaseInfo(siteName,fileName,encoding)
prodIdsDict = dict(getAllRawProdIdsBySite(site_id))
i = 0
params = list()
raw_id_set = set()
conn = getConnect()
with open(fileName, 'r') as fInput:
for line in fInput:
line = line.strip().decode(encoding, 'ignore')
param = parserProdPriceInfo(conn,site_id, line, raw_id_set,
prodIdsDict, PriceInfoBuilderClass)
if param is None:
continue
print 'create prod price info %s,%s' % (param[0],param[4])
params.append(param)
i = i + 1
if i == 200:
batchSaveProdPriceInfo(conn, params)
params = list()
i = 0
if i > 0:
batchSaveProdPriceInfo(conn, params)
del params
#saveProdPriceInfo(conn, prod_id=prod_id, real_price=real_price, cur_price=cur_price,diff_price = 0.00, adwords=adwords,
# coupon=coupon, ex_gift=ex_gift,order_cut=order_cut, crash_cut=crash_cut)
conn.close()
def __getDateStrFromFileName(fName):
    '''Extract the YYYY-MM-DD stamp embedded in a log file name, or None.'''
    match = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', fName)
    if match is not None:
        return match.group()
def loadDataToDB(files,beginDate = None,endDate = None):
for fileName in itertools.chain(files[1:],files[0:1]):
fName = os.path.split(fileName)[-1]
dateStr = __getDateStrFromFileName(fName)
if beginDate != None and dateStr != None and cmp(dateStr,beginDate) < 0:
continue
elif endDate != None and dateStr != None and cmp(dateStr,endDate) > 0:
break
siteName = fName.split('_')[0]
print fileName
if siteName == '360buy':
createProdPriceInfo(siteName, fileName, J360PriceInfoBuilder)
else:
createProdPriceInfo(siteName, fileName)
if __name__ == '__main__':
    #createProdBaseInfo('amazon',r'I:\log\amazon\amazon_spider.log.2011-09-20')
    # Walk every site directory under I:\log and load its spider logs
    # for the configured date window.
    rootBase = r'I:\log'
    for rootDir in os.listdir(rootBase):
        if rootDir.find('.') >= 0:  # plain files contain dots; site dirs do not
            continue
        rootDir = os.path.join(rootBase,rootDir)
        if os.path.isdir(rootDir):
            plog = r'%s%s*log*' % (rootDir,os.sep)
            files = glob.glob(plog)
            print files
            loadDataToDB(files,beginDate = '2011-10-03',endDate='2011-10-03')
| Python |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import os,re,sys
import hashlib
import operator
import shutil
from ConfigParser import ConfigParser
from decimal import Decimal
import MySQLdb
from pageparser import ParserUtils
#DB parameter
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''
def seEncode(ustr, encoding='utf-8'):
    '''Normalise a value for DB insertion as an encoded byte string.

    None -> '', unicode -> encoded bytes, containers -> "[a,b,...]" with
    each element encoded recursively, anything else -> str().
    '''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list, tuple, set)):
        inner = ','.join(seEncode(item, encoding) for item in ustr)
        return '[%s]' % inner
    return str(ustr)
def getDigit(s):
    '''Extract the first (possibly decimal) number from *s*.

    Thousands separators (commas) are stripped first.  Returns the matched
    number as a string, or the int 0 when *s* contains no digits.
    '''
    s = s.replace(u',', '')
    # BUGFIX: the dot was unescaped, so "12x34" matched as one number
    # ("12x34"); the class [.] pins it to a literal decimal point.
    sd = re.search(u'[0-9]+[.][0-9]+|[0-9]+', s)
    if sd is None:
        return 0
    return sd.group()
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a MySQL connection.
    BUGFIX: the parameters were silently ignored — the call always used the
    module-level defaults, so per-call overrides never took effect.'''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
    '''Force the MySQL client connection to utf8 and return a fresh cursor.'''
    cursor = conn.cursor()
    cursor.execute("SET NAMES utf8")
    conn.commit()
    return cursor
#===============================================================================
# 基本vo类定义
#===============================================================================
class CatBaseConfig(object):
    '''VO for table `cat_base_config`: a category's three table names.'''
    def __init__(self, id, tProdInfo, tProdPriceCur, tProdPriceHis):
        self.id = id
        self.tProdInfo = tProdInfo
        self.tProdPriceCur = tProdPriceCur
        self.tProdPriceHis = tProdPriceHis
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
class ProdBaseInfo(object):
    '''VO mirroring the prod_base_info_XX tables (one table per top category).'''
    __slots__ = ('site_id','raw_id','name','url','img_url','repu','eval_num',
                 'cat_id','self_cat_id','cat_base_id','u_time')
    def __init__(self):
        pass
    def __str__(self):
        lines = [seEncode((attr, getattr(self, attr))) for attr in self.__slots__]
        return os.linesep.join(lines)
    __repr__ = __str__
class ProdPriceInfo(object):
    '''VO mirroring the prod_price_info_* tables (current/history prices).'''
    __slots__ = ('id','prod_id','real_price','cur_price','m_price','diff_price','trans_price',
                 'other_dis','adwords','coupon','ex_gift','order_cut','crash_cut','u_time')
    def __str__(self):
        lines = [seEncode((attr, getattr(self, attr))) for attr in self.__slots__]
        return os.linesep.join(lines)
    __repr__ = __str__
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
    '''Return (en_name, id) rows for every site in `websit_base_info`.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT en_name,id FROM `websit_base_info` ')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 表 `prod_db_stat` db 操作
#===============================================================================
def getProdStatInfo(site_id, cat_base_id):
    '''Latest load u_time recorded for (site, category), or None.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('SELECT u_time FROM `prod_db_stat` where website_id=%s and cat_base_id=%s ',
                 (site_id, cat_base_id))
    row = curs.fetchone()
    curs.close()
    conn.close()
    if row:
        return row[0]
def saveProdStat(site_id, cat_base_id, u_time):
    '''Insert the first load-state row for a (site, category) pair.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'INSERT INTO `prod_db_stat`(`website_id`,`cat_base_id`,`u_time`) VALUES (%s,%s,%s)'
    curs.execute(sqlStr, (site_id, cat_base_id, u_time))
    conn.commit()
    curs.close()
    # BUGFIX: the connection leaked; sibling helpers (getProdStatInfo) close it.
    conn.close()
def updateProdStat(site_id, cat_base_id, u_time):
    '''Advance the recorded load date for a (site, category) pair.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'update `prod_db_stat` set u_time=%s where website_id=%s and cat_base_id=%s'
    curs.execute(sqlStr, (u_time, site_id, cat_base_id))
    conn.commit()
    curs.close()
    # BUGFIX: the connection leaked; sibling helpers (getProdStatInfo) close it.
    conn.close()
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Map a raw category id to (id, self_cat_id, cat_base_id), or None
    when the category is unknown for this site.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s'
    curs.execute(sqlStr, (raw_cat_id, site_id))
    row = curs.fetchone()
    curs.close()
    conn.close()
    return row
#===============================================================================
# 表 `cat_base_config` db 操作
#===============================================================================
def getAllCatBaseConfig():
    '''All rows of `cat_base_config`: (main_cat_id, baseinfo table,
    current-price table, history-price table, en_name).'''
    conn = getConnect()
    curs = initClientEncode(conn)
    curs.execute('''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
    en_name FROM `cat_base_config`''')
    rows = curs.fetchall()
    curs.close()
    conn.close()
    return rows
#===============================================================================
# 商品基础信息入库函数,根据不同的类别保存进不同的表中
#===============================================================================
def saveProdBaseInfo(conn, table_name, pBaseInfo):
    '''Insert one ProdBaseInfo row into table_name; returns the new row id.'''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`,
    `cat_id` , `self_cat_id`,`u_time` ) VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s, %s)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    # All values go through seEncode so unicode is stored as utf-8 bytes.
    param = [seEncode(pt) for pt in (pBaseInfo.site_id , pBaseInfo.raw_id , pBaseInfo.name , pBaseInfo.url ,
             pBaseInfo.img_url , pBaseInfo.repu, pBaseInfo.eval_num, pBaseInfo.cat_id,
             pBaseInfo.self_cat_id , pBaseInfo.u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    return int(ret)
def batchSaveProdBaseInfo(conn, table_name, prodList):
    ''' Bulk-insert a list of ProdBaseInfo rows into table_name. '''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`,
    `cat_id` , `self_cat_id`,`u_time` ) VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s, %s)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for pBaseInfo in prodList:
        # Same column order and encoding as saveProdBaseInfo.
        wparams.append([seEncode(pt) for pt in (pBaseInfo.site_id , pBaseInfo.raw_id , pBaseInfo.name , pBaseInfo.url ,
                        pBaseInfo.img_url , pBaseInfo.repu, pBaseInfo.eval_num, pBaseInfo.cat_id,
                        pBaseInfo.self_cat_id , pBaseInfo.u_time)])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def updateProdBaseInfo(conn, table_name, pBaseInfo):
    '''Refresh the mutable columns (repu, eval_num, urls) of one product.'''
    curs = initClientEncode(conn)
    sqlPattern = '''update {table_name} set `repu`=%s, `eval_num`=%s, url=%s,img_url=%s where site_id=%s and raw_id=%s'''
    sqlStr = sqlPattern.format(table_name=table_name)
    param = [seEncode(pt) for pt in (pBaseInfo.repu, pBaseInfo.eval_num,pBaseInfo.url ,
             pBaseInfo.img_url ,pBaseInfo.site_id,pBaseInfo.raw_id)]
    curs.execute(sqlStr, param)
    conn.commit()
    curs.close()
def batchUpdateProdBaseInfo(conn, table_name, prodList):
    ''' Bulk-refresh the mutable columns of many products in table_name. '''
    curs = initClientEncode(conn)
    sqlPattern = '''update {table_name} set `repu`=%s, `eval_num`=%s, url=%s,img_url=%s where site_id=%s and raw_id=%s'''
    sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for pBaseInfo in prodList:
        wparams.append([seEncode(pt) for pt in (pBaseInfo.repu, pBaseInfo.eval_num,pBaseInfo.url,
                        pBaseInfo.img_url ,pBaseInfo.site_id,pBaseInfo.raw_id)])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def getAllRawProdIds(table_name, site_id):
    '''All (raw_id, id) pairs stored for one site in table_name.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlPattern = 'SELECT raw_id,id FROM {table_name} where site_id = %s'
    sqlStr = sqlPattern.format(table_name=table_name)
    # BUGFIX: (site_id) is just a parenthesised scalar; execute() expects a
    # parameter sequence — use a real one-element tuple.
    curs.execute(sqlStr, (site_id,))
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 保存价格数据到各类别的cur_price里
#===============================================================================
def saveProdPriceInfo(conn, table_name, priceInfo):
    '''Insert one current-price row; u_flag=1 marks it for the history sweep.
    Returns the new row id.'''
    curs = initClientEncode(conn)
    sqlPattern = '''INSERT INTO {table_name} (`prod_id` ,`real_price` ,`cur_price` ,`diff_price`,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ,`u_flag`) VALUES
    (%s, %s, %s,%s, %s, %s,%s, %s, %s,%s,1)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    param = [seEncode(pt) for pt in (priceInfo.prod_id, priceInfo.real_price, priceInfo.cur_price,
             priceInfo.diff_price, priceInfo.adwords, priceInfo.coupon, priceInfo.ex_gift,
             priceInfo.order_cut,priceInfo.crash_cut, priceInfo.u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    return int(ret)
def updateProdPriceInfo(conn, table_name, priceInfo):
    '''Overwrite the current-price row for priceInfo.prod_id and re-flag it
    (u_flag=1) for the history sweep.
    NOTE(review): returns curs.lastrowid, which is not meaningful after an
    UPDATE — confirm no caller relies on the value.'''
    curs = initClientEncode(conn)
    sqlPattern = ''' update {table_name} set `real_price`=%s ,`cur_price`=%s ,`diff_price`=%s, `adwords`=%s,`coupon`=%s ,`ex_gift`=%s ,
    `order_cut`=%s ,`crash_cut`=%s ,`u_time`=%s ,`u_flag`=1 where prod_id = %s '''
    sqlStr = sqlPattern.format(table_name=table_name)
    param = [seEncode(pt) for pt in (priceInfo.real_price, priceInfo.cur_price,
             priceInfo.diff_price, priceInfo.adwords, priceInfo.coupon, priceInfo.ex_gift,
             priceInfo.order_cut,priceInfo.crash_cut, priceInfo.u_time,priceInfo.prod_id)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    return int(ret)
def batchSaveProdPriceInfo(conn, table_name, params):
    '''Bulk-insert price rows (u_flag preset to 1 for the history sweep).

    Each element of *params* must follow the column order in the SQL below.
    '''
    curs = initClientEncode(conn)
    # BUGFIX: the column list read "`u_time`,,`u_flag`" — the doubled comma
    # made every batch insert fail with a MySQL syntax error.
    sqlPattern = '''INSERT INTO {table_name} (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time`,`u_flag` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s,1)'''
    sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def savePriceInfo2HisTable(conn, his_table, cur_table, baseinfo_table, site_id):
    '''Append all newly-flagged (u_flag=1) current prices of one site to the
    history table, then clear the flag; rolls back on any error.
    NOTE(review): errors are swallowed silently — consider logging before
    the rollback.'''
    curs = initClientEncode(conn)
    try:
        #>= date_format( %s, '%%Y-%%m-%%d' )
        sqlPattern = ''' insert into {his_price_table} (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price` ,
        `other_dis` ,`adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut` ,`u_time` ) SELECT t1.`prod_id` , t1.`real_price` ,
        t1.`cur_price` , t1.`m_price` , t1.`diff_price` , t1.`trans_price` , t1.`other_dis` , t1.`adwords` , t1.`coupon` ,
        t1.`ex_gift` , t1.`order_cut` , t1.`crash_cut` , t1.`u_time`
        FROM {cur_price_table} AS t1 JOIN {baseinfo_table} AS t2 ON t1.prod_id = t2.id
        WHERE t1.u_flag = 1 AND t2.site_id =%s'''
        sqlStr = sqlPattern.format(his_price_table=his_table,
                                   cur_price_table=cur_table, baseinfo_table=baseinfo_table)
        # BUGFIX: (site_id) was a bare scalar, not a parameter tuple.
        curs.execute(sqlStr, (site_id,))
        sqlPat2 = ''' UPDATE {cur_price_table} SET `u_flag` = '0' WHERE `u_flag` = 1 '''
        curs.execute(sqlPat2.format(cur_price_table=cur_table))
        conn.commit()
    except Exception:
        conn.rollback()
    finally:
        curs.close()
def getCurPriceByProdId(conn, table_name, prod_id):
    '''(real_price, cur_price) currently stored for prod_id, or None.'''
    curs = initClientEncode(conn)
    sqlPattern = ''' select real_price,cur_price from {table_name} where prod_id = %s '''
    sqlStr = sqlPattern.format(table_name=table_name)
    # BUGFIX: (prod_id) was a bare scalar, not a parameter tuple.
    curs.execute(sqlStr, (prod_id,))
    result = curs.fetchone()
    curs.close()
    return result
# exceptions
class LogFormatException(Exception):
    """Raised when a spider log line does not match the expected format."""
    pass
def attrsFromDict(d):
    '''Set every entry of *d* (typically locals()) as an attribute on
    d['self'] — shorthand for boilerplate __init__ bodies.  Mutates *d*.'''
    target = d.pop('self')
    for name, value in d.iteritems():
        setattr(target, name, value)
class Singleton(object):
    ''' Pythonic singleton: every instantiation of a subclass yields one
    shared instance (stored per-class in _inst). '''
    def __new__(cls, *args, **kargs):
        if '_inst' not in vars(cls):
            # BUGFIX: object.__new__ must not receive the constructor
            # arguments — forwarding *args/**kargs raises TypeError for any
            # subclass whose __init__ takes parameters.
            cls._inst = super(Singleton, cls).__new__(cls)
        return cls._inst
class SiteNameIDDictFactory(Singleton):
    '''Caches the en_name -> id mapping from table `websit_base_info`.'''
    # Lazily-filled class-level cache; None means "not loaded yet".
    _t_site_dict = None
    @classmethod
    def getSiteIdByName(cls, siteName):
        # Loads the whole mapping on first use; raises KeyError for
        # unknown site names.
        if cls._t_site_dict is None:
            cls._t_site_dict = dict(getAllWebsiteBaseInfo())
        return cls._t_site_dict[siteName]
class CatBaseConfigDictFactory(Singleton):
    '''Lazy cache of `cat_base_config` rows keyed by main_cat_id (as str).

    Row layout: (main_cat_id, baseinfo_table_name, priceinfo_cur_table_name,
    priceinfo_his_table_name, en_name).
    '''
    _t_cbconfig_dict = None
    @classmethod
    def _initDict(cls):
        for row in getAllCatBaseConfig():
            cls._t_cbconfig_dict[str(row[0])] = row
    @classmethod
    def getConfigById(cls, catBaseId):
        if cls._t_cbconfig_dict is None:
            cls._t_cbconfig_dict = {}
            cls._initDict()
        return cls._t_cbconfig_dict[str(catBaseId)]
    @classmethod
    def getCatBaseConfig(cls, catBaseId):
        '''Wrap the raw row in a CatBaseConfig VO.'''
        row = cls.getConfigById(catBaseId)
        return CatBaseConfig(catBaseId, row[1], row[2], row[3])
def getCatKey(url):
    '''16-char decimal key derived from the MD5 digest of a category URL.'''
    digest = hashlib.md5(url).hexdigest()
    return str(int(digest, 16))[0:16]
class ProdCatDictFactory(Singleton):
    '''Cache keyed "siteId_rawCatId" -> (id, self_cat_id, cat_base_id).'''
    _t_cat_dict = {}
    @classmethod
    def __getKey(cls, siteId, rawCatId):
        return '%s_%s' % (siteId, rawCatId)
    @classmethod
    def getProdCatId(cls, siteId, catUrl):
        '''Resolve a category URL to its DB ids, caching the lookup.'''
        rawCatId = getCatKey(catUrl)
        key = cls.__getKey(siteId, rawCatId)
        cached = cls._t_cat_dict.get(key, None)
        if cached is None:  # miss (or an earlier failed lookup): hit the DB
            cached = getCatIdFromRawCatID(rawCatId, siteId)
            cls._t_cat_dict[key] = cached
        return cached
class ProdBaseInfoBuilder(Singleton):
    '''Builds a ProdBaseInfo VO from one "|"-separated spider log line.'''
    def _getCatIdFromRawInfo(self,site_id,catUrl):
        # (id, self_cat_id, cat_base_id) via the cached category lookup.
        return ProdCatDictFactory.getProdCatId(site_id, catUrl)
    def getResult(self,linestr,site_id):
        '''Return a populated ProdBaseInfo, or None when the line is
        malformed or its category is not in `prod_catagory`.'''
        ret = linestr.split('|')
        if len(ret) != 10:
            return
        prodBaseInfo = ProdBaseInfo()
        prodBaseInfo.site_id = site_id
        prodBaseInfo.raw_id = ret[0]
        prodBaseInfo.u_time = ret[2]
        prodBaseInfo.name = ret[3]
        evalNum = ret[4]
        if evalNum:
            evalNum = evalNum.replace(u',','')  # strip thousands separators
        repu = float(ret[5])
        if repu > 5.1:
            # Values above the 5-point scale are treated as percentages
            # and rescaled to 0-5.
            repu = repu * 5 / 100
        prodBaseInfo.repu = str(repu)
        prodBaseInfo.eval_num = evalNum
        prodBaseInfo.url = ret[7]
        prodBaseInfo.img_url = ret[8]
        # SECURITY NOTE: eval() of spider output (a "(url, n)" tuple
        # literal); trusted here, but ast.literal_eval would be safer.
        catUrl = eval(ret[-1])[0]
        ret = self._getCatIdFromRawInfo(site_id,catUrl)
        if ret:
            cat_id,self_cat_id,cat_base_id = ret
        else:
            #self.logger.info(' '.join([str(s) for s in (site_id,catUrl)]))
            return
        prodBaseInfo.cat_id = cat_id
        prodBaseInfo.self_cat_id = self_cat_id
        prodBaseInfo.cat_base_id = cat_base_id
        return prodBaseInfo
#===============================================================================
# Product 基础信息入库处理
#===============================================================================
def __getLine(fileName, encoding='gb18030'):
    '''Yield stripped, decoded lines of a gb18030-encoded spider log.'''
    with open(fileName, 'r') as src:
        for raw in src:
            yield raw.strip().decode(encoding, 'ignore')
def __judgeDuplicateProd(prodIdsDict, prodBaseInfo):
    '''True when the product is missing or already recorded; otherwise
    registers its raw_id (one insert per file) and returns False.'''
    if prodBaseInfo is None:
        return True
    raw_id = prodBaseInfo.raw_id
    if raw_id in prodIdsDict:
        return True
    prodIdsDict[raw_id] = ''  # remember: insert each product only once
    return False
def __j360BuyFilterFunc(prodBaseInfo):
    '''True for 360buy self-operated goods (raw_id not starting "100").'''
    raw_id = str(getattr(prodBaseInfo, 'raw_id', ''))
    return not raw_id.startswith('100')
# site_id (as str) -> product filter; sites absent here accept everything.
filterFuncDict = {'6':__j360BuyFilterFunc}
def __getProdFilterFunc(site_id):
    '''Product filter for a site; the default accepts every product.'''
    return filterFuncDict.get(str(site_id), lambda prod: True)
def __chooseProd(prodBaseInfo, catBaseId):
    '''Keep only products that belong to the target base category AND pass
    the site-specific filter (e.g. 360buy third-party ids are dropped).'''
    if prodBaseInfo is None:
        return False
    siteFilter = __getProdFilterFunc(prodBaseInfo.site_id)
    inCategory = catBaseId == getattr(prodBaseInfo, 'cat_base_id', None)
    if inCategory and siteFilter(prodBaseInfo):
        return True
    return False
def __getExistProdRawId(table_name,siteId):
    # raw_id -> id map of products already stored for this site.
    return dict(getAllRawProdIds(table_name,siteId))
def __singleProdLoad2Db(conn,line,site_id,catBaseId):
    ''' Insert one product's base info; returns the new prod id, or None
    when the line is malformed or the product is filtered out. '''
    # The base-info table depends on the top-level category.
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    prodBaseInfoTable = catBaseConfig.tProdInfo
    builder = ProdBaseInfoBuilder()
    prodBaseInfo = builder.getResult(line, site_id)
    prodId = None
    if prodBaseInfo and __chooseProd(prodBaseInfo, catBaseId):
        prodId = saveProdBaseInfo(conn, prodBaseInfoTable, prodBaseInfo)
    return prodId
def createProdBaseInfo(siteName, catBaseId, fileName):
''' 商品基本信息批量入库 '''
conn = getConnect()
site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
#根据类别id获取表的名字(不同的大分类入不同的表中)
catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
prodBaseInfoTable = catBaseConfig.tProdInfo
prodIdsDict = __getExistProdRawId(prodBaseInfoTable,site_id)
prodList = []
for line in __getLine(fileName):
builder = ProdBaseInfoBuilder()
prodBaseInfo = builder.getResult(line, site_id)
if __chooseProd(prodBaseInfo, catBaseId) and \
not __judgeDuplicateProd(prodIdsDict, prodBaseInfo):
print prodBaseInfo.name,prodBaseInfo.raw_id
prodList.append(prodBaseInfo)
if len(prodList) == 200:
batchSaveProdBaseInfo(conn, prodBaseInfoTable, prodList)
prodList = []
if prodList:
batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
conn.close()
#===============================================================================
# 价格信息处理
#===============================================================================
def wrapDecimal(priceStr):
    '''Strip all whitespace from a price string and return it as a Decimal.'''
    compact = ''.join(priceStr.split())
    return Decimal(str(compact))
class PriceInfoBuilder(object):
    '''Base parser turning one spider log line into a ProdPriceInfo.

    Site-specific subclasses override the promotion getters; the defaults
    report "no promotion".
    '''
    def __init__(self, line=''):
        self.line = line

    @staticmethod
    def _calRealPrice(curPrice, coupon, orderCut, crashCut):
        '''Current price minus coupon, order cut and cash-back (as str).'''
        cur, cpn, ocut, ccut = [wrapDecimal(p) for p in
                                (curPrice, coupon, orderCut, crashCut)]
        return str(float(cur - cpn - ocut - ccut))

    def getAdwords(self):
        return self.adWords

    def setAdwords(self, adwords):
        self.adWords = adwords

    def getCoupon(self):
        '''Voucher value; sites without one report 0.00.'''
        return '0.00'

    def getExGift(self):
        '''Free-gift description; empty by default.'''
        return ''

    def getOrderCut(self):
        '''Instant order discount; 0.00 by default.'''
        return '0.00'

    def getCrashCut(self):
        '''Cash-back; 0.00 by default.'''
        return '0.00'

    def getSiteDefineFinalPrice(self):
        '''Deal price the site states outright; 0.00 means "not stated".'''
        return '0.00'

    def getRealPrice(self, curPrice):
        '''Computed final price: the site-stated deal price when available,
        otherwise current price minus all promotions.'''
        stated = self.getSiteDefineFinalPrice()
        if stated == '0.00':
            return PriceInfoBuilder._calRealPrice(
                curPrice, self.getCoupon(), self.getOrderCut(), self.getCrashCut())
        return stated

    def getResult(self, line, prod_id):
        '''Parse one "|"-separated log line into a ProdPriceInfo, or None
        when the line does not have exactly 10 fields.'''
        fields = line.split('|')
        if len(fields) != 10:
            return
        info = ProdPriceInfo()
        info.cur_price = fields[1].replace(' ', '')
        info.u_time = fields[2]
        info.adwords = fields[6]
        self.setAdwords(fields[6])
        info.prod_id = prod_id
        info.coupon = self.getCoupon()
        info.crash_cut = self.getCrashCut()
        info.ex_gift = self.getExGift()
        info.order_cut = self.getOrderCut()
        info.real_price = self.getRealPrice(fields[1])
        return info
class J360PriceInfoBuilder(PriceInfoBuilder):
    '''Ad-words parser specialised for 360buy (JD) promotion strings.'''

    def _firstGroup(self, patterns, default='0.00'):
        '''group(1) of the first pattern that matches the ad words.'''
        for pattern in patterns:
            m = re.search(pattern, self.adWords)
            if m is not None:
                return m.group(1)
        return default

    def getCoupon(self):
        '''Voucher amount, e.g. "100元券" or "券100".'''
        return self._firstGroup((u'([0-9]+)元?京?券', u'京?券([0-9]+)'))

    def getExGift(self):
        '''Gift text: everything from the last "赠" up to a separator.'''
        pos = self.adWords.rfind(u'赠')
        if pos == -1:
            return ''
        tail = self.adWords[pos:].replace(u',', ' ').replace(u'!', ' ')
        cut = tail.find(' ')
        return tail if cut == -1 else tail[0:cut]

    def getOrderCut(self):
        '''Instant discount applied at order time.'''
        return self._firstGroup((u'下单.*减([0-9]+)', u'下单直降([0-9]+)'))

    def getCrashCut(self):
        '''Cash-back amount.'''
        return self._firstGroup((u'返现([0-9]+)',))

    def getSiteDefineFinalPrice(self):
        '''Deal price explicitly stated in the ad words, if any.'''
        return self._firstGroup((u'相当于([0-9]+)', u'成交价?([0-9]+)',
                                 u'([0-9]+)元?成交'))
class Coo8PriceInfoBuilder(PriceInfoBuilder):
    '''Coo8 ad-words layout: "<cash-back segment>@<gift segment>".'''
    def getExGift(self):
        # NOTE(review): raises IndexError when no '@' is present — confirm
        # upstream always supplies both segments.
        segments = self.adWords.split('@')
        return segments[1]
    def getCrashCut(self):
        head = self.adWords.split('@')[0]
        return getDigit(head)
class GomePriceInfoBuilder(PriceInfoBuilder):
    '''Gome: the whole ad-words string carries the cash-back amount.'''
    def getCrashCut(self):
        return getDigit(self.adWords)
# Icson shares 360buy's ad-words grammar; NewEgg shares Coo8's.
IcsonPriceInfoBuilder = J360PriceInfoBuilder
NewEggPriceInfoBuilder = Coo8PriceInfoBuilder
class PriceInfoBuilderFactory(Singleton):
    '''Maps a site name to its PriceInfoBuilder subclass.'''
    _builder_map = {'360buy':J360PriceInfoBuilder,'coo8':Coo8PriceInfoBuilder,
                    'gome':GomePriceInfoBuilder,'icson':IcsonPriceInfoBuilder,
                    'newegg':NewEggPriceInfoBuilder}
    @classmethod
    def getPriceBuilder(cls,websiteName):
        # Unknown sites fall back to the generic PriceInfoBuilder.
        return cls._builder_map.get(websiteName,PriceInfoBuilder)
def __getRawId(line):
    '''Raw product id (first field) of a log line, or None if malformed.'''
    fields = line.split('|')
    if len(fields) != 10:
        return
    return fields[0]
def __getDateStrFromFileName(fName):
    '''Extract the YYYY-MM-DD stamp embedded in a log file name, or None.'''
    match = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', fName)
    if match is not None:
        return match.group()
class LoaderConfig(object):
    '''Loader settings read from load.conf next to the executable
    (section [conf]); missing options resolve to None.'''
    @classmethod
    def _init(cls):
        base = os.path.abspath(os.path.dirname(sys.argv[0]))
        cls.cf = ConfigParser()
        cls.cf.read(os.path.join(base, 'load.conf'))
    @classmethod
    def getConfig(cls, option):
        if not hasattr(cls, 'cf'):
            cls._init()
        try:
            return cls.cf.get('conf', option)
        except Exception:
            return None
    @classmethod
    def getBaseDir(cls):
        ret = cls.getConfig('base_dir')
        if not ret:
            raise Exception('No base_dir found')
        return ret
    @classmethod
    def getBackupDir(cls):
        ret = cls.getConfig('backup_dir')
        if not ret:
            raise Exception('No backup_dir found')
        return ret
    @classmethod
    def getSitesSet(cls):
        '''Comma-separated `sites` option as a set of stripped names.'''
        sites = cls.getConfig('sites')
        if not sites:
            return set()
        return set(site.strip() for site in sites.split(','))
    @classmethod
    def getCatId(cls):
        cat_id = cls.getConfig('cat_id')
        if cat_id:
            return int(cat_id)
def ld2CurPriceInfo(siteName,fileName,catBaseId,newProdSaveFlag = True):
    '''Load one spider log into the category's current-price table.

    Per line: resolve (or, when newProdSaveFlag, create) the product, build
    a ProdPriceInfo with the site-specific builder, then insert the price
    or update it when it changed.  Near-zero prices/deltas are ignored as
    parser glitches.
    '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    # Table names depend on the top-level category.
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    priceCurTable = catBaseConfig.tProdPriceCur
    prodIdDict = __getExistProdRawId(catBaseConfig.tProdInfo,site_id)
    #raw_id_set = set()
    conn = getConnect()
    for line in __getLine(fileName):
        raw_id = __getRawId(line)
        if raw_id is None:# or raw_id in raw_id_set:
            continue
        #raw_id_set.add(raw_id)
        prod_id = prodIdDict.get(raw_id, None)
        if prod_id is None and newProdSaveFlag:
            prod_id = __singleProdLoad2Db(conn,line,site_id,catBaseId)
            #print 'insert new prod %s' % prod_id
            if prod_id:# None here means the product was filtered out
                prodIdDict[raw_id] = prod_id
        if prod_id is None:
            #print 'break raw_id is %s ' % raw_id
            continue
        PriceInfoBuilderClass = PriceInfoBuilderFactory.getPriceBuilder(siteName)
        priceInfoBuilder = PriceInfoBuilderClass()
        try:
            priceInfo = priceInfoBuilder.getResult(line,prod_id)
        except Exception ,e:
            print e
            continue
        curPriceInfo = getCurPriceByProdId(conn,priceCurTable,prod_id)
        if curPriceInfo:
            real_price,cur_price = curPriceInfo
            if wrapDecimal(real_price) != wrapDecimal(priceInfo.real_price) or \
                wrapDecimal(cur_price) != wrapDecimal(priceInfo.cur_price):
                diff_price = (wrapDecimal(real_price) - wrapDecimal(priceInfo.cur_price))
                # Guard against glitches: skip near-zero prices or deltas.
                if wrapDecimal(priceInfo.real_price) < Decimal('0.1') or diff_price < Decimal('0.1'):
                    continue
                priceInfo.diff_price = str(diff_price)
                print curPriceInfo,priceInfo.prod_id,priceInfo.real_price,priceInfo.cur_price
                updateProdPriceInfo(conn,priceCurTable,priceInfo)
            #else:
                #pass
                #print 'no price change'
        else:
            priceInfo.diff_price = '0.00'
            if wrapDecimal(priceInfo.real_price) > Decimal('0.1'):
                print 'save raw_id : %s price is:%s ' % (raw_id,priceInfo.real_price)
                saveProdPriceInfo(conn,priceCurTable,priceInfo)
    conn.close()
def ld2HisPriceInfo(catBaseId, siteName):
    ''' Move the freshly-flagged current prices into the history table. '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    cfg = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    conn = getConnect()
    savePriceInfo2HisTable(conn, cfg.tProdPriceHis, cfg.tProdPriceCur,
                           cfg.tProdInfo, site_id)
    conn.close()
def __isAlreadyLoadDb(siteName, catBaseId, dateStr):
    '''True when prod_db_stat already records this date (or a newer one)
    for (site, category) — such a file must not be loaded again.'''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    db_u_time = getProdStatInfo(site_id, catBaseId)
    return bool(db_u_time and dateStr <= db_u_time.strftime('%Y-%m-%d'))
def updateProdStatLoadDb(siteName,catBaseId,dateStr):
    ''' Record dateStr as the newest loaded date for (site, category). '''
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    db_u_time = getProdStatInfo(site_id,catBaseId)
    if db_u_time is None:
        # First load for this pair: create the state row.
        saveProdStat(site_id, catBaseId, dateStr)
    elif dateStr > db_u_time.strftime('%Y-%m-%d'):
        updateProdStat(site_id, catBaseId, dateStr)
def backuplogs(src,siteName):
dst_base = LoaderConfig.getBackupDir()
dst = os.path.join(dst_base,siteName)
if not os.path.isdir(dst):
os.mkdir(dst)
print 'moving file from %s to %s' % (src,dst)
shutil.move(src, dst)
def uProdBaseInfo(siteName, catBaseId, fileName):
    ''' Batch-update existing products' mutable columns (repu, eval_num,
    urls) from one spider log file. '''
    conn = getConnect()
    site_id = SiteNameIDDictFactory.getSiteIdByName(siteName)
    # The base-info table depends on the top-level category.
    catBaseConfig = CatBaseConfigDictFactory.getCatBaseConfig(catBaseId)
    prodBaseInfoTable = catBaseConfig.tProdInfo
    #prodIdsDict = __getExistProdRawId(prodBaseInfoTable,site_id)
    prodList = []
    for line in __getLine(fileName):
        builder = ProdBaseInfoBuilder()
        prodBaseInfo = builder.getResult(line, site_id)
        if prodBaseInfo:
            print prodBaseInfo.name,prodBaseInfo.raw_id
            prodList.append(prodBaseInfo)
            if len(prodList) == 200:  # flush in batches of 200
                batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
                prodList = []
    if prodList:
        batchUpdateProdBaseInfo(conn, prodBaseInfoTable, prodList)
    conn.close()
def loadDataToDB(files,catBaseId,beginDate = None,endDate = None):
for fileName in files:#itertools.chain(files[1:],files[0:1]):
fName = os.path.split(fileName)[-1]
dateStr = __getDateStrFromFileName(fName)
if dateStr is None:
print 'file :%s ,no date str found.' % fileName
continue
if beginDate != None and dateStr != None and cmp(dateStr,beginDate) < 0:
continue
elif endDate != None and dateStr != None and cmp(dateStr,endDate) > 0:
break
siteName = fName.split('_')[0]
if siteName in LoaderConfig.getSitesSet():
print 'current proc file is :%s' % fileName
if __isAlreadyLoadDb(siteName, catBaseId, dateStr):
print '**** file :%s is already load to db *****' % fileName
backuplogs(fileName,siteName)
continue
try:
ld2CurPriceInfo(siteName, fileName, catBaseId)
ld2HisPriceInfo(catBaseId,siteName)
updateProdStatLoadDb(siteName,catBaseId,dateStr)
backuplogs(fileName,siteName)
except Exception ,e:
print e
else:
print 'site :%s is not in load set' % siteName
#===============================================================================
# module 测试
#===============================================================================
def testProdBaseInfoBuilder():
    '''Manual smoke test: parse one captured 360buy log line (site_id 6).'''
    testStr = '''429958|1199.00|2011-09-21 20:42:47|TCL BCD-176K50 176升 两门 冰箱(银灰色)
    |92|1471|直降400元!冰箱销量冠军!高效压缩机|http://www.360buy.com/product/429958.html||
    ('http://www.360buy.com/products/737-794-878.html', 3)'''
    builder = ProdBaseInfoBuilder()
    ret = builder.getResult(testStr,site_id = 6)
    print ret
def testCatBaseConfigDictFactory():
    '''Manual smoke test: repeated lookups should hit the lazy cache.'''
    for t in xrange(10):
        print CatBaseConfigDictFactory.getCatBaseConfig(1)
import glob
def load(rootBase):
    '''Scan each site subdirectory of *rootBase* and load its *log* files.

    Entries whose name contains a dot are skipped -- presumably to ignore
    plain files / hidden entries among the per-site directories; confirm
    that site directory names never contain dots.
    '''
    for rootDir in os.listdir(rootBase):
        if rootDir.find('.') >= 0:
            continue
        rootDir = os.path.join(rootBase,rootDir)
        if os.path.isdir(rootDir):
            # Glob every file whose name contains "log" in this site dir.
            plog = r'%s%s*log*' % (rootDir,os.sep)
            files = glob.glob(plog)
            print files
            loadDataToDB(files,catBaseId = LoaderConfig.getCatId())
if __name__ == '__main__':
    #testProdBaseInfoBuilder()
    #createProdBaseInfo(siteName = u'amazon', catBaseId = 1, fileName = r'F:\spider_exe-20111222\spider_exe\amazon\amazon_spider.log')
    #uProdBaseInfo(siteName=u'coo8',catBaseId = 1,fileName=r'F:\python_workspace\exe\coo8\log\coo8_spider.log.2011-12-29')
    #rootBase=r'I:/log'
    # Default entry point: load every configured site's logs from the base
    # directory named in the loader config.
    load(rootBase = LoaderConfig.getBaseDir())
    #ld2HisPriceInfo(1, '2011-11-13',2)
    #createProdBaseInfo(siteName = '360buy',catBaseId = 1,
    #    fileName = r'I:\log\360buy\360buy_spider.log.2011-10-24')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
import string
from enum import Enum
from spiderconfigparser import SpiderConfig
def translator(frm = '',to = '',delete = '',keep = None):
    """Build a byte-string translate/delete closure (classic Python 2 recipe).

    The returned function maps characters of *frm* onto *to*, removes the
    characters in *delete*, and -- when *keep* is given -- instead removes
    every character NOT listed in *keep*.  Unicode input is UTF-8 encoded
    (errors ignored) before translation.
    """
    # A single-character target is repeated so maketrans receives
    # equal-length source and destination strings.
    if len(to) == 1:
        to = to * len(frm)
    mapping = string.maketrans(frm, to)
    if keep is not None:
        # Identity table over all 256 byte values; compute the complement
        # of *keep* (minus what was already marked for deletion).
        identity = string.maketrans('', '')
        delete = identity.translate(identity, keep.translate(identity, delete))
    def translate(s):
        if isinstance(s, unicode):
            s = s.encode('utf-8', 'ignore')
        return s.translate(mapping, delete)
    return translate
# Filter that strips everything except ASCII digits from a byte string.
digits_only = translator(keep = string.digits)
# Crawl entry point: the amazon.cn front page, at category level 0.
rootUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='amazon', catagoryLevel=0)
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Extracts every search category from the http://www.amazon.cn front page
    (the search drop-down box) and wraps each in a level-1 ObuyUrlSummary.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort1UrlSums(self):
        # One level-1 summary per <option> in the search drop-down; the
        # option value is a "search-alias=<name>" string whose suffix is
        # used as the category name.
        finalUrlList = []
        allSort = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        for t in allSort.findAll(name='option'):# level-1 category
            searchAias = t['value']
            name = searchAias.split('=')[-1]
            url = base_url.format(urllib.quote(searchAias))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary)
            finalUrlList.append(sort_1_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        # Public entry point: filtered level-1 category URL summaries.
        result = self.__getBaseSort1UrlSums()
        return self.filterUrlList(result)
class AmazonSortListParser(RootCatagoryPageParser):
    '''Parses the left-hand "refinements" sidebar of a search result page
    into sub-category ObuyUrlSummary objects.'''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSortListParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # The sidebar heading that introduces the category list contains
        # the word u'类别' ("category").
        return catName.find(u'类别') >= 0
    def __getSubUrlSums(self):
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        #refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
        #allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
        # Walk the <h2> headings until the category one is found; after the
        # loop catSeg points at it (or at the last heading when none match
        # -- NOTE(review): confirm a category heading is always present).
        for catSeg in sort2(name='h2'):
            if self.__isCat(catSeg.getText().strip()):
                break
        allSort2Seg = catSeg.findNextSibling(name='ul')
        for t in allSort2Seg.findAll(name='a'):
            # Anchors without a refinementLink span are counters/other UI.
            nameSeg = t.find(name='span',attrs={'class':'refinementLink'})
            if not nameSeg:
                continue
            #prodTotalNumSeg = t.find(name='span',attrs={'class':'narrowValue'})
            name = nameSeg.getText()
            #totalNum = prodTotalNumSeg.getText()
            #print digits_only(totalNum)
            url = t['href']
            # Sidebar hrefs are site-relative; prefix the host.
            url = ''.join((self.mainHost,url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,
                                             isCrawle=True)
            finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getSubUrlSums()
        return self.filterUrlList(result)
class AmazonListFirstPageParser(Sort3PageParser):
    '''Parses a product listing (search result) page: extracts one
    ProductDetails per result row and the AJAX URL of the next page.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonListFirstPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parseProduct(self, prod):
        '''Build a ProductDetails from one result <div>.

        Returns None when the title anchor is missing (placeholder rows).
        Missing price/rating/review segments fall back to '0.00' / '0'.
        '''
        titleSeg = prod.find( attrs={'class':'title'})
        if titleSeg is None:
            return
        if titleSeg.a is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        # The product id (ASIN) is the second-to-last path segment of the
        # product URL; the URL is rewritten to the lighter detailApp form.
        pid = url.split('/')[-2]
        url = 'http://www.amazon.cn/mn/detailApp?asin={}'.format(pid)
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # <strike> holds the pre-discount list price when present.
            bypastSeg = priceSeg.strike
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        imgUrl = ParserUtils.getImgUrl(prod.find(name='div',attrs={'class':'image'}))
        repuSeg = prod.find(name='div', attrs={'class':'stars'})
        reputation = '0'
        if repuSeg != None:
            reputation = ParserUtils.getDigit(repuSeg.img['alt'])
        evlSeg = prod.find(name='div', attrs={'class':'reviewsCount'})
        evaluateNum = '0'
        if evlSeg != None:
            evaluateNum = ParserUtils.getDigit(evlSeg.a.getText())
        prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice=currentPrice, pubPrice=pastPrice,
                                    reputation=reputation,evaluateNum=evaluateNum,name=pName, adWords='')
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Return ProductDetails for every product on the page, including
        the block Amazon hides inside the "results-atf-next" HTML comment.'''
        resultList = []
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        # The next chunk of results ships as an HTML comment; its text is
        # re-parsed as its own soup (entities converted) and scanned again.
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    def __nextPagePattern(self):
        # AJAX endpoint returning the next page as '&&&'-separated JSON
        # fragments (consumed by AmazonNextPageJsonParser).
        # return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
        return r'http://www.amazon.cn/mn/search/ajax/{}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
    def __getNextPageUrl(self):
        '''Derive the AJAX next-page URL from #pagnNextLink, or None when
        this is the last page.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # The last node id of the 'rh' browse ladder is used as the
            # pageTypeID query parameter.
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
            ref = url.replace(r'/gp/search/','')
            #tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,pageTypeID)
        return fullUrl
    def parserSubUrlSums(self):
        '''Return [next-page urlsum], or [] at the end / page cap.'''
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            query = urlparse.urlparse(nextPageUrl).query
            pageNum = urlparse.parse_qs(query)['page'][0]
            # Honour the configured maximum crawl depth per category.
            if(int(pageNum) >= SpiderConfig.getMaxPage()):
                return []
            urlSum = self.buildSort_4(nextPageUrl)
            return [urlSum]
class AmazonNextPageJsonParser(Parser):
    '''
    Parses the "next page" AJAX response: several JSON objects separated
    by '&&&'.  The interesting members carry HTML fragments that are
    delegated to AmazonListFirstPageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonNextPageJsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # Bug fix: these attributes were previously only assigned when the
        # corresponding JSON key was present, so a response missing any of
        # them made parserPageInfos/parserSubUrlSums raise AttributeError.
        # Default to empty fragments, which the HTML parser treats as
        # "nothing found".
        self.pageNextSeg = ''
        self.resultsBtf = ''
        self.resultsAtf = ''
        segList = self.dataStr.split('&&&')
        # Collapse whitespace and drop empty fragments before decoding.
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = filter(lambda seg:seg != '',segList)
        jSonObjs = [json.loads(seg) for seg in segList ]
        for jsonObj in jSonObjs:
            if jsonObj.has_key('pagination'):
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if jsonObj.has_key('results-btf'):
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if jsonObj.has_key('results-atf-next'):
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']
    def parserPageInfos(self):
        '''Products parsed from both the BTF and ATF-next HTML fragments.'''
        result = []
        retBtf = AmazonListFirstPageParser(self.resultsBtf,self.rootUrlSummary).parserPageInfos()
        retAtf = AmazonListFirstPageParser(self.resultsAtf,self.rootUrlSummary).parserPageInfos()
        result.extend(itertools.chain(retBtf,retAtf))
        return result
    def parserSubUrlSums(self):
        '''Next-page urlsum list extracted from the pagination fragment.'''
        return AmazonListFirstPageParser(self.pageNextSeg,self.rootUrlSummary).parserSubUrlSums()
# catagoryLevel -> parser class dispatch table used by the spider.
parserDict = {0:AmazonAllSortParser, 1:AmazonSortListParser, 2:AmazonSortListParser, 3:AmazonListFirstPageParser, 4:AmazonNextPageJsonParser}
''' test '''
# Fixture pages for the manual tests below live in test_resources/.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'amazon.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
exclude = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps',u'stripbooks',u'music',u'apparel',u'electronics']]
include = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps']]
firstPage = AmazonAllSortParser(content, rootUrlSum, include = None,exclude=exclude)
for sort_1 in firstPage.parserSubUrlSums():
#for index, urlsum in enumerate(sort_3.parentPath):
#print '\t' * index, str(urlsum.getUrlSumAbstract())
print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
fileName = os.path.join(testFilePath, 'toys_games.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[('test')], catagoryLevel=1)
sort2Page = AmazonSortListParser(content, sort_1_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
def testSort2Page():
fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-00-22.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort2Page = AmazonListFirstPageParser(content, sort_2_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
for product in sort2Page.parserPageInfos():
print product.logstr()
def deepSort3Page():
from crawlerhttp import getContentFromUrlSum
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_ex_n_1?rh=n%3A814224051%2Cn%3A814227051%2Cn%3A98519071&bbn=98519071&ie=UTF8&qid=1322031024',
parentPath=[rootObuyUrlSummary], catagoryLevel=2)
content = getContentFromUrlSum(sort_2_urlsum)
parser = AmazonSortListParser(content, sort_2_urlsum)
for urlsum in parser.parserSubUrlSums():
print urlsum.name,urlsum.url
def testSort3Details():
fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-08-17.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort3Page = AmazonNextPageJsonParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testComment():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Manual check: split a saved '&&&'-separated AJAX response into JSON
    fragments and feed each HTML payload through the listing parser.'''
    import json
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-06-28.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # Same normalization AmazonNextPageJsonParser performs.
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('pagination'):
            print jsonObj['pagination']['data']['value']
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            # The fragment wraps the real markup inside an HTML comment.
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Manual smoke-test entry point; swap in other test* helpers as needed.
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from amazon.amazonpageparser import parserDict,rootUrlSummary
from spider import ObuySpider,main
import os,sys
from logfacade import LoggerFactory
class AmazonSpider(ObuySpider):
    '''Spider specialized for amazon.cn.

    Instead of crawling the category tree from the root page it seeds its
    request queue from a local ``amazon.cat`` file ("name,url,level" per
    line) and treats catagoryLevel 3 as the final listing page.
    '''
    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
        procDetails=True, include=None, exclude=None, rootPageResult=None,):
        super(AmazonSpider, self).__init__(rootUrlSummary, parserDict, threadNum,
            procDetails, include, exclude, rootPageResult)
    def init_urls(self):
        # Seed the queue from amazon.cat located next to the executable.
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        catFile = os.path.join(curPath,'amazon.cat')
        with open(catFile) as f:
            for line in f:
                # Each utf-8 line: "name,url,level".
                name,url,level = [t.decode('utf-8') for t in line.split(',')]
                self.putSpideRequest(ObuyUrlSummary(name=name,url=url,catagoryLevel=int(level)))
    def procParserResult(self, result, urlsum, parser):
        if urlsum.catagoryLevel == 3:# marks the final (leaf) listing page
            urlsum.parent = urlsum
        parserResult = parser.parserSubUrlSums()
        if parserResult:
            for subUrlSum in parserResult:
                self.putSpideRequest(subUrlSum)
        else:
            # A level-2 page with no sub-categories is itself the listing
            # page: requeue it at level 3 so the listing parser handles it.
            if urlsum.catagoryLevel == 2:
                urlsum.catagoryLevel = 3
                self.putSpideRequest(urlsum)
        self.procPageInfos(parser,urlsum)
if __name__ == '__main__':
    # Run the amazon spider with its category->parser dispatch table.
    main(root=rootUrlSummary,parserDict=parserDict,SpiderClass = AmazonSpider)
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from amazon.amazonpageparser import parserDict,rootUrlSummary
from spider import ObuySpider,main
import os,sys
from logfacade import LoggerFactory
class AmazonSpider(ObuySpider):
    '''Spider specialized for amazon.cn.

    Instead of crawling the category tree from the root page it seeds its
    request queue from a local ``amazon.cat`` file ("name,url,level" per
    line) and treats catagoryLevel 3 as the final listing page.
    '''
    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
        procDetails=True, include=None, exclude=None, rootPageResult=None,):
        super(AmazonSpider, self).__init__(rootUrlSummary, parserDict, threadNum,
            procDetails, include, exclude, rootPageResult)
    def init_urls(self):
        # Seed the queue from amazon.cat located next to the executable.
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        catFile = os.path.join(curPath,'amazon.cat')
        with open(catFile) as f:
            for line in f:
                # Each utf-8 line: "name,url,level".
                name,url,level = [t.decode('utf-8') for t in line.split(',')]
                self.putSpideRequest(ObuyUrlSummary(name=name,url=url,catagoryLevel=int(level)))
    def procParserResult(self, result, urlsum, parser):
        if urlsum.catagoryLevel == 3:# marks the final (leaf) listing page
            urlsum.parent = urlsum
        parserResult = parser.parserSubUrlSums()
        if parserResult:
            for subUrlSum in parserResult:
                self.putSpideRequest(subUrlSum)
        else:
            # A level-2 page with no sub-categories is itself the listing
            # page: requeue it at level 3 so the listing parser handles it.
            if urlsum.catagoryLevel == 2:
                urlsum.catagoryLevel = 3
                self.putSpideRequest(urlsum)
        self.procPageInfos(parser,urlsum)
if __name__ == '__main__':
    # Run the amazon spider with its category->parser dispatch table.
    main(root=rootUrlSummary,parserDict=parserDict,SpiderClass = AmazonSpider)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from copy import deepcopy
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Collects every category from http://www.amazon.cn/gp/site-directory
    and wraps each second-level entry in an ObuyUrlSummary.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort2UrlSums(self):
        # Bug fix: the original source was corrupted here -- the method
        # header and its first statement were fused into
        # "def __getBaseSort2UrlS__getSubUrlSums finalUrlList = []",
        # a syntax error.  Reconstructed from the page-walking code that
        # followed it.
        finalUrlList = []
        allSort = self.soup.find(attrs={"id":"siteDirectory"})
        for t in allSort.findAll(name='div', attrs={"class":"popover-grouping"}):# level-1 category
            name = t.find(name='div', attrs={"class":"popover-category-name"}).h2.getText()
            url = ''.join((self.mainHost, name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSiblings(name='div')
            for tt in sort_2:# level-2 category
                name, url = ParserUtils.parserTag_A(tt.a)
                url = ''.join((self.mainHost,url))
                # Skip the aggregate "all ..." entries.
                if name.startswith(u'所有'):
                    continue
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
                finalUrlList.append(sort_2_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        # Bug fix: the original line was corrupted to
        # "__getSubUrlSumsilterUrlList(result)"; restored to the
        # conventional filter-and-return used by the sibling parsers.
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2Parser(RootCatagoryPageParser):
    '''
    Parses a level-2 category page (from the amazon.cn site directory)
    into further category ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # Widget headers containing u'分类' ("category") mark the boundary.
        return catName.find(u'分类') >= 0
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort3 = self.soup.findAll(name='div', attrs={"class":"unified_widget blurb"})
        for alls3 in allSort3:
            # NOTE(review): the break stops at the FIRST widget whose header
            # mentions u'分类' -- only widgets before it are harvested.
            # Presumably intentional; confirm against the page layout.
            if self.__isCat(alls3.h2.getText()):
                break
            for t in alls3.findAll(name='div',attrs={'class':'title'}):
                name, url = ParserUtils.parserTag_A(t.a)
                # Hrefs are site-relative; prefix the host.
                url = ''.join((self.mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
class AmazonSort3PageParser(Sort3PageParser):
    '''
    Parser for a level-3 (product listing) page: extracts product rows
    and the URL of the next result page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        '''Return one ProductDetails per result_<n> div on the page.'''
        resultList = []
        for prod in self.soup.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            pName, url = ParserUtils.parserTag_A(prod.find(name='div', attrs={'class':'title'}).a)
            # NOTE(review): the product name doubles as the product id here
            # (this older layout exposed no separate id).
            pid = pName
            currentPrice = ParserUtils.getPrice(prod.find(name='div',attrs={'class':'newPrice'}).span.getText())
            # <strike> carries the pre-discount list price when present.
            bypastSeg = prod.find(name='div',attrs={'class':'newPrice'}).strike
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                        name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
    def __getNextPageUrl(self):
        '''Next-page URL derived from #pagnNextLink, or None at the end.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            print url
            # Graft the link's tail onto the base listing URL as a '#'
            # fragment (the AJAX-style pagination of this layout).
            url = url.replace(r'/gp/search','#')
            baseUrl = self.rootUrlSummary.url.rsplit('#')[0]
            fullUrl = ''.join((baseUrl,url))
        return fullUrl
    def parserSubUrlSums(self):
        '''Return [next-page urlsum] or [] when this is the last page.'''
        result = self.__getNextPageUrl()
        if result is None:
            return []
        else:
            # Clone the current urlsum so the parent path/level carry over.
            urlSum = deepcopy(self.rootUrlSummary)
            urlSum.url = result
            return [urlSum]
''' test '''
import os
# Fixture pages for the manual tests below live in test_resources/.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'amazonSite.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/site-directory/ref=topnav_sad', name='Amazon')
include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
name='服务器', catagoryLevel=2)]
firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
for sort_2 in firstPage.parserSubUrlSums():
#for index, urlsum in enumerate(sort_3.parentPath):
#print '\t' * index, str(urlsum.getUrlSumAbstract())
print sort_2.url , sort_2.catagoryLevel
def testSort2Page():
fileName = os.path.join(testFilePath, '888465051.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/%E7%94%B5%E8%84%91%E5%8F%8A%E9%85%8D%E4%BB%B6/b/ref=sd_allcat_pc?ie=UTF8&node=888465051',
parentPath=[('test')], catagoryLevel=2)
sort3Page = AmazonSort2Parser(content, sort_2_urlsum)
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testSort3Page():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testSort3Details():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
def testComment():
from BeautifulSoup import BeautifulSoup, Comment
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Manual check: split a saved '&&&'-separated AJAX response into JSON
    fragments and feed each HTML payload through the listing parser.'''
    import json
    fileName = os.path.join(testFilePath, 'watch_json.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # Normalize whitespace and drop empty fragments before decoding.
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            # The fragment wraps the real markup inside an HTML comment.
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests -- uncomment the one to run.
    #testAllSortPage()
    #testSort2Page()
    #testSort3Page()
    #testSort3Details()
    #testComment()
    testJson()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
import string
from enum import Enum
from spiderconfigparser import SpiderConfig
def translator(frm = '',to = '',delete = '',keep = None):
    """Build a byte-string translate/delete closure (classic Python 2 recipe).

    The returned function maps characters of *frm* onto *to*, removes the
    characters in *delete*, and -- when *keep* is given -- instead removes
    every character NOT listed in *keep*.  Unicode input is UTF-8 encoded
    (errors ignored) before translation.
    """
    # A single-character target is repeated so maketrans receives
    # equal-length source and destination strings.
    if len(to) == 1:
        to = to * len(frm)
    mapping = string.maketrans(frm, to)
    if keep is not None:
        # Identity table over all 256 byte values; compute the complement
        # of *keep* (minus what was already marked for deletion).
        identity = string.maketrans('', '')
        delete = identity.translate(identity, keep.translate(identity, delete))
    def translate(s):
        if isinstance(s, unicode):
            s = s.encode('utf-8', 'ignore')
        return s.translate(mapping, delete)
    return translate
# Filter that strips everything except ASCII digits from a byte string.
digits_only = translator(keep = string.digits)
# Crawl entry point: the amazon.cn front page, at category level 0.
rootUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='amazon', catagoryLevel=0)
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Extracts every search category from the http://www.amazon.cn front page
    (the search drop-down box) and wraps each in a level-1 ObuyUrlSummary.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort1UrlSums(self):
        # One level-1 summary per <option> in the search drop-down; the
        # option value is a "search-alias=<name>" string whose suffix is
        # used as the category name.
        finalUrlList = []
        allSort = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        for t in allSort.findAll(name='option'):# level-1 category
            searchAias = t['value']
            name = searchAias.split('=')[-1]
            url = base_url.format(urllib.quote(searchAias))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary)
            finalUrlList.append(sort_1_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        # Public entry point: filtered level-1 category URL summaries.
        result = self.__getBaseSort1UrlSums()
        return self.filterUrlList(result)
class AmazonSortListParser(RootCatagoryPageParser):
    '''Parses the left-hand "refinements" sidebar of a search result page
    into sub-category ObuyUrlSummary objects.'''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSortListParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # The sidebar heading that introduces the category list contains
        # the word u'类别' ("category").
        return catName.find(u'类别') >= 0
    def __getSubUrlSums(self):
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        #refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
        #allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
        # Walk the <h2> headings until the category one is found; after the
        # loop catSeg points at it (or at the last heading when none match
        # -- NOTE(review): confirm a category heading is always present).
        for catSeg in sort2(name='h2'):
            if self.__isCat(catSeg.getText().strip()):
                break
        allSort2Seg = catSeg.findNextSibling(name='ul')
        for t in allSort2Seg.findAll(name='a'):
            # Anchors without a refinementLink span are counters/other UI.
            nameSeg = t.find(name='span',attrs={'class':'refinementLink'})
            if not nameSeg:
                continue
            #prodTotalNumSeg = t.find(name='span',attrs={'class':'narrowValue'})
            name = nameSeg.getText()
            #totalNum = prodTotalNumSeg.getText()
            #print digits_only(totalNum)
            url = t['href']
            # Sidebar hrefs are site-relative; prefix the host.
            url = ''.join((self.mainHost,url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,
                                             isCrawle=True)
            finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getSubUrlSums()
        return self.filterUrlList(result)
class AmazonListFirstPageParser(Sort3PageParser):
    '''Parses a product listing (search result) page: extracts one
    ProductDetails per result row and the AJAX URL of the next page.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonListFirstPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parseProduct(self, prod):
        '''Build a ProductDetails from one result <div>.

        Returns None when the title anchor is missing (placeholder rows).
        Missing price/rating/review segments fall back to '0.00' / '0'.
        '''
        titleSeg = prod.find( attrs={'class':'title'})
        if titleSeg is None:
            return
        if titleSeg.a is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        # The product id (ASIN) is the second-to-last path segment of the
        # product URL; the URL is rewritten to the lighter detailApp form.
        pid = url.split('/')[-2]
        url = 'http://www.amazon.cn/mn/detailApp?asin={}'.format(pid)
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # <strike> holds the pre-discount list price when present.
            bypastSeg = priceSeg.strike
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        imgUrl = ParserUtils.getImgUrl(prod.find(name='div',attrs={'class':'image'}))
        repuSeg = prod.find(name='div', attrs={'class':'stars'})
        reputation = '0'
        if repuSeg != None:
            reputation = ParserUtils.getDigit(repuSeg.img['alt'])
        evlSeg = prod.find(name='div', attrs={'class':'reviewsCount'})
        evaluateNum = '0'
        if evlSeg != None:
            evaluateNum = ParserUtils.getDigit(evlSeg.a.getText())
        prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice=currentPrice, pubPrice=pastPrice,
                                    reputation=reputation,evaluateNum=evaluateNum,name=pName, adWords='')
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Return ProductDetails for every product on the page, including
        the block Amazon hides inside the "results-atf-next" HTML comment.'''
        resultList = []
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        # The next chunk of results ships as an HTML comment; its text is
        # re-parsed as its own soup (entities converted) and scanned again.
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    def __nextPagePattern(self):
        # AJAX endpoint returning the next page as '&&&'-separated JSON
        # fragments (consumed by AmazonNextPageJsonParser).
        # return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
        return r'http://www.amazon.cn/mn/search/ajax/{}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
    def __getNextPageUrl(self):
        '''Derive the AJAX next-page URL from #pagnNextLink, or None when
        this is the last page.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # The last node id of the 'rh' browse ladder is used as the
            # pageTypeID query parameter.
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
            ref = url.replace(r'/gp/search/','')
            #tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,pageTypeID)
        return fullUrl
    def parserSubUrlSums(self):
        '''Return [next-page urlsum], or [] at the end / page cap.'''
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            query = urlparse.urlparse(nextPageUrl).query
            pageNum = urlparse.parse_qs(query)['page'][0]
            # Honour the configured maximum crawl depth per category.
            if(int(pageNum) >= SpiderConfig.getMaxPage()):
                return []
            urlSum = self.buildSort_4(nextPageUrl)
            return [urlSum]
class AmazonNextPageJsonParser(Parser):
    '''
    Parses the "next page" AJAX response: several JSON objects separated
    by '&&&'.  The interesting members carry HTML fragments that are
    delegated to AmazonListFirstPageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonNextPageJsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # Bug fix: these attributes were previously only assigned when the
        # corresponding JSON key was present, so a response missing any of
        # them made parserPageInfos/parserSubUrlSums raise AttributeError.
        # Default to empty fragments, which the HTML parser treats as
        # "nothing found".
        self.pageNextSeg = ''
        self.resultsBtf = ''
        self.resultsAtf = ''
        segList = self.dataStr.split('&&&')
        # Collapse whitespace and drop empty fragments before decoding.
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = filter(lambda seg:seg != '',segList)
        jSonObjs = [json.loads(seg) for seg in segList ]
        for jsonObj in jSonObjs:
            if jsonObj.has_key('pagination'):
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if jsonObj.has_key('results-btf'):
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if jsonObj.has_key('results-atf-next'):
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']
    def parserPageInfos(self):
        '''Products parsed from both the BTF and ATF-next HTML fragments.'''
        result = []
        retBtf = AmazonListFirstPageParser(self.resultsBtf,self.rootUrlSummary).parserPageInfos()
        retAtf = AmazonListFirstPageParser(self.resultsAtf,self.rootUrlSummary).parserPageInfos()
        result.extend(itertools.chain(retBtf,retAtf))
        return result
    def parserSubUrlSums(self):
        '''Next-page urlsum list extracted from the pagination fragment.'''
        return AmazonListFirstPageParser(self.pageNextSeg,self.rootUrlSummary).parserSubUrlSums()
# Crawl depth -> parser class, consumed by the spider framework.  Levels 1
# and 2 share AmazonSortListParser; level-4 pages arrive as the
# '&&&'-separated AJAX payload handled by AmazonNextPageJsonParser.
parserDict = {0:AmazonAllSortParser, 1:AmazonSortListParser, 2:AmazonSortListParser, 3:AmazonListFirstPageParser, 4:AmazonNextPageJsonParser}
''' test '''
# Cached fixture pages used by the manual test functions below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    """Manual test: parse the cached Amazon home page and print level-1 sorts."""
    fileName = os.path.join(testFilePath, 'amazon.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
    # 'include' is built but intentionally not passed below (include = None).
    exclude = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps',u'stripbooks',u'music',u'apparel',u'electronics']]
    include = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps']]
    firstPage = AmazonAllSortParser(content, rootUrlSum, include = None,exclude=exclude)
    for sort_1 in firstPage.parserSubUrlSums():
        #for index, urlsum in enumerate(sort_3.parentPath):
        #print '\t' * index, str(urlsum.getUrlSumAbstract())
        print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
    """Manual test: parse a cached level-1 page and print the level-2 urls."""
    fileName = os.path.join(testFilePath, 'toys_games.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[('test')], catagoryLevel=1)
    sort2Page = AmazonSortListParser(content, sort_1_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url
def testSort2Page():
    """Manual test: parse a cached level-2 listing; print sub urls and products."""
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-00-22.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                   parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort2Page = AmazonListFirstPageParser(content, sort_2_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url
    for product in sort2Page.parserPageInfos():
        print product.logstr()
def deepSort3Page():
    """Manual test: fetch a live level-2 page (performs real HTTP) and print sub urls."""
    from crawlerhttp import getContentFromUrlSum
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_ex_n_1?rh=n%3A814224051%2Cn%3A814227051%2Cn%3A98519071&bbn=98519071&ie=UTF8&qid=1322031024',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=2)
    content = getContentFromUrlSum(sort_2_urlsum)
    parser = AmazonSortListParser(content, sort_2_urlsum)
    for urlsum in parser.parserSubUrlSums():
        print urlsum.name,urlsum.url
def testSort3Details():
    """Manual test: parse a cached '&&&'-separated AJAX payload; print products and urls."""
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-08-17.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                   parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort3Page = AmazonNextPageJsonParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url
def testComment():
    """Manual test: extract and print every HTML comment of a cached page."""
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    comments = soup.findAll(text=lambda text:isinstance(text, Comment))
    for comment in comments:
        print comment.extract()
def testJson():
    """Manual test: split a cached '&&&'-separated AJAX payload and parse each segment.

    'results-btf' fragments are parsed directly; 'results-atf-next' fragments
    are wrapped in an HTML comment and must be extracted first.
    """
    import json
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-06-28.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('pagination'):
            print jsonObj['pagination']['data']['value']
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            soup = BeautifulSoup(jsonRet)
            # The product list sits inside an HTML comment node with this id.
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Only the AJAX next-page test is enabled by default.
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-10-10
@author: zhongfeng
'''
## {{{ http://code.activestate.com/recipes/84317/ (r2)
from threading import Condition,Thread
import copy
class Future(object):
    """Run *func* on a background thread; calling the instance blocks until
    the result is ready and returns a deep copy of it.

    Adapted from http://code.activestate.com/recipes/84317/.
    """
    def __init__(self, func, *param):
        self.__done = 0
        self.__result = None
        self.__status = 'working'
        # Waiters block on this Condition until the worker signals completion.
        self.__C = Condition()
        # Run the actual function in a separate thread.
        self.__T = Thread(target=self.wrapper, args=(func, param))
        self.__T.setName("FutureThread")
        self.__T.start()
    def __repr__(self):
        return '<Future at ' + hex(id(self)) + ':' + self.__status + '>'
    def __call__(self):
        """Block until the computation finishes, then return its result."""
        self.__C.acquire()
        while self.__done == 0:
            self.__C.wait()
        self.__C.release()
        # Deep copy prevents callers from tampering with the shared result.
        return copy.deepcopy(self.__result)
    def wrapper(self, func, param):
        """Thread target: run *func* and publish its result under the lock."""
        self.__C.acquire()
        try:
            self.__result = func(*param)
        except Exception:
            # Broad on purpose so a failing task cannot strand the waiters,
            # but no longer swallows SystemExit/KeyboardInterrupt.
            self.__result = "Exception raised within Future"
        self.__done = 1
        self.__status = self.__result
        # Fixed: notifyAll() instead of notify() -- with more than one thread
        # blocked in __call__, a single notify() woke only one waiter and
        # left the others blocked forever.
        self.__C.notifyAll()
        self.__C.release()
## end of http://code.activestate.com/recipes/84317/ }}}
class Singleton(object):
    ''' Pythonic singleton: every instantiation returns the same instance.

    Each subclass gets one shared instance, stored in that class's own
    '_inst' attribute.
    '''
    def __new__(cls, *args, **kargs):
        if '_inst' not in vars(cls):
            # Fixed: object.__new__ must not receive the constructor
            # arguments (DeprecationWarning on Python 2.6, TypeError on 3).
            cls._inst = super(Singleton, cls).__new__(cls)
        return cls._inst
from time import time
def profile(func):
    """Decorator: print the wrapped function's name and wall-clock run time."""
    def log(*args, **kargs):
        begin = time()
        ret = func(*args, **kargs)
        finish = time()
        # Single formatted string prints identically to the old two-item form.
        print('%s %s' % (func.__name__, finish - begin))
        return ret
    return log
def profile1(sec):
    """Parameterised profiling decorator factory.

    sec='around' -> decorator printing name and elapsed time after the call;
    sec='after'  -> decorator printing name and start timestamp before it.
    """
    def around(func):
        def log(*args, **kargs):
            begin = time()
            ret = func(*args, **kargs)
            finish = time()
            print('%s %s' % (func.__name__, finish - begin))
            return ret
        return log
    def after(func):
        def log(*args, **kargs):
            begin = time()
            print('%s %s' % (func.__name__, begin))
            ret = func(*args, **kargs)
            return ret
        return log
    return {'around': around, 'after': after}[sec]
from copy import deepcopy
def keeper(func):
    """Decorator guarding against the mutable-default-argument pitfall.

    Before every call the wrapped function's defaults are reset to a deep
    copy of the defaults captured at decoration time, so mutating a default
    (e.g. appending to a default list) does not leak between calls.
    """
    pristine = func.__defaults__
    def wrap(*args, **kargs):
        # Restore fresh copies of the original defaults on each invocation.
        func.__defaults__ = deepcopy(pristine)
        return func(*args, **kargs)
    return wrap
import md5
def hash(key):
    """Map *key* to a large integer by MD5-hashing str(key).

    NOTE: intentionally shadows the builtin hash() in this module -- it is
    the default hash function of ConsistentHash below.
    """
    # hashlib replaces the md5 module (deprecated since Python 2.5, removed
    # in Python 3); int() replaces long() likewise -- Python 2 ints
    # auto-promote, so the returned value is unchanged.
    import hashlib
    keyStr = str(key)
    if not isinstance(keyStr, bytes):
        keyStr = keyStr.encode('utf-8')
    return int(hashlib.md5(keyStr).hexdigest(), 16)
import bisect
class ConsistentHash(object):
    """Consistent-hash ring mapping objects to nodes.

    Each node is placed on the ring 'numOfReplicas' times (virtual nodes) to
    even out the distribution; a lookup finds the first virtual node at or
    after the object's hash, wrapping around at the end of the ring.
    """
    def __init__(self, nodes, numOfReplicas=4, hashfunc=hash):
        self.hashfunc = hashfunc
        self.numOfReplicas = numOfReplicas
        self.ring = {}                 # ring position -> node
        self.__sorted_key_list = []    # ring positions, kept sorted for lookup
        if nodes:
            for node in nodes:
                self.addNode(node)
    def addNode(self, node):
        """Insert 'numOfReplicas' virtual nodes for *node* into the ring."""
        for num in range(self.numOfReplicas):
            genKey = self.hashfunc('%s:%s' % (node, num))
            self.ring[genKey] = node
            bisect.insort(self.__sorted_key_list, genKey)
    def removeNode(self, node):
        """Remove all of *node*'s virtual nodes from the ring."""
        for num in range(self.numOfReplicas):
            key = self.hashfunc('%s:%s' % (node, num))
            del self.ring[key]
            self.__sorted_key_list.remove(key)
    def getNode(self, obj):
        """Return the node responsible for *obj*, or None if the ring is empty."""
        nodeKey = self.__getNodeKey(self.hashfunc(obj))
        if nodeKey is not None:
            return self.ring[nodeKey]
    def __getNodeKey(self, key):
        """Smallest ring position >= *key*, wrapping to the first position."""
        if not self.ring:
            return None
        # Fixed: O(log n) bisect lookup instead of the former linear scan --
        # the list is already maintained sorted with bisect.insort above.
        idx = bisect.bisect_left(self.__sorted_key_list, key)
        if idx == len(self.__sorted_key_list):
            idx = 0
        return self.__sorted_key_list[idx]
@keeper
def test(l=[], t=2):
    """Demo for keeper(): the default list stays fresh on every call."""
    l.append('1')
    print(l)
    return t
# Memcached node addresses ('host:port') -- candidate inputs for
# ConsistentHash; not referenced elsewhere in this module.
memcache_servers = ['192.168.0.246:11212',
                    '192.168.0.247:11212',
                    '192.168.0.249:11212',
                    '192.168.0.250:11212']
if __name__ == '__main__':
    # Smoke test: run test() through Future; prints the list, then the value of t.
    f = Future(test, *([1,2,3],3))
    print f()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
# Root entry of the DangDang category tree; crawl level 0.
dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
class DangDangAllSortParser(RootCatagoryPageParser):
    '''
    Collects every category from http://category.dangdang.com/?ref=www-0-C
    and builds ObuyUrlSummary objects for them; the book category is skipped.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        """Walk the three-level category tree and return level-3 url summaries.

        Levels 1 and 2 are built with isCrawle=False (they only provide the
        parent path); only the level-3 categories are returned for crawling.
        """
        finalUrlList = []
        allSort = self.soup.find(attrs={'class':'categories_mainBody'})
        for t in allSort.findAll(name='div',attrs={'id':re.compile(r'[a-z]*')}):# level-1 category
            name = t['id']
            if name == 'book': # books are deliberately not parsed
                continue
            url = ''.join((r'http://category.dangdang.com/',name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs={'class':''.join([name,'_details'])})
            for tt in sort_2(name='li'):# level-2 category
                name, url = ParserUtils.parserTag_A(tt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.a.findNextSiblings(name='a'):# level-3 category
                    name, url = ParserUtils.parserTag_A(ttt)
                    # NOTE(review): 'store=eq0' appears to restrict listings to
                    # DangDang's own store -- inferred from usage; confirm.
                    url = '&'.join((url,'store=eq0'))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class DangDangSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        """Template for follow-up listing pages: current url + '&p={page}'."""
        pageSeg = 'p={}'
        return '%s&%s' % (self.rootUrlSummary.url,pageSeg)
    def getTotal(self):
        """Return the listing page count, capped at SpiderConfig.getMaxPage().

        Reads the count from the '共N页' text; falls back to the span next to
        the 'jumpto' input (older layout), and to 1 when neither exists.
        """
        regx = u'共([0-9]*)页'
        p = re.compile(regx)
        s = self.soup.find(name='span',attrs = {'id':'all_num'})
        if s is None: # older page layout, e.g. dangdang_2011-08-04_10-00-04.html
            st = self.soup.find(name='input',attrs = {'id':'jumpto'})
            if st != None:
                s = st.findNextSibling(name='span')
        if s is None:
            return 1
        pageNum = s.getText()
        totalNum = int(p.search(pageNum).group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum
    def parserPageInfos(self):
        """Build a ProductDetails object for every product on the page.

        Supports two layouts: the 'mode_goods clearfix' list and the older
        'listitem' blocks.  Star images are summed into a 0-5 reputation score.
        """
        plist = self.soup.find(name='ul',attrs={'class':'mode_goods clearfix'})
        resultList = []
        if plist is None:
            prodSeg = self.soup.findAll(attrs = {'class':'listitem '})
        else:
            prodSeg = plist.findAll(name='li')
        for prod in prodSeg:
            pNameSeg = prod.find(attrs={'class':'name'})
            if pNameSeg is None:
                pNameSeg = prod.find(attrs={'class':'title'})
            pName,url = ParserUtils.parserTag_A(pNameSeg.a)
            # Product id is the value after the url's last '='.
            pid = url.rsplit('=',1)[-1]
            t = prod.find(attrs={'class':'price_d'}) # current (discounted) price -- inferred from class name
            if t != None :
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            t = prod.find(attrs={'class':'price_m'}) # original (list) price -- inferred from class name
            if t != None:
                pastPrice = ParserUtils.getPrice(t.getText())
            else:
                pastPrice = 0.00
            starLevelSeg = prod.find(name = 'p',attrs={'class':'starlevel'})
            repu = 0.0
            evalNum = 0
            if starLevelSeg:
                # NOTE(review): compares against relative image paths -- breaks
                # if the site serves absolute image urls; confirm.
                for starImg in starLevelSeg.findAll(name='img'):
                    if starImg['src'] == 'images/star_all.png':
                        repu += 1.0
                    elif starImg['src'] == 'images/star_half.png':
                        repu += 0.5
                evalNum = starLevelSeg.find(name='span').a.getText()
            imgUrlSeg = prod.find(attrs={'class':re.compile('.*pic')})
            imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
            prodDetail = ProductDetails(productId=pid, fullUrl= url,imageUrl = imgUrl, privPrice = currentPrice,pubPrice=pastPrice,
                                        name=pName, adWords='',reputation=repu,evaluateNum=evalNum)
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList
class DangDangSort4PageParser(DangDangSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product info is extracted.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Deliberately yields no further urls (returns None); level 4 is the
        # last crawl depth for this site.
        # NOTE(review): siblings return [] here -- confirm callers handle None.
        pass
# Crawl-level -> parser mapping consumed by the spider framework.
parserDict = {0:DangDangAllSortParser, 3:DangDangSort3PageParser, 4:DangDangSort4PageParser}
''' test '''
import os
# Cached fixture pages used by the manual test functions below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
    """Manual test: parse the cached category page and print the level-3 tree."""
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    """Manual test: print the level-4 (pagination) urls of a cached listing page."""
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    """Manual test: print every product parsed from a cached listing page."""
    fileName = os.path.join(testFilePath,'dangdang_2011-08-04_10-31-18.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print type(product.logstr())
        print product.logstr()
def testRegx():
    """Manual test: exercise the '共N页' page-count regex against a cached page."""
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    # NOTE(review): the decoded content is never used afterwards.
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)
if __name__ == '__main__':
    # Only the product-details test is enabled.
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng

Entry point: crawl dangdang starting from its category root, using the
level->parser mapping defined in dangdang.dangpageparser.
'''
from dangdang.dangpageparser import parserDict,dangdangRoot
from spider import main
if __name__ == '__main__':
    main(dangdangRoot,parserDict)
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng

Entry point: crawl dangdang starting from its category root, using the
level->parser mapping defined in dangdang.dangpageparser.
'''
from dangdang.dangpageparser import parserDict,dangdangRoot
from spider import main
if __name__ == '__main__':
    main(dangdangRoot,parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
# Root entry of the DangDang category tree; crawl level 0.
dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
class DangDangAllSortParser(RootCatagoryPageParser):
    '''
    Collects every category from http://category.dangdang.com/?ref=www-0-C
    and builds ObuyUrlSummary objects for them; the book category is skipped.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        """Walk the three-level category tree and return level-3 url summaries.

        Levels 1 and 2 are built with isCrawle=False (they only provide the
        parent path); only the level-3 categories are returned for crawling.
        """
        finalUrlList = []
        allSort = self.soup.find(attrs={'class':'categories_mainBody'})
        for t in allSort.findAll(name='div',attrs={'id':re.compile(r'[a-z]*')}):# level-1 category
            name = t['id']
            if name == 'book': # books are deliberately not parsed
                continue
            url = ''.join((r'http://category.dangdang.com/',name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs={'class':''.join([name,'_details'])})
            for tt in sort_2(name='li'):# level-2 category
                name, url = ParserUtils.parserTag_A(tt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.a.findNextSiblings(name='a'):# level-3 category
                    name, url = ParserUtils.parserTag_A(ttt)
                    # NOTE(review): 'store=eq0' appears to restrict listings to
                    # DangDang's own store -- inferred from usage; confirm.
                    url = '&'.join((url,'store=eq0'))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class DangDangSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        """Template for follow-up listing pages: current url + '&p={page}'."""
        pageSeg = 'p={}'
        return '%s&%s' % (self.rootUrlSummary.url,pageSeg)
    def getTotal(self):
        """Return the listing page count, capped at SpiderConfig.getMaxPage().

        Reads the count from the '共N页' text; falls back to the span next to
        the 'jumpto' input (older layout), and to 1 when neither exists.
        """
        regx = u'共([0-9]*)页'
        p = re.compile(regx)
        s = self.soup.find(name='span',attrs = {'id':'all_num'})
        if s is None: # older page layout, e.g. dangdang_2011-08-04_10-00-04.html
            st = self.soup.find(name='input',attrs = {'id':'jumpto'})
            if st != None:
                s = st.findNextSibling(name='span')
        if s is None:
            return 1
        pageNum = s.getText()
        totalNum = int(p.search(pageNum).group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum
    def parserPageInfos(self):
        """Build a ProductDetails object for every product on the page.

        Supports two layouts: the 'mode_goods clearfix' list and the older
        'listitem' blocks.  Star images are summed into a 0-5 reputation score.
        """
        plist = self.soup.find(name='ul',attrs={'class':'mode_goods clearfix'})
        resultList = []
        if plist is None:
            prodSeg = self.soup.findAll(attrs = {'class':'listitem '})
        else:
            prodSeg = plist.findAll(name='li')
        for prod in prodSeg:
            pNameSeg = prod.find(attrs={'class':'name'})
            if pNameSeg is None:
                pNameSeg = prod.find(attrs={'class':'title'})
            pName,url = ParserUtils.parserTag_A(pNameSeg.a)
            # Product id is the value after the url's last '='.
            pid = url.rsplit('=',1)[-1]
            t = prod.find(attrs={'class':'price_d'}) # current (discounted) price -- inferred from class name
            if t != None :
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            t = prod.find(attrs={'class':'price_m'}) # original (list) price -- inferred from class name
            if t != None:
                pastPrice = ParserUtils.getPrice(t.getText())
            else:
                pastPrice = 0.00
            starLevelSeg = prod.find(name = 'p',attrs={'class':'starlevel'})
            repu = 0.0
            evalNum = 0
            if starLevelSeg:
                # NOTE(review): compares against relative image paths -- breaks
                # if the site serves absolute image urls; confirm.
                for starImg in starLevelSeg.findAll(name='img'):
                    if starImg['src'] == 'images/star_all.png':
                        repu += 1.0
                    elif starImg['src'] == 'images/star_half.png':
                        repu += 0.5
                evalNum = starLevelSeg.find(name='span').a.getText()
            imgUrlSeg = prod.find(attrs={'class':re.compile('.*pic')})
            imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
            prodDetail = ProductDetails(productId=pid, fullUrl= url,imageUrl = imgUrl, privPrice = currentPrice,pubPrice=pastPrice,
                                        name=pName, adWords='',reputation=repu,evaluateNum=evalNum)
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList
class DangDangSort4PageParser(DangDangSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product info is extracted.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Deliberately yields no further urls (returns None); level 4 is the
        # last crawl depth for this site.
        # NOTE(review): siblings return [] here -- confirm callers handle None.
        pass
# Crawl-level -> parser mapping consumed by the spider framework.
parserDict = {0:DangDangAllSortParser, 3:DangDangSort3PageParser, 4:DangDangSort4PageParser}
''' test '''
import os
# Cached fixture pages used by the manual test functions below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
    """Manual test: parse the cached category page and print the level-3 tree."""
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    """Manual test: print the level-4 (pagination) urls of a cached listing page."""
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    """Manual test: print every product parsed from a cached listing page."""
    fileName = os.path.join(testFilePath,'dangdang_2011-08-04_10-31-18.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print type(product.logstr())
        print product.logstr()
def testRegx():
    """Manual test: exercise the '共N页' page-count regex against a cached page."""
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    # NOTE(review): the decoded content is never used afterwards.
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)
if __name__ == '__main__':
    # Only the product-details test is enabled.
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-07-11
日志工厂类
@author: zhongfeng
'''
import logging.config
import os,sys
class LoggerFactory(object):
    """Singleton factory around the stdlib logging configuration.

    The first instantiation loads 'logging.conf' from the directory of the
    running script (when the file exists); later calls reuse the already
    configured logging module.
    """
    _loggerFac = None
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: logging is configured only once.
        '''
        if '_inst' not in vars(cls):
            curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
            logCfg = os.path.join(curPath,'logging.conf')
            # Fixed: replaced the inverted 'if not exists: pass / else:'
            # construct with a direct positive check.
            if os.path.exists(logCfg):
                logging.config.fileConfig(logCfg)
            cls._inst = super(LoggerFactory, cls).__new__(cls)
        return cls._inst
    @classmethod
    def getLogger(cls, logName='root'):
        """Return a logging.Logger; a class may be passed instead of a name."""
        if cls._loggerFac is None:
            cls._loggerFac = LoggerFactory()
        if isinstance(logName, type):  # a class was passed -- use its name
            logName = logName.__name__
        return logging.getLogger(logName)
    def __del__(self):
        logging.shutdown()
    @classmethod
    def shutdown(cls):
        logging.shutdown()
def testMutiThread():
    """Manual test: log from a thread pool (requires the threadpool package).

    NOTE(review): printlog uses a global ctime that is only imported inside
    this file's __main__ guard -- calling this from another module would
    raise NameError; confirm intended.
    """
    from threadpool import ThreadPool,WorkRequest
    def printlog(msg):
        logger = LoggerFactory.getLogger()
        logger.info('-'.join([msg,ctime()]))
    urls = (r'http://www.360buy.com/product/{}.html'.format(str(proid)) for proid in xrange(1,14000))
    #print urls
    #requests = makeRequests(printlog,urls)
    print "Creating thread pool with 3 worker threads."
    main = ThreadPool(3)
    [main.putRequest(WorkRequest(printlog,[url])) for url in urls ]
    main.wait()
if __name__ == '__main__':
    from time import ctime
    # Emit a few test records through the '360buy' logger.
    for t in range(10):
        logger = LoggerFactory.getLogger('360buy')
        logger.info(' %d this is a test %s' % ( t, ctime() ))
| Python |
import os, marshal, thread
# Filename used for index files, must not contain numbers
INDEX_FILENAME = 'index'
# Exception thrown when calling get() on an empty queue
class Empty(Exception):
    """Raised when get() is called on an empty PersistentQueue."""
    pass
class PersistentQueue:
    """Disk-backed FIFO queue.

    Items live in two in-memory caches (head page for get(), tail page for
    put()) of at most 'cache_size' items each; full pages are serialized to
    numbered files inside directory 'name'.  The 'index' file records the
    head and tail page numbers.  All public methods are mutex-protected.
    """
    def __init__(self, name, cache_size=512, marshal=marshal):
        """
        Create a persistent FIFO queue named by the 'name' argument.

        The number of cached queue items at the head and tail of the queue
        is determined by the optional 'cache_size' parameter. By default
        the marshal module is used to (de)serialize queue items, but you
        may specify an alternative serialize module/instance with the
        optional 'marshal' argument (e.g. pickle).
        """
        assert cache_size > 0, 'Cache size must be larger than 0'
        self.name = name
        self.cache_size = cache_size
        self.marshal = marshal
        self.index_file = os.path.join(name, INDEX_FILENAME)
        self.temp_file = os.path.join(name, 'tempfile')
        self.mutex = thread.allocate_lock()
        self._init_index()
    def _init_index(self):
        """Load (or initialise) the head/tail index and both cache pages."""
        if not os.path.exists(self.name):
            os.mkdir(self.name)
        if os.path.exists(self.index_file):
            index_file = open(self.index_file)
            # Fixed: removed a leftover debug print of the index file path;
            # map(int, ...) replaces map(lambda x: int(x), ...).
            self.head, self.tail = map(int, index_file.read().split(' '))
            index_file.close()
        else:
            self.head, self.tail = 0, 1
        def _load_cache(cache, num):
            # Load page file 'num' into attribute 'cache'; an empty or
            # missing page becomes [].
            name = os.path.join(self.name, str(num))
            mode = 'rb+' if os.path.exists(name) else 'wb+'
            cachefile = open(name, mode)
            try:
                setattr(self, cache, self.marshal.load(cachefile))
            except EOFError:
                setattr(self, cache, [])
            cachefile.close()
        _load_cache('put_cache', self.tail)
        _load_cache('get_cache', self.head)
        assert self.head < self.tail, 'Head not less than tail'
    def _sync_index(self):
        """Atomically rewrite the index file (write temp file, then rename)."""
        assert self.head < self.tail, 'Head not less than tail'
        index_file = open(self.temp_file, 'w')
        index_file.write('%d %d' % (self.head, self.tail))
        index_file.close()
        if os.path.exists(self.index_file):
            os.remove(self.index_file)
        os.rename(self.temp_file, self.index_file)
    def _split(self):
        """Flush the full put cache to a new tail page file."""
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)
        self.tail += 1
        # NOTE(review): when the cache exceeds cache_size this keeps the
        # FIRST cache_size items that were just dumped -- looks like it
        # should keep the remainder instead; behaviour preserved as-is.
        if len(self.put_cache) <= self.cache_size:
            self.put_cache = []
        else:
            self.put_cache = self.put_cache[:self.cache_size]
        self._sync_index()
    def _join(self):
        """Advance the head to the next page and load it into the get cache."""
        current = self.head + 1
        if current == self.tail:
            # Only one page left: steal the put cache directly.
            self.get_cache = self.put_cache
            self.put_cache = []
        else:
            get_file = open(os.path.join(self.name, str(current)), 'rb')
            self.get_cache = self.marshal.load(get_file)
            get_file.close()
            try:
                os.remove(os.path.join(self.name, str(self.head)))
            except:
                pass
        self.head = current
        if self.head == self.tail:
            self.head = self.tail - 1
        self._sync_index()
    def _sync(self):
        """Write the index and both cache pages to disk."""
        self._sync_index()
        get_file = os.path.join(self.name, str(self.head))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.get_cache, temp_file)
        temp_file.close()
        if os.path.exists(get_file):
            os.remove(get_file)
        os.rename(self.temp_file, get_file)
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)
    def __len__(self):
        """
        Return number of items in queue.
        """
        self.mutex.acquire()
        try:
            return (((self.tail-self.head)-1)*self.cache_size) + \
                    len(self.put_cache) + len(self.get_cache)
        finally:
            self.mutex.release()
    def sync(self):
        """
        Synchronize memory caches to disk.
        """
        self.mutex.acquire()
        try:
            self._sync()
        finally:
            self.mutex.release()
    def put(self, obj):
        """
        Put the item 'obj' on the queue.
        """
        self.mutex.acquire()
        try:
            self.put_cache.append(obj)
            if len(self.put_cache) >= self.cache_size:
                self._split()
        finally:
            self.mutex.release()
    def get(self):
        """
        Get an item from the queue.
        Throws Empty exception if the queue is empty.
        """
        self.mutex.acquire()
        try:
            if len(self.get_cache) > 0:
                return self.get_cache.pop(0)
            else:
                self._join()
                if len(self.get_cache) > 0:
                    return self.get_cache.pop(0)
                else:
                    raise Empty
        finally:
            self.mutex.release()
    def close(self):
        """
        Close the queue. Implicitly synchronizes memory caches to disk.
        No further accesses should be made through this queue instance.
        """
        self.mutex.acquire()
        try:
            self._sync()
            if os.path.exists(self.temp_file):
                try:
                    os.remove(self.temp_file)
                except:
                    pass
        finally:
            self.mutex.release()
## Tests
if __name__ == "__main__":
    ELEMENTS = 1000
    p = PersistentQueue('test', 1)
    print 'Enqueueing %d items, cache size = %d' % (ELEMENTS,
    p.cache_size)
    # NOTE(review): the queue is drained before anything was enqueued -- this
    # relies on items persisted by a previous run and raises Empty on a
    # fresh 'test' directory; confirm intended.
    for a in range(ELEMENTS/2):
        print p.get()
    from time import sleep
    # Enqueue slowly (one item per second) so another process can consume.
    for a in range(ELEMENTS):
        sleep(1)
        print a
        p.put(str(a))
    p.sync()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    p.sync()
    p.close()
| Python |
#!/usr/bin/python
import socket
# Class that wraps a real socket and changes it to a HTTP tunnel whenever a connection is asked via the "connect" method
class ProxySock :
def __init__(self, socket, proxy_host, proxy_port) :
# First, use the socket, without any change
self.socket = socket
# Create socket (use real one)
self.proxy_host = proxy_host
self.proxy_port = proxy_port
# Copy attributes
self.family = socket.family
self.type = socket.type
self.proto = socket.proto
def connect(self, address) :
# Store the real remote adress
(self.host, self.port) = address
# Try to connect to the proxy
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(
self.proxy_host,
self.proxy_port,
0, 0, socket.SOL_TCP) :
try:
# Replace the socket by a connection to the proxy
self.socket = socket.socket_formal(family, socktype, proto)
self.socket.connect(sockaddr)
except socket.error, msg:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket :
raise socket.error, ms
# Ask him to create a tunnel connection to the target host/port
self.socket.send(
("CONNECT %s:%d HTTP/1.1\r\n" +
"Host: %s:%d\r\n\r\n") % (self.host, self.port, self.host, self.port));
# Get the response
resp = self.socket.recv(4096)
# Parse the response
parts = resp.split()
# Not 200 ?
if parts[1] != "200" :
raise Exception("Error response from Proxy server : %s" % resp)
def __getattr__(self, name):
'''Automatically wrap methods and attributes for socket object.'''
return getattr(self.socket, name)
# Return the (host, port) of the actual target, not the proxy gateway
def getpeername(self) :
return (self.host, self.port)
# Install a proxy, by changing the method socket.socket()
def setup_http_proxy(proxy_host, proxy_port) :
    """Monkey-patch socket.socket so new sockets tunnel through the HTTP proxy.

    The real constructor is preserved as socket.socket_formal, which
    ProxySock.connect relies on.
    """
    # New socket constructor returning a ProxySock wrapping a real socket.
    # Fixed: default arguments added so plain socket.socket() calls keep
    # working after the patch (the real constructor accepts zero args).
    def socket_proxy(af=socket.AF_INET, socktype=socket.SOCK_STREAM, proto=0) :
        sock = socket.socket_formal(af, socktype, proto)
        return ProxySock(
            sock,
            proxy_host,
            proxy_port)
    # Fixed: only capture the original constructor the first time -- the old
    # unconditional assignment made a second call store the previous
    # socket_proxy in socket_formal, wrapping proxies in proxies.
    socket.socket_formal = getattr(socket, 'socket_formal', socket.socket)
    socket.socket = socket_proxy
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-9
@author: zhongfeng
'''
from egou.egoupageparser import *
from crawlerhttp import crawle
from pageparser import *
from dbproc.catagoryproc import *
def getContentFromUrlSum(urlsum):
    """Download *urlsum*, retrying forever until an HTTP 200 response arrives."""
    while True:
        result = crawle(urlsum)
        # Any non-200 response (timeout, 5xx, ...) is simply retried.
        if result.code == 200:
            return result.content
def getAllSort1(content = None):
    """Return the level-1 category url summaries of egou.com.

    *content* may carry a pre-fetched root page; otherwise the root page is
    downloaded first.
    """
    if content is None:
        content = getContentFromUrlSum(egouRoot)
    #telCat = ObuyUrlSummary(url='http://www.egou.com/browse07.01/',catagoryLevel=1,parentPath=[egouRoot])
    #homeCat = ObuyUrlSummary(url='http://www.egou.com/browse07.02/',catagoryLevel=1,parentPath=[egouRoot])
    #computeCat = ObuyUrlSummary(url='http://www.egou.com/browse07.03/',catagoryLevel=1,parentPath=[egouRoot])
    #include=(telCat,homeCat,computeCat)
    parser = EGouSortParser(content, egouRoot)
    return parser.parserSubUrlSums()
def getAllSort3():
    """Crawl levels 1 and 2 of the egou category tree; return all level-3 urls."""
    all_sort3 = []
    for sort_1 in getAllSort1():
        sort1_page = EGouSort1PageParser(getContentFromUrlSum(sort_1), sort_1)
        for sort_2 in sort1_page.parserSubUrlSums():
            sort2_page = EGouSort2PageParser(getContentFromUrlSum(sort_2), sort_2)
            all_sort3.extend(sort2_page.parserSubUrlSums())
    return all_sort3
if __name__ == '__main__':
    from itertools import chain
    import os
    # Crawl the full level-3 category tree, print per-level counts, and
    # persist it under the site name '55bigo'.
    # NOTE(review): 'c:t.log' is a drive-relative Windows path; presumably
    # 'c:/t.log' was intended -- confirm.  The handle is currently unused
    # because the dump loop below is commented out.
    with open('c:t.log','w') as output:
        result = getAllSort3()
        calEveryLevelCatNum(result)
        createSiteCat(result,u'55bigo')
        #for sort3 in getAllSort3():
        #output.write( '|'.join([cat.name.encode('gb18030') for cat in chain(sort3.parentPath,(sort3.parent,))]))
        #output.write(os.linesep)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-09
@author: zhongfeng
'''
import re
from pageparser import *
egouRoot = ObuyUrlSummary(url=r'http://www.egou.com/', name='egou')
mainHost = 'http://www.egou.com'
# Pre-compiled once at import time: the original compiled this pattern on
# every call.  Matches a parenthesised item count such as "(123)" or "()".
_CAT_COUNT_RE = re.compile(r'\([0-9]*\)')
def filterCatName(name):
    """Strip parenthesised item-count suffixes from a category name,
    e.g. 'Phones(123)' -> 'Phones'."""
    return _CAT_COUNT_RE.sub('', name)
class EGouSortParser(RootCatagoryPageParser):
    '''
    Parse the full category listing on http://www.egou.com/ into level-1
    ObuyUrlSummary nodes (the nodes themselves are not crawled directly).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def _getBaseSort1UrlSums(self):
        # Collect one node per top-level category inside the "all sorts" div.
        finalUrlList = []
        allSort = self.soup.find(name='div',attrs={'id':'_JD_ALLSORT'})
        for t in allSort.findAll(name='div',attrs={'class':'item'}):# level-1 categories
            name,url = ParserUtils.parserTag_A(t.span.a)
            name = filterCatName(name)  # drop the "(123)" item-count suffix
            url = ''.join((mainHost,url))  # hrefs on this page are site-relative
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            #sort_2 = t.find(attrs={'class':'subitem'})
            #for tt in sort_2(name='dt'):# level-2 categories (disabled)
            #    name, url = ParserUtils.parserTag_A(tt.a)
            #    sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
            #    finalUrlList.append(sort_2_urlsum)
            finalUrlList.append(sort_1_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        result = self._getBaseSort1UrlSums()
        return self.filterUrlList(result)
class EGouSort1PageParser(RootCatagoryPageParser):
    '''
    Level-1 category page parser: collects the level-2 category links,
    including overflow entries hidden in the "more" pop-up layer.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSort1PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getNextSortUrlSums(self,firstFinalPage = False):
        finalUrlList = []
        # Main sub-category column of the page.
        rSort2 = self.soup.find(name='div',attrs={'class':'bi_mainBox_L_1_m_2_1','style':'padding-left:18px;'})
        for t in rSort2.findAll(name='div',attrs={'class':'cat_1'}):
            name,url = ParserUtils.parserTag_A(t.a)
            name = filterCatName(name)  # drop the "(123)" item-count suffix
            url = ''.join((mainHost,url))  # hrefs are site-relative
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage = firstFinalPage)
            finalUrlList.append(sort_2_urlsum)
        # Extra categories tucked into the "more" pop-up layer, when present.
        rSort2_more = self.soup.find(name='div',attrs={'id':'biPopLayer2'})
        if rSort2_more:
            for t in rSort2_more(name='a'):
                name,url = ParserUtils.parserTag_A(t)
                name = filterCatName(name)
                url = ''.join((mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage = firstFinalPage)
                finalUrlList.append(sort_2_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.getNextSortUrlSums()
        return self.filterUrlList(result)
class EGouSort2PageParser(EGouSort1PageParser):
    '''
    Level-2 category page parser: identical to the level-1 parser except
    that the sub-categories it yields are marked as first final pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSort2PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        return self.filterUrlList(self.getNextSortUrlSums(firstFinalPage=True))
# Registry mapping category level -> parser class for the generic spider.
parserDict = {0:EGouSortParser, 1:EGouSort1PageParser,2:EGouSort2PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testEgouSortPage():
    # Manual check: parse a saved copy of the all-sorts page, keeping only
    # the phone category, and print the resulting level-1 nodes.
    fileName = os.path.join(testFilePath,'egouAllSort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    telCat = ObuyUrlSummary(url='http://www.egou.com/browse07.01/',catagoryLevel=1,parentPath=[egouRoot])
    firstPage = EGouSortParser(content, egouRoot,include=(telCat,))
    for sort_1 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_1.parentPath):
            pass
        print sort_1.name,sort_1.url ,sort_1.catagoryLevel
def testEgouSort1Page():
    # Manual check: level-2 extraction from a saved level-1 page.
    fileName = os.path.join(testFilePath,'browse08.3085_sort1.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # NOTE(review): egSortUrl is built but unused -- the parser below is
    # given egouRoot instead; confirm which was intended.
    egSortUrl = ObuyUrlSummary(url='http://www.egou.com/browse08.3085',catagoryLevel=1)
    firstPage = EGouSort1PageParser(content, egouRoot)
    for sort_2 in firstPage.parserSubUrlSums():
        print sort_2.name,sort_2.url ,sort_2.catagoryLevel
def testEgouSort2Page():
    # Manual check: level-3 extraction from a saved level-2 page.
    fileName = os.path.join(testFilePath,'browse07.01.01.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    egSortUrl = ObuyUrlSummary(url='http://www.egou.com/browse07.01.01',catagoryLevel=3)
    firstPage = EGouSort2PageParser(content, egSortUrl)
    for sort_2 in firstPage.parserSubUrlSums():
        print sort_2.name,sort_2.url ,sort_2.catagoryLevel
if __name__ == '__main__':
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testEgouSort2Page()
    #testEgouSort1Page()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from newegg.neweggpageparser import parserDict,newEggRoot
from spider import main
# Script entry: crawl newegg starting from its root category page, using the
# site-specific per-level parser registry.
if __name__ == '__main__':
    main(newEggRoot,parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
from utils import Future
newEggRoot = ObuyUrlSummary(url=ur'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
class NewEggAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.newegg.com.cn/CategoryList.htm into ObuyUrlSummary
    nodes for every level-3 category; level-1/2 nodes are built only as
    ancestors and are not crawled themselves.
    '''
    mainHost = r'http://www.newegg.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        # Walk the three-level category tree inside the "allCateList" block.
        finalUrlList = []
        allSort = self.soup.find(name = 'div',attrs={'class':'allCateList'})
        for t in allSort.findAll(attrs={'id':re.compile('pd[0-9]+')}):# level-1 anchors
            name = t.getText()
            # Level-1 entries are in-page anchors (#pdNN), not separate pages.
            url = '#'.join((r'http://www.newegg.com.cn/CategoryList.htm',t['id']))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='dl')
            for tt in sort_2(name='dt'):# level-2 categories
                name, url = ParserUtils.parserTag_A(tt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.findNextSibling(name='dd').findAll(name = 'a'):# level-3 categories
                    name, url = ParserUtils.parserTag_A(ttt)
                    url = '?'.join((url,'pageSize=96'))  # request 96 items per page
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class NewEggSort3PageParser(Sort3PageParser):
    '''
    Level-3 (category listing) page parser: yields follow-up page URLs and
    extracts per-product details from the item grid.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Build a str.format() template inserting "-<page>" before the last
        dot of the URL (e.g. .../1043-{}.htm?pageSize=96).'''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        '''Total page count taken from the pager's "current/total" text,
        capped at the configured maximum page count.'''
        pageSeg = self.soup.find(name='div',attrs={'class':'pageNav'}).find(name='ins').getText()
        totalPage = int(pageSeg.split('/')[-1])
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    def getAdWords(self,prod,prodUrl):
        '''Fetch the product detail page only when the listing advertises a
        cash-back or free-gift promotion; return its promo summary text.'''
        extraIconSeg = prod.find(name ='p',attrs={'class':'extraIcon'})
        adWords = ''
        if extraIconSeg:
            extraMsg = extraIconSeg.getText()
            # u'返现' = cash back, u'赠品' = free gift
            if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1:
                sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
                result = crawleRetries(urlSum = sort_5_urlsum)
                parser = NewEggSortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
                adWords = parser.parserPageInfos()
        return adWords
    def parserPageInfos(self):
        '''Extract one ProductDetails per item in the listing grid; the
        (possibly slow) promo-text fetch runs as a Future while the cheap
        fields are parsed.'''
        plist = self.soup.find(attrs={'id':'itemGrid1'})
        resultList = []
        if plist is None:
            return resultList
        for prod in plist.findAll(attrs={'class':'itemCell noSeller'}):
            pName,url = ParserUtils.parserTag_A(prod.find(name ='p',attrs={'class':'info'}).a)
            # Kick off the detail-page fetch in the background.
            futureTask = Future(self.getAdWords, *(prod, url))
            #adWords = self.getAdWords(prod, url)
            pid = url.rsplit('/',1)[-1].split('.')[0]  # .../NNNN.htm -> NNNN
            currentPrice = ParserUtils.getPrice(prod.find(attrs={'class':'current'}).strong.getText())
            bypastSeg = prod.find(attrs={'class':'bypast'})
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            # The last <img> inside <dt> is the product thumbnail.
            imgUrlSeg = prod.find(name='dt').findAll(name='img')[-1]
            imgUrl = imgUrlSeg['src']
            reputation = '0.0'
            evlNum = '0'
            rankSeg = prod.find(name='dd',attrs={'class':'rank '})
            aSeg = None
            if rankSeg != None:
                aSeg = rankSeg.a
            if aSeg != None:
                reputation = ParserUtils.getDigit(aSeg['title'])
                evlNum = ParserUtils.getDigit(aSeg.getText())
            adWords = futureTask()  # block until the promo fetch completes
            prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice = currentPrice,
                         pubPrice=pastPrice,name=pName, adWords=adWords,reputation=reputation,evaluateNum=evlNum)
            prodDetail.reputation = reputation
            prodDetail.evaluateNum = evlNum
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList
class NewEggSort4PageParser(NewEggSort3PageParser):
    '''
    Level-4 pages are plain listing pages: product extraction is inherited
    unchanged, and no further sub-category URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Listing pages have no children to recurse into.
        return None
class NewEggSortFinalParser(Parser):
    '''Product detail page parser: summarises promotions as the string
    "<cash-back-amount>@<numbered gift list>".'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        crashCut = self.getCrashCut()
        exGift = self.getExtGift()
        return '@'.join((crashCut,exGift))
    def getCrashCut(self):
        '''Cash-back amount from the favourable-info list ('0.00' if none).'''
        favInfoSeg = self.soup.find(name = 'ul',attrs={'class':'favourableInfo'})
        crashCut = '0.00'
        if favInfoSeg:
            for info in favInfoSeg(name = 'li'):
                if info.label.getText().find(u'返现') != -1:  # u'返现' = cash back
                    crashCutText = info.getText()
                    crashCut = ParserUtils.getDigit(crashCutText)
                    break
        return crashCut
    def getExtGift(self):
        '''Concatenated numbered list of free-gift descriptions ('' if none).'''
        exGiftSeg = self.soup.find(name = 'div',attrs={'class':'presentArea'})
        exGift = []
        if exGiftSeg:
            for index,info in enumerate(exGiftSeg(name = 'dd')):
                t = '%s.%s' % (index,info.getText())
                exGift.append(t)
        return ''.join(exGift)
# Registry mapping category level -> parser class for the generic spider
# (levels 1-2 are absent: the root parser emits level-3 nodes directly).
parserDict = {0:NewEggAllSortParser, 3:NewEggSort3PageParser, 4:NewEggSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testNewEggAllSortPage():
fileName = os.path.join(testFilePath,'CategoryList.htm')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
include = [ ObuyUrlSummary(url = r'http://http://www.newegg.com.cn/Category/536.htm',
name='服务器',catagoryLevel = 2)]
firstPage = NewEggAllSortParser(content, rootUrlSum,include = include)
for sort_3 in firstPage.getBaseSort3UrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print '\t'*index,str(urlsum.getUrlSumAbstract())
print sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Manual check: page-2..N URL generation from a saved listing page.
    fileName = os.path.join(testFilePath,'newegg_2011-08-25_16-03-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1043.htm?pageSize=96',
                    parentPath=[('test')], catagoryLevel=3)
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Manual check against the LIVE site: fetch a listing page until it
    # answers 200 and print every parsed product.
    #fileName = os.path.join(testFilePath,'1043.htm')
    #with open(fileName, 'r') as fInput:
    #    content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/970.htm?ep=1',
                    parentPath=[('test')], catagoryLevel=3)
    from crawlerhttp import crawle
    content = ''
    while True:
        result = crawle(sort_3_urlsum)
        if result.code == 200:
            content = result.content
            break
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    #testNewEggAllSortPage()
    #testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from newegg.neweggpageparser import parserDict,newEggRoot
from spider import main
# Script entry: crawl newegg starting from its root category page, using the
# site-specific per-level parser registry.
if __name__ == '__main__':
    main(newEggRoot,parserDict)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType, crawleRetries
from time import strftime
import chardet, re
from urlparse import urlparse
from threadpool import WorkRequest
from crawlerhttp import crawle
from cStringIO import StringIO
from itertools import chain
encodingDict = {'360buy':'gb2312', 'newegg':'gb2312', 'dangdang':'gb2312', 'gome':'utf-8',
'amazon':'utf-8', 'coo8':'gb2312', 'suning':'utf-8','egou':'GBK',}#'efeihu':'utf-8'}
def reinqueue_proc(req, result):
    '''Failure handler: re-queue a failed price-image fetch exactly once.

    *req* is the argument tuple handed to getProductPrice; from the
    indexing here, req[0] is the image ObuyUrlSummary and req[3] the
    thread pool.  The first failure is recorded in urlsum.stat and the
    request resubmitted; a later failure (stat already set) is only logged.
    '''
    urlsum = req[0]
    pool = req[3]
    if urlsum.stat == 0:
        urlsum.stat = result.code  # remember the first failure's HTTP code
        req = WorkRequest(getProductPrice, req, None,
                      callback=None)
        pool.putRequest(req)
    else:
        print "Failed %s:%d" % (urlsum.url, result.code)
def getProductPrice(*req):
    '''Thread-pool task: fetch the price image (req[0]) with retries and
    hand the response to proc_normal_result; returns the raw crawl result.'''
    pimgUrlSumm = req[0]
    result = crawleRetries(pimgUrlSumm)
    proc_normal_result(req, result)
    return result
def proc_normal_result(req, result):
    '''Success path for a price-image fetch.

    Runs the captcha decoder (req[4]) over the image bytes, stores the
    decoded price on the product detail (req[1]) and appends it to the
    shared result list (req[2]).  A non-200 fetch is handed to
    reinqueue_proc for a single retry.
    '''
    args = req
    captcha = req[4]
    if result.code == 200:
        prodDetail = args[1]
        resultList = args[2]
        prodDetail.privPrice = captcha(StringIO(result.content))
        resultList.append(prodDetail)
    else:
        reinqueue_proc(req, result)
class ObuyUrlSummary(UrlSummary):
    '''
    Crawl-target descriptor: a URL plus category-tree bookkeeping (name,
    level, ancestor chain) and crawl-state flags.
    '''
    def __init__(self, url='', data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
                 isCrawle=True, isRecursed=True, catagoryLevel=0, retries = 4, parentPath=None,parent = None,
                 stat=0, errReason='', include=None, exclude=None):
        super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType,retries)
        self.name = name                    # category name
        self.catagoryLevel = catagoryLevel  # category depth (0 = site root)
        self.parentPath = [] if parentPath is None else parentPath  # ancestor chain
        self.parent = parent
        self.isCrawle = isCrawle            # fetch this URL at all?
        self.isRecursed = isRecursed        # recurse into its sub-URLs?
        self.stat = stat                    # final crawl status
        self.errReason = errReason          # error description, if any
        # BUG FIX: the include/exclude arguments were accepted but silently
        # discarded (both attributes were hard-wired to None); store them so
        # callers can actually attach sub-URL filters.
        self.include = include   # sub-URLs that must be kept
        self.exclude = exclude   # sub-URLs to drop; include wins when both set
    def getUrlSumAbstract(self):
        '''Compact (name, url, level) tuple for logging.'''
        return self.name, self.url, self.catagoryLevel
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
class ParserResult(object):
    '''Marker base class for parser outputs; subclasses override logstr().'''
    def logstr(self):
        # Default: nothing to log.
        return None
def convertToUnicode(dataStr, siteName):
    '''Decode raw page bytes to unicode using the per-site encoding table.

    Unknown sites are sniffed once with chardet and the detected encoding
    is cached in encodingDict.  Already-unicode input is returned as-is
    (Python 2: only `str` instances are decoded).
    '''
    if isinstance(dataStr, str):
        encoding = encodingDict.get(siteName, None)
        if encoding is None:
            encoding = chardet.detect(dataStr)['encoding']
            encodingDict[siteName] = encoding  # cache the sniffed encoding
        dataStr = dataStr.decode(encoding, 'ignore')
    return dataStr
class Parser(object):
    '''
    Base page parser: decodes the raw page to unicode, builds the
    BeautifulSoup tree, and applies include/exclude filtering to the
    sub-URL lists produced by subclasses.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        self.rootUrlSummary = rootUrlSummary
        self.include = include
        self.exclude = exclude
        # Site key = second hostname label: www.<site>.com.cn -> '<site>'.
        siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
        self.dataStr = convertToUnicode(dataStr, siteName)
        self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES) # BeautifulSoup is the default parser
    @staticmethod
    def compareUrlSumm(urla, urlb):
        '''Match two summaries by URL when urla carries one, else by name.'''
        if urla.url != None and len(urla.url) > 0:
            return urla.url == urlb.url
        elif urla.name != None and len(urla.name) > 0:
            return urla.name == urlb.name
        else:
            return False
    @staticmethod
    def urlSummContain(filterArr, finalUrlSum):
        '''True when finalUrlSum itself, or any ancestor on its parentPath,
        matches an entry of filterArr.'''
        #print finalUrlSum.name,finalUrlSum.url
        for urlsumm in filterArr:
            #print urlsumm.name,urlsumm.url
            if Parser.compareUrlSumm(urlsumm, finalUrlSum):
                return True
            else:
                for parent in finalUrlSum.parentPath:
                    #print parent.name,parent.url
                    if Parser.compareUrlSumm(urlsumm, parent):
                        return True
        return False
    def filterUrlList(self, finalUrlList):
        '''Keep only entries matching self.include; otherwise drop entries
        matching self.exclude (include wins when both are set).'''
        filterResult = finalUrlList
        if self.include != None and len(self.include) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                            if Parser.urlSummContain(self.include, finalUrlSum)]
        elif self.exclude != None and len(self.exclude) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                            if not Parser.urlSummContain(self.exclude, finalUrlSum)]
        return filterResult
    def parserPageInfos(self):
        '''
        Return the list of ParserResult objects for this page (site-specific).
        '''
        pass
    def parserSubUrlSums(self):
        # Return the child UrlSummary list (site-specific).
        pass
def getParser(level, parserDict):
    '''Look up the parser class registered for *level*; None when absent.'''
    return parserDict.get(level)
class ParserUtils(object):
    '''
    Small HTML-fragment helpers shared by the site parsers.
    '''
    # BUG FIX: the original patterns used an unescaped '.', which matches
    # ANY character, so junk such as u'1a23' came back verbatim as a
    # "price".  The dot is now escaped, with an integer-only alternative so
    # whole-number amounts are still recognised.
    _NUMBER_RE = re.compile(u'[0-9]+\\.[0-9]+|[0-9]+')
    @staticmethod
    def parserTag_A(a):
        '''Return (text, href) of an <a> tag, both stripped.'''
        return a.getText().strip(), a['href'].strip()
    @staticmethod
    def getPrice(sPrice):
        '''Extract a numeric price string, e.g. u'¥4,899.00' -> '4899.00';
        returns '0.00' for empty input or when no number is present.'''
        if not sPrice:
            return '0.00'
        sPrice = sPrice.replace(u',', '')  # drop thousands separators
        ret = ParserUtils._NUMBER_RE.search(sPrice)
        if ret is None:
            return '0.00'
        return ret.group()
    @staticmethod
    def getDigit(s):
        '''First integer or decimal found in *s* (commas ignored).  Returns
        the int 0 -- not a string -- when nothing matches; callers rely on
        this historical asymmetry.'''
        s = s.replace(u',', '')
        sd = ParserUtils._NUMBER_RE.search(s)
        if sd is None:
            return 0
        return sd.group()
    @staticmethod
    def getImgUrl(imgTag):
        '''src of the first <img> inside *imgTag*; '' for a missing tag.'''
        if imgTag is None:
            return ''
        return imgTag.img['src']
class RootCatagoryPageParser(Parser):
    '''
    Base parser for a site's root category page: produces the final
    category-level ObuyUrlSummary list.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_N(self, url, name, parent, isCrawle=True,firstFinalPage = False):
        '''
        Build a child category node one level below *parent*, copying and
        extending the ancestor chain.  With firstFinalPage=True the node
        becomes its own .parent, i.e. it is the first listing page of the
        category (level-4 follow-up pages inherit that parent via
        Sort3PageParser.buildSort_4).
        '''
        sort_n_urlsum = ObuyUrlSummary(url=url, name=name, isCrawle=isCrawle)
        sort_n_urlsum.parentPath = []
        sort_n_urlsum.catagoryLevel = parent.catagoryLevel + 1
        sort_n_urlsum.parentPath.extend(parent.parentPath)
        sort_n_urlsum.parentPath.append(parent)
        if firstFinalPage:
            sort_n_urlsum.parent = sort_n_urlsum  # node is its own final page
        else:
            sort_n_urlsum.parent = parent
        return sort_n_urlsum
    def getBaseSort3UrlSums(self):
        # Implemented by the site-specific subclasses.
        pass
    def parserSubUrlSums(self):
        result = self.getBaseSort3UrlSums()
        return self.filterUrlList(result)
class Sort3PageParser(Parser):
    '''
    Category listing page parser:
    a. builds UrlSummary objects for the remaining pages of the category
    b. extracts the product information of the current page
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_4(self, url):
        '''Clone this category into a level-4 "next page" node that shares
        the same ancestor chain and final-page parent.'''
        sort4_urlsum = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
                    catagoryLevel=4)
        sort4_urlsum.parentPath = []
        sort4_urlsum.parentPath.extend(self.rootUrlSummary.parentPath)
        sort4_urlsum.parentPath.append(self.rootUrlSummary)
        sort4_urlsum.parent = self.rootUrlSummary.parent
        return sort4_urlsum
    def getTotal(self):
        # Site-specific: total number of listing pages.
        pass
    def nextPageUrlPattern(self):
        # Site-specific: str.format() template with one page-number slot.
        pass
    def buildSort_4UrlSums(self):
        '''Pages 2..total as level-4 nodes (page 1 is this page itself).'''
        finalUrlList = []
        totalPage = self.getTotal()
        if totalPage > 1:
            for pageNum in range(2, totalPage + 1):
                url = self.nextPageUrlPattern().format(str(pageNum))
                finalUrlList.append(self.buildSort_4(url))
        return finalUrlList
    def getSort4PageUrlSums(self):
        return self.buildSort_4UrlSums()
    def parserSubUrlSums(self):
        result = self.getSort4PageUrlSums()
        return self.filterUrlList(result)
def seEncode(ustr, encoding='gb18030'):
    """Best-effort encode to a byte string: None -> '', unicode -> encoded
    with *encoding* (undecodable characters ignored), anything else ->
    str(obj)."""
    if ustr is None:
        return ''
    if not isinstance(ustr, unicode):
        return str(ustr)
    return ustr.encode(encoding, 'ignore')
class ProductDetails(ParserResult):
    '''
    Details of a single product as scraped from a listing page.
    '''
    def __init__(self, name='', imageUrl='', productId='', catagory=None, fullUrl='', pubPrice='0.00',
                 privPrice='0.00', adWords='', reputation='0', evaluateNum='0', updateTime=None):
        self.name = name                  # product name
        self.imageUrl = imageUrl          # product image URL
        self.productId = productId        # product id on the source site
        self.catagory = catagory          # owning category (ObuyUrlSummary)
        self.fullUrl = fullUrl            # original product URL
        self.pubPrice = pubPrice          # advertised list price
        self.privPrice = privPrice        # seller price, promotions not deducted
        self.adWords = adWords            # promotion text (instant rebates, coupons, ...)
        self.reputation = reputation      # rating score
        self.evaluateNum = evaluateNum    # number of reviews
        self.updateTime = strftime("%Y-%m-%d %H:%M:%S") if updateTime is None else updateTime # scrape timestamp
    def __getCatagoryAbs(self):
        # Compact "(url, level)" of the owning listing page for the log line.
        cat = self.catagory.parent
        if isinstance(cat, ObuyUrlSummary):
            return str((seEncode(cat.url), cat.catagoryLevel))
        else:
            return ''
        #return ','.join([str((seEncode(cat.url), cat.catagoryLevel)) for cat in chain(self.catagory.parentPath, (self.catagory,))])
    def __filterStr(self,s):
        # Encode, strip the '|' field separator, and collapse whitespace.
        return ' '.join(seEncode(s).replace('|', ' ').split())
    def logstr(self):
        '''One '|'-separated log line containing all scraped fields.'''
        return '|'.join(map(self.__filterStr, (self.productId, self.privPrice, self.updateTime, self.name, self.evaluateNum, self.reputation,
                        self.adWords,self.fullUrl, self.imageUrl, self.__getCatagoryAbs())))
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-29
@author: zhongfeng
'''
import os, time
import zipfile
from ftplib import FTP,error_perm
import sys,glob
from proxysock import setup_http_proxy
from spiderconfigparser import SpiderConfig
ip = '58.64.204.70'
port = '21'
username = '55bigo'
passwd = '55bigoadmin'
def rename(fFullName):
    """Rename *fFullName* to '<fFullName>.<YYYY-MM-DD>' (date = file mtime, UTC).

    An existing file with the target name is removed first (os.rename
    cannot overwrite on Windows).  Returns the new path; any OS error is
    logged and re-raised.
    """
    t = time.strftime("%Y-%m-%d", time.gmtime(os.path.getmtime(fFullName)))
    print('old file:%s' % fFullName)
    new = '.'.join((fFullName, t))
    print('new file:%s' % new)
    try:
        if os.path.exists(new):
            os.remove(new)
        os.rename(fFullName, new)
    except Exception as e:
        print(e)
        # BUG FIX: 'raise e' resets the traceback; a bare raise preserves it.
        raise
    return new
def fileUpload(remoteErrPath = r'/opt/errlog',remoteLogPath = r'/opt/log'):
    """Upload the site's spider log (and error report, when present) by FTP.

    Finds '<site>_spider.log' next to the script, timestamps it with
    rename(), uploads it to <remoteLogPath>/<site> and moves it into a
    local 'log' backup directory; 'err_report.log' receives the same
    treatment under <remoteErrPath>/<site> and a local 'errlog' directory.
    """
    proxy = SpiderConfig.getProxy()
    if proxy :
        proxyIp = proxy.split(':')[0]
        proxyPort = proxy.split(':')[1]
        # BUG FIX: ProxySock formats the port with %d, so it must be an
        # int; passing the raw config string crashed the CONNECT handshake.
        setup_http_proxy(proxyIp, int(proxyPort))
    curpath = os.path.abspath(os.path.dirname(sys.argv[0]))
    ret = glob.glob1(curpath,r'*_spider.log')
    siteName = None
    if ret:
        logFileName = ret[0]
        siteName = logFileName.split('_')[0]  # '<site>_spider.log' -> '<site>'
        fullLogF = os.path.join(curpath,logFileName)
        newLogFile = rename(fullLogF)
        rlogPath = '/'.join((remoteLogPath,siteName))
        ftpupload(newLogFile,rlogPath)
        print('upload success ')
        # Move the uploaded file into the local log/ backup directory.
        print('backup >>>>>')
        logDir = os.path.join(curpath,'log')
        if not os.path.exists(logDir):
            os.mkdir(logDir)
        backUpLogPath = os.path.join(logDir,os.path.split(newLogFile)[-1])
        print('move %s to %s' % (newLogFile,backUpLogPath))
        if os.path.exists(backUpLogPath):
            os.remove(backUpLogPath)
        os.rename(newLogFile, backUpLogPath)
    errReport = os.path.join(curpath,r'err_report.log')
    if siteName and os.path.exists(errReport):
        newErrLogName = rename(errReport)
        rerrLogPath = '/'.join((remoteErrPath,siteName))
        ftpupload(newErrLogName,rerrLogPath)
        # Move the uploaded report into the local errlog/ backup directory.
        errlogDir = os.path.join(curpath,'errlog')
        if not os.path.exists(errlogDir):
            os.mkdir(errlogDir)
        backUpErrLogPath = os.path.join(errlogDir,os.path.split(newErrLogName)[-1])
        print('move %s to %s' % (newErrLogName,backUpErrLogPath))
        if os.path.exists(backUpErrLogPath):
            os.remove(backUpErrLogPath)
        os.rename(newErrLogName, backUpErrLogPath)
def ftpupload(filename,remotePath):
    """Upload *filename* (under its basename) into *remotePath* on the
    configured FTP server, creating the remote directory when missing.

    The local file handle and the FTP session are always released, even if
    the transfer fails.
    """
    ftp = FTP()
    ftp.set_debuglevel(2)
    ftp.connect(ip, port)
    ftp.login(username, passwd)
    try:
        ftp.cwd(remotePath)
    except Exception:
        try:
            ftp.mkd(remotePath)
            # BUG FIX: the original never entered the freshly created
            # directory, so the file was stored in the login directory.
            ftp.cwd(remotePath)
        except error_perm:
            print('U have no authority to make dir')
    bufsize = 8192
    file_handler = open(filename, 'rb')
    try:
        ftp.storbinary('STOR %s' % os.path.basename(filename), file_handler, bufsize)
    finally:
        # BUG FIX: close the handle and session on failure too; the
        # original leaked both when storbinary raised.
        ftp.set_debuglevel(0)
        file_handler.close()
        ftp.quit()
#===============================================================================
#
# class ZFile(object):
# def __init__(self, filename, mode='r', basedir=''):
# self.filename = filename
# self.mode = mode
# if self.mode in ('w', 'a'):
# self.zfile = zipfile.ZipFile(filename, self.mode, compression=zipfile.ZIP_DEFLATED)
# print self.zfile.filename
# else:
# self.zfile = zipfile.ZipFile(filename, self.mode)
# self.basedir = basedir
# if not self.basedir:
# self.basedir = os.path.dirname(filename)
#
# def addfile(self, path, arcname=None):
# path = path.replace('\\', '/')
# if not arcname:
# if path.startswith(self.basedir):
# arcname = path[len(self.basedir):]
# else:
# arcname = ''
# self.zfile.write(path, arcname)
#
# def addfiles(self, paths):
# for path in paths:
# if isinstance(path, tuple):
# self.addfile(*path)
# else:
# self.addfile(path)
#
# def close(self):
# self.zfile.close()
#
# def extract_to(self, path):
# for p in self.zfile.namelist():
# self.extract(p, path)
#
# def extract(self, filename, path):
# if not filename.endswith(''):
# f = os.path.join(path, filename)
# dir = os.path.dirname(f)
# if not os.path.exists(dir):
# os.makedirs(dir)
# file(f, 'wb').write(self.zfile.read(filename))
#
#
# def create(zfile, files):
# z = ZFile(zfile, 'w')
# z.addfiles(files)
# z.close()
#
# def extract(zfile, path):
# z = ZFile(zfile)
# z.extract_to(path)
# z.close()
#===============================================================================
# Script entry: upload the newest spider log / error report via FTP.
if __name__ == '__main__':
    fileUpload()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
##
#_
'''
:
'.',
'''
__###___
_#_#_##_
_##__##_
##____##
#_____#_
##____##
#_____#_
##____##
#_____#_
_##__#_#
_##_###_
__##_#__
'''
:
'0',
'''
___##
__#_#
_###_
#__##
#__#_
___##
___#_
___##
___#_
___##
___#_
___##
'''
:
'1',
'''
__###___
_#_#_##_
##____##
#_____#_
______##
_____#__
____###_
___#_#__
__##____
_#______
#######_
#_#_#_##
'''
:
'2',
'''
__###_#_
_#_#_###
##____#_
______##
___##_#_
___#_##_
______##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'3',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
__#__##_
_#___#__
##___##_
#_###_##
##_#_##_
_____#__
_____##_
'''
:
'4',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
___#_##_
_##__#__
#____##_
####_#_#
#_#_####
_____#__
_____##_
'''
:
'4',
'''
_###_##_
_#_###__
_#______
_#______
###_##__
#_###_#_
##____##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'5',
'''
__###_#_
_#_#_###
_##___#_
#_______
##_###__
#_#_#_#_
##____##
#_____#_
##____##
_##___#_
_#_##_#_
__#_##__
'''
:
'6',
'''
###_####
#_###_#_
_____##_
____#___
____##__
___#____
___##___
___#____
___#____
__##____
__#_____
__##____
'''
:
'7',
'''
__####__
_#_#_##_
##____##
#______#
##____##
_#####__
__#_#_#_
##____##
#_____#_
##____##
_####_#_
___#_#__
'''
:
'8',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_##_##__
'''
:
'9',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_#_###__
'''
:
'9',
} | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
##
#_
##
'''
:
'.',
'''
__###_#__
_#____##_
_#_____#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__###_#__
_#____##_
#______#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__##__
__#___
#_##__
__#___
__##__
__#___
__##__
__#___
__##__
__#___
__##__
##_###
'''
:
'1',
'''
_####_#__
#_____##_
##_____#_
_______##
_______#_
______#__
_____##__
____#____
___#_#___
__##_____
_#_______
#_#######
'''
:
'2',
'''
###_###__
#_____#__
##_____##
_______#_
______##_
___##_#__
______##_
_______##
________#
##_____##
#_____#__
_#####___
'''
:
'3',
'''
______##__
_____#_#__
____#_##__
___#__#___
__#___##__
_#____#___
#_____##__
#####_#_##
______##__
______#___
______##__
______#___
'''
:
'4',
'''
_###_####
_#_______
_##______
_#_______
_######__
______#__
_______##
_______#_
_______##
##_____#_
#_____#__
_####_#__
'''
:
'5',
'''
___###_#_
__#______
_##______
#________
##_####__
#_____#__
##_____##
#______#_
##_____##
#______#_
_##___#__
__###_#__
'''
:
'6',
'''
###_####_
_______##
______#__
______##_
_____#___
_____##__
____#____
____##___
___#_____
___##____
__#______
_###_____
'''
:
'7',
'''
__###_#__
_#____##_
##_____##
#_______#
_##___##_
__###_#__
_#____##_
##_____#_
#______##
##_____#_
_#____#__
__####___
'''
:
'8',
'''
__###_#__
_#____##_
##_____#_
#______##
##_____#_
#______##
_##____#_
__###__##
_______#_
______##_
_____#___
_###_#___
'''
:
'9',
} | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
Coo8 price-image recognition module (header previously said 360buy/JD -- copied template)
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from coo8.coo8_feature2 import COO8_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_coo8(CaptchaProfile):
    '''Coo8-specific captcha profile: the generic price OCR driven by the
    coo8 glyph feature map and the coo8 digit-strip geometry.'''
    def __init__(self,features_map = COO8_FEATURES_MAP):
        super(CaptchaProfile_coo8,self).__init__(features_map)
    def __new__(cls,features_map = COO8_FEATURES_MAP):
        # NOTE(review): the base class apparently customises __new__ with
        # the feature map -- confirm against CaptchaProfile.
        return super(CaptchaProfile_coo8, cls).__new__(cls,features_map)
    def split(self, im,top = 4,bottom = 16):
        # coo8 digits occupy rows 4..16 of the price image.
        return super(CaptchaProfile_coo8,self).split(im,top,bottom)
def captcha_coo8(filename):
    '''Decode a coo8 price image file into its price string.'''
    return captcha(filename, CaptchaProfile_coo8())
def test():
print CaptchaProfile_coo8(r'c:\gp359329,2.png')
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
    # Manual check: decode a sample price image shipped with the tests.
    fileName = os.path.join(testFilePath, "125487,1.png")
    print captcha_coo8(fileName)
# (exploratory cropping experiments kept below for reference)
#    it1 = im.crop((3, 4, 13, 16))
#    print cia.GetBinaryMap(it1),'\n'
#    it2 = im.crop((15,4,24,16))
#    print cia.GetBinaryMap(it2)
#    print '+++++++++'
#    it2 = im.crop((25, 4, 34, 16))
#    it3 = im.crop ((36,4,45,16))
#    #it3 = im.crop((35, 4, 37, 16))
#    it4 = im.crop((38, 4, 47, 16))
#    it5 = im.crop((48, 4, 57, 16))
#    #it6 = im.crop((51, 3, 57, 11))
#    #it7 = im.crop((59, 3, 65, 11))
#    multilist = [[0 for col in range(5)] for row in range(3)]
#    print '\n'.join(( str(t) for t in multilist))
    #profile = CaptchaProfile_360Buy()
    #print captcha_360buy(r'c:\6.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
Coo8 price-image recognition module (header previously said 360buy/JD -- copied template)
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from coo8.coo8_feature2 import COO8_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_coo8(CaptchaProfile):
    '''Coo8-specific captcha profile: the generic price OCR driven by the
    coo8 glyph feature map and the coo8 digit-strip geometry.'''
    def __init__(self,features_map = COO8_FEATURES_MAP):
        super(CaptchaProfile_coo8,self).__init__(features_map)
    def __new__(cls,features_map = COO8_FEATURES_MAP):
        # NOTE(review): the base class apparently customises __new__ with
        # the feature map -- confirm against CaptchaProfile.
        return super(CaptchaProfile_coo8, cls).__new__(cls,features_map)
    def split(self, im,top = 4,bottom = 16):
        # coo8 digits occupy rows 4..16 of the price image.
        return super(CaptchaProfile_coo8,self).split(im,top,bottom)
def captcha_coo8(filename):
    '''Decode a coo8 price image file into its price string.'''
    return captcha(filename, CaptchaProfile_coo8())
def test():
print CaptchaProfile_coo8(r'c:\gp359329,2.png')
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
    # Manual check: decode a sample price image shipped with the tests.
    fileName = os.path.join(testFilePath, "125487,1.png")
    print captcha_coo8(fileName)
# (exploratory cropping experiments kept below for reference)
#    it1 = im.crop((3, 4, 13, 16))
#    print cia.GetBinaryMap(it1),'\n'
#    it2 = im.crop((15,4,24,16))
#    print cia.GetBinaryMap(it2)
#    print '+++++++++'
#    it2 = im.crop((25, 4, 34, 16))
#    it3 = im.crop ((36,4,45,16))
#    #it3 = im.crop((35, 4, 37, 16))
#    it4 = im.crop((38, 4, 47, 16))
#    it5 = im.crop((48, 4, 57, 16))
#    #it6 = im.crop((51, 3, 57, 11))
#    #it7 = im.crop((59, 3, 65, 11))
#    multilist = [[0 for col in range(5)] for row in range(3)]
#    print '\n'.join(( str(t) for t in multilist))
    #profile = CaptchaProfile_360Buy()
    #print captcha_360buy(r'c:\6.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from coo8.coo8pageparser import parserDict,coo8Root
from spider import main
# Script entry: crawl coo8 starting from its root category page, using the
# site-specific per-level parser registry.
if __name__ == '__main__':
    main(coo8Root,parserDict)
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
# OCR template table for coo8 price images: each key is a 12-row ASCII bitmap
# ('#' = ink, '_' = blank) of one segmented glyph, mapped to the character it
# represents. Duplicate values (e.g. two '0' keys) are rendering variants of
# the same digit.
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
##
#_
##
'''
:
'.',
'''
__###_#__
_#____##_
_#_____#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__###_#__
_#____##_
#______#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__##__
__#___
#_##__
__#___
__##__
__#___
__##__
__#___
__##__
__#___
__##__
##_###
'''
:
'1',
'''
_####_#__
#_____##_
##_____#_
_______##
_______#_
______#__
_____##__
____#____
___#_#___
__##_____
_#_______
#_#######
'''
:
'2',
'''
###_###__
#_____#__
##_____##
_______#_
______##_
___##_#__
______##_
_______##
________#
##_____##
#_____#__
_#####___
'''
:
'3',
'''
______##__
_____#_#__
____#_##__
___#__#___
__#___##__
_#____#___
#_____##__
#####_#_##
______##__
______#___
______##__
______#___
'''
:
'4',
'''
_###_####
_#_______
_##______
_#_______
_######__
______#__
_______##
_______#_
_______##
##_____#_
#_____#__
_####_#__
'''
:
'5',
'''
___###_#_
__#______
_##______
#________
##_####__
#_____#__
##_____##
#______#_
##_____##
#______#_
_##___#__
__###_#__
'''
:
'6',
'''
###_####_
_______##
______#__
______##_
_____#___
_____##__
____#____
____##___
___#_____
___##____
__#______
_###_____
'''
:
'7',
'''
__###_#__
_#____##_
##_____##
#_______#
_##___##_
__###_#__
_#____##_
##_____#_
#______##
##_____#_
_#____#__
__####___
'''
:
'8',
'''
__###_#__
_#____##_
##_____#_
#______##
##_____#_
#______##
_##____#_
__###__##
_______#_
______##_
_____#___
_###_#___
'''
:
'9',
}
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
# OCR template table (second glyph set) for coo8 price images: each key is a
# 12-row ASCII bitmap ('#' = ink, '_' = blank) of one segmented glyph, mapped
# to the character it represents. Duplicate values (two '4' keys, two '9'
# keys) are rendering variants of the same digit.
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
##
#_
'''
:
'.',
'''
__###___
_#_#_##_
_##__##_
##____##
#_____#_
##____##
#_____#_
##____##
#_____#_
_##__#_#
_##_###_
__##_#__
'''
:
'0',
'''
___##
__#_#
_###_
#__##
#__#_
___##
___#_
___##
___#_
___##
___#_
___##
'''
:
'1',
'''
__###___
_#_#_##_
##____##
#_____#_
______##
_____#__
____###_
___#_#__
__##____
_#______
#######_
#_#_#_##
'''
:
'2',
'''
__###_#_
_#_#_###
##____#_
______##
___##_#_
___#_##_
______##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'3',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
__#__##_
_#___#__
##___##_
#_###_##
##_#_##_
_____#__
_____##_
'''
:
'4',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
___#_##_
_##__#__
#____##_
####_#_#
#_#_####
_____#__
_____##_
'''
:
'4',
'''
_###_##_
_#_###__
_#______
_#______
###_##__
#_###_#_
##____##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'5',
'''
__###_#_
_#_#_###
_##___#_
#_______
##_###__
#_#_#_#_
##____##
#_____#_
##____##
_##___#_
_#_##_#_
__#_##__
'''
:
'6',
'''
###_####
#_###_#_
_____##_
____#___
____##__
___#____
___##___
___#____
___#____
__##____
__#_____
__##____
'''
:
'7',
'''
__####__
_#_#_##_
##____##
#______#
##____##
_#####__
__#_#_#_
##____##
#_____#_
##____##
_####_#_
___#_#__
'''
:
'8',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_##_##__
'''
:
'9',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_#_###__
'''
:
'9',
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-7
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from coo8.image_price import captcha_coo8
from crawlerhttp import crawleRetries
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
from spiderconfigparser import SpiderConfig
# Crawl entry point: the full-catalogue page, recursed from category level 0.
coo8Root = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8',
                          isRecursed=True, catagoryLevel=0)
class Coo8AllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.coo8.com/allcatalog/ and collect every category,
    assembling each one into an ObuyUrlSummary (three nested levels).
    '''
    mainHost = r'http://www.coo8.com/'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8AllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        """Return the list of crawlable level-3 (leaf) category summaries."""
        leaves = []
        container = self.soup.find(name='div', attrs={'class':'cateItems'})
        # Level 1: each 'hd*' div header holds one top-level category.
        for header in container.findAll(name='div', attrs={'class':re.compile('hd.*')}):
            topName = header.find(name='h2')['id']
            topUrl = '%s%s/' % (self.mainHost, topName)
            topSum = self.buildSort_N(topUrl, topName, self.rootUrlSummary, isCrawle=False)
            # Level 2: the sibling 'bd*' div lists <dl> blocks of sub-categories.
            body = header.findNextSibling(name='div', attrs={'class':re.compile('bd.*')})
            for dl in body(name='dl'):
                subName = dl.dt.h3.getText()
                subUrl = '%s%s%s' % (self.mainHost, topSum.name, subName)
                subSum = self.buildSort_N(subUrl, subName, topSum, isCrawle=False)
                # Level 3: every <dd> anchor is a leaf category page.
                for dd in dl(name='dd'):
                    try:
                        leafName, leafUrl = ParserUtils.parserTag_A(dd.a)
                    except Exception:
                        continue  # malformed entry without a usable <a> tag
                    leaves.append(self.buildSort_N(leafUrl, leafName, subSum, firstFinalPage=True))
        return leaves
class Coo8Sort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 category (product listing) pages.
    '''
    # Number of worker threads used to fetch/decode price images in parallel.
    pricePageNum = 8

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        # Build a page-URL template: drop the '-0-0-0-0' page segment from the
        # category URL and leave a '{}' placeholder for the page number,
        # to be filled in by the caller.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-{}-0-101101'
        return '%s%s.%s' % (urlSegs[0].replace('-0-0-0-0',''), pageSeg, urlSegs[1])

    def getTotal(self):
        # Total page count parsed from the '共N页' ("N pages total") pagination
        # text; defaults to 1 when the pagination block is missing, and is
        # capped by the configured maximum page limit.
        regx = u'共([0-9]*)页'
        p = re.compile(regx)
        pageSeg = self.soup.find(name='div', attrs={'class':'pageInfo'})
        if pageSeg is None:
            return 1
        pageNum = pageSeg.getText()
        totalNum = int(p.search(pageNum).group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum

    def getAdWords(self, prod, prodUrl):
        # If the listing entry advertises cash-back ('返现'), a gift ('赠品') or
        # a coupon ('返券'), fetch the product detail page and extract the full
        # promotion text; otherwise return ''.
        extraIconSeg = prod.find(name ='p',attrs={'class':'text-tag-wrap'})
        adWords = ''
        if extraIconSeg:
            extraMsg = extraIconSeg.getText()
            if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1 \
                or extraMsg.find(u'返券') != -1 :
                sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
                result = crawleRetries(urlSum = sort_5_urlsum)
                parser = Coo8SortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
                adWords = parser.parserPageInfos()
        return adWords

    def parserPageInfos(self):
        # Parse every product <li> on the listing page. Prices are rendered as
        # images, so decoding is delegated to a thread pool running
        # getProductPrice with the coo8 captcha decoder; decoded products
        # accumulate in resultList.
        resultList = []
        plist = self.soup.find(name='div', attrs={'class':'srchContent'})
        if plist is None:
            #raise Exception("Page Error")
            return resultList
        try:
            pool = ThreadPool(self.pricePageNum)
            for li in plist(name='li'):
                pNameSeg = li.find(name='p', attrs={'class':'name'}).a
                pName = pNameSeg['title']
                imgUrlSeg = li.find(name='p',attrs={'class':'pic'}).img
                imgUrl = ''
                if imgUrlSeg:
                    imgUrl = imgUrlSeg['src']
                # Product id is the file-name part of the detail-page href.
                pid = pNameSeg['href'].rsplit('/')[-1].split('.')[0]
                url = pNameSeg['href']
                if url and not url.startswith('http'):
                    url = ''.join((r'http://www.coo8.com',pNameSeg['href']))
                adWords = self.getAdWords(prod = li,prodUrl=url)
                priceImgUrl = li.find(name='p', attrs={'class':'price'}).img['src']
                prodDetail = ProductDetails(fullUrl=url,productId=pid, adWords=adWords, name=pName,imageUrl=imgUrl)
                prodDetail.catagory = self.rootUrlSummary
                pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
                req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_coo8], None,
                                  callback=None)
                pool.putRequest(req)
            # Block until all queued price-decoding jobs have finished.
            pool.wait()
        except Exception,e:
            raise e
        finally:
            # NOTE(review): if ThreadPool() itself raised, 'pool' is unbound
            # here and this line raises NameError — confirm before refactoring.
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList
class Coo8Sort4PageParser(Coo8Sort3PageParser):
    '''
    Level-4 category pages are plain listing pages: product details are
    extracted via the inherited behaviour, but no sub-category URLs exist.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8Sort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # Deliberately a no-op (returns None): level 4 has no child categories.
        pass
class Coo8SortFinalParser(Parser):
    """Product-detail page parser: extracts cash-back and coupon/gift promo text."""

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8SortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserPageInfos(self):
        # Combined promotion string: '<cash-back>@<coupon/gift>' (just '@'
        # when neither section is present on the page).
        return '@'.join((self.getCrashCut(), self.getCouponAndExGift()))

    def getCrashCut(self):
        # Cash-back text lives in <span class="D-fanxian">; '' when absent.
        seg = self.soup.find(name='span', attrs={'class':'D-fanxian'})
        return seg.getText() if seg else ''

    def getCouponAndExGift(self):
        # Gift/coupon text lives in <dl id="zengpin">; '' when absent.
        seg = self.soup.find(name='dl', attrs={'id':'zengpin'})
        return seg.getText() if seg else ''
# Maps catagoryLevel -> parser class, consumed by the generic spider driver.
parserDict = {0:Coo8AllSortParser, 3:Coo8Sort3PageParser, 4:Coo8Sort4PageParser}

''' test '''
# Ad-hoc manual tests below; HTML fixtures live in ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')

def testAllSortPage():
    # Parse a saved catalogue page and print every leaf-category URL.
    fileName = os.path.join(testFilePath, 'coo8_2011-11-07_21-02-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8')
    firstPage = Coo8AllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print sort_3.url

def testSort3Page():
    # Parse a saved listing page and print the pagination (level-4) URLs.
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/280-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    # Live test: fetch a listing page and print every parsed product.
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/353-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(sort_3_urlsum)
    content = result.content
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
    for prod in sort3Page.parserPageInfos():
        print prod.logstr()

def testSortFinal():
    # Live test: fetch one product page and print its promotion text.
    urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/product/159376.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(urlsum)
    finalPage = Coo8SortFinalParser(result.content, urlsum)
    print finalPage.parserPageInfos()

if __name__ == '__main__':
    #testAllSortPage()
    #testSort3Page()
    #testSort3Details()
    #testSortFinal()
    s = '@'
    print s.split('@')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-7
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from coo8.image_price import captcha_coo8
from crawlerhttp import crawleRetries
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
from spiderconfigparser import SpiderConfig
# Crawl entry point: the full-catalogue page, recursed from category level 0.
coo8Root = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8',
                          isRecursed=True, catagoryLevel=0)
class Coo8AllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.coo8.com/allcatalog/ and collect every category,
    assembling each one into an ObuyUrlSummary (three nested levels).
    '''
    mainHost = r'http://www.coo8.com/'

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8AllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def getBaseSort3UrlSums(self):
        """Return the list of crawlable level-3 (leaf) category summaries."""
        leaves = []
        container = self.soup.find(name='div', attrs={'class':'cateItems'})
        # Level 1: each 'hd*' div header holds one top-level category.
        for header in container.findAll(name='div', attrs={'class':re.compile('hd.*')}):
            topName = header.find(name='h2')['id']
            topUrl = '%s%s/' % (self.mainHost, topName)
            topSum = self.buildSort_N(topUrl, topName, self.rootUrlSummary, isCrawle=False)
            # Level 2: the sibling 'bd*' div lists <dl> blocks of sub-categories.
            body = header.findNextSibling(name='div', attrs={'class':re.compile('bd.*')})
            for dl in body(name='dl'):
                subName = dl.dt.h3.getText()
                subUrl = '%s%s%s' % (self.mainHost, topSum.name, subName)
                subSum = self.buildSort_N(subUrl, subName, topSum, isCrawle=False)
                # Level 3: every <dd> anchor is a leaf category page.
                for dd in dl(name='dd'):
                    try:
                        leafName, leafUrl = ParserUtils.parserTag_A(dd.a)
                    except Exception:
                        continue  # malformed entry without a usable <a> tag
                    leaves.append(self.buildSort_N(leafUrl, leafName, subSum, firstFinalPage=True))
        return leaves
class Coo8Sort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 category (product listing) pages.
    '''
    # Number of worker threads used to fetch/decode price images in parallel.
    pricePageNum = 8

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def nextPageUrlPattern(self):
        # Build a page-URL template: drop the '-0-0-0-0' page segment from the
        # category URL and leave a '{}' placeholder for the page number,
        # to be filled in by the caller.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-{}-0-101101'
        return '%s%s.%s' % (urlSegs[0].replace('-0-0-0-0',''), pageSeg, urlSegs[1])

    def getTotal(self):
        # Total page count parsed from the '共N页' ("N pages total") pagination
        # text; defaults to 1 when the pagination block is missing, and is
        # capped by the configured maximum page limit.
        regx = u'共([0-9]*)页'
        p = re.compile(regx)
        pageSeg = self.soup.find(name='div', attrs={'class':'pageInfo'})
        if pageSeg is None:
            return 1
        pageNum = pageSeg.getText()
        totalNum = int(p.search(pageNum).group(1))
        if totalNum > SpiderConfig.getMaxPage():
            totalNum = SpiderConfig.getMaxPage()
        return totalNum

    def getAdWords(self, prod, prodUrl):
        # If the listing entry advertises cash-back ('返现'), a gift ('赠品') or
        # a coupon ('返券'), fetch the product detail page and extract the full
        # promotion text; otherwise return ''.
        extraIconSeg = prod.find(name ='p',attrs={'class':'text-tag-wrap'})
        adWords = ''
        if extraIconSeg:
            extraMsg = extraIconSeg.getText()
            if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1 \
                or extraMsg.find(u'返券') != -1 :
                sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
                result = crawleRetries(urlSum = sort_5_urlsum)
                parser = Coo8SortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
                adWords = parser.parserPageInfos()
        return adWords

    def parserPageInfos(self):
        # Parse every product <li> on the listing page. Prices are rendered as
        # images, so decoding is delegated to a thread pool running
        # getProductPrice with the coo8 captcha decoder; decoded products
        # accumulate in resultList.
        resultList = []
        plist = self.soup.find(name='div', attrs={'class':'srchContent'})
        if plist is None:
            #raise Exception("Page Error")
            return resultList
        try:
            pool = ThreadPool(self.pricePageNum)
            for li in plist(name='li'):
                pNameSeg = li.find(name='p', attrs={'class':'name'}).a
                pName = pNameSeg['title']
                imgUrlSeg = li.find(name='p',attrs={'class':'pic'}).img
                imgUrl = ''
                if imgUrlSeg:
                    imgUrl = imgUrlSeg['src']
                # Product id is the file-name part of the detail-page href.
                pid = pNameSeg['href'].rsplit('/')[-1].split('.')[0]
                url = pNameSeg['href']
                if url and not url.startswith('http'):
                    url = ''.join((r'http://www.coo8.com',pNameSeg['href']))
                adWords = self.getAdWords(prod = li,prodUrl=url)
                priceImgUrl = li.find(name='p', attrs={'class':'price'}).img['src']
                prodDetail = ProductDetails(fullUrl=url,productId=pid, adWords=adWords, name=pName,imageUrl=imgUrl)
                prodDetail.catagory = self.rootUrlSummary
                pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
                req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_coo8], None,
                                  callback=None)
                pool.putRequest(req)
            # Block until all queued price-decoding jobs have finished.
            pool.wait()
        except Exception,e:
            raise e
        finally:
            # NOTE(review): if ThreadPool() itself raised, 'pool' is unbound
            # here and this line raises NameError — confirm before refactoring.
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList
class Coo8Sort4PageParser(Coo8Sort3PageParser):
    '''
    Level-4 category pages are plain listing pages: product details are
    extracted via the inherited behaviour, but no sub-category URLs exist.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8Sort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserSubUrlSums(self):
        # Deliberately a no-op (returns None): level 4 has no child categories.
        pass
class Coo8SortFinalParser(Parser):
    """Product-detail page parser: extracts cash-back and coupon/gift promo text."""

    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Coo8SortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)

    def parserPageInfos(self):
        # Combined promotion string: '<cash-back>@<coupon/gift>' (just '@'
        # when neither section is present on the page).
        return '@'.join((self.getCrashCut(), self.getCouponAndExGift()))

    def getCrashCut(self):
        # Cash-back text lives in <span class="D-fanxian">; '' when absent.
        seg = self.soup.find(name='span', attrs={'class':'D-fanxian'})
        return seg.getText() if seg else ''

    def getCouponAndExGift(self):
        # Gift/coupon text lives in <dl id="zengpin">; '' when absent.
        seg = self.soup.find(name='dl', attrs={'id':'zengpin'})
        return seg.getText() if seg else ''
# Maps catagoryLevel -> parser class, consumed by the generic spider driver.
parserDict = {0:Coo8AllSortParser, 3:Coo8Sort3PageParser, 4:Coo8Sort4PageParser}

''' test '''
# Ad-hoc manual tests below; HTML fixtures live in ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')

def testAllSortPage():
    # Parse a saved catalogue page and print every leaf-category URL.
    fileName = os.path.join(testFilePath, 'coo8_2011-11-07_21-02-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8')
    firstPage = Coo8AllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print sort_3.url

def testSort3Page():
    # Parse a saved listing page and print the pagination (level-4) URLs.
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/280-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url

def testSort3Details():
    # Live test: fetch a listing page and print every parsed product.
    fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/353-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(sort_3_urlsum)
    content = result.content
    sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
    for prod in sort3Page.parserPageInfos():
        print prod.logstr()

def testSortFinal():
    # Live test: fetch one product page and print its promotion text.
    urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/product/159376.html', parentPath=[('test')], catagoryLevel=3)
    result = crawleRetries(urlsum)
    finalPage = Coo8SortFinalParser(result.content, urlsum)
    print finalPage.parserPageInfos()

if __name__ == '__main__':
    #testAllSortPage()
    #testSort3Page()
    #testSort3Details()
    #testSortFinal()
    s = '@'
    print s.split('@')
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from coo8.coo8pageparser import parserDict,coo8Root
from spider import main
if __name__ == '__main__':
    # Run the generic spider driver with coo8's root URL and parser table.
    main(coo8Root,parserDict)
| Python |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import sys,os
from dbproc.basedbproc import *
def createSiteCat(urls, siteName, catKeyFunc=getCatKey,saveFlag = True):
siteId = getSiteIdByName(siteName)
for urlsum in urls:
parent = urlsum.parentPath
path = []
for pUrl in parent:
rawCatId = catKeyFunc(pUrl.url)
if pUrl.catagoryLevel == 0:
id0 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
if id0 is None :
if saveFlag:
id0 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=0, url=pUrl.url,
name=pUrl.name, parentPath=[0], level=pUrl.catagoryLevel)
print 'new cat :id is id0 %s,name:%s,url:%s,level:%s' % (id0,pUrl.name,pUrl.url,pUrl.catagoryLevel)
else:
print 'new cat :name:%s,url:%s,level:%s' % (pUrl.name,pUrl.url,pUrl.catagoryLevel)
path.append(id0)
elif pUrl.catagoryLevel == 1:
id1 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
if id1 is None:
if saveFlag:
parentPath1 = [id0]
id1 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=id0, url=pUrl.url,
name=pUrl.name, parentPath=parentPath1, level=pUrl.catagoryLevel)
print 'new cat :id is id1 %s,name:%s,url:%s,level:%s' % (id1,pUrl.name,pUrl.url,pUrl.catagoryLevel)
else:
print 'new cat :name:%s,url:%s,level:%s' % (pUrl.name,pUrl.url,pUrl.catagoryLevel)
path.append(id1)
elif pUrl.catagoryLevel == 2:
id2 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
if id2 is None:
if saveFlag:
parentPath2 = [id0, id1]
id2 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=id1, url=pUrl.url,
name=pUrl.name, parentPath=parentPath2, level=pUrl.catagoryLevel)
print 'new cat :id is id0 %s,name:%s,url:%s,level:%s' % (id2,pUrl.name,pUrl.url,pUrl.catagoryLevel)
else:
print 'new cat :name:%s,url:%s,level:%s' % (pUrl.name,pUrl.url,pUrl.catagoryLevel)
path.append(id2)
rawCatId = catKeyFunc(urlsum.url)
id3 = getCatIdFromRawInfo(siteId, catUrl=urlsum.url)
if id3 is None:
if saveFlag:
parentPath3 = path
id3 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=parentPath3[-1], url=urlsum.url,
name=urlsum.name, parentPath=parentPath3, level=urlsum.catagoryLevel)
print 'new cat :id is id0 %s,name:%s,url:%s,level:%s' % (id3,urlsum.name,urlsum.ur,urlsum.catagoryLevel)
else:
print 'new cat :name:%s,url:%s,level:%s' % (urlsum.name,urlsum.url,urlsum.catagoryLevel)
def getAllCatUrlSums(rootUrlSum, ParserClass, content=None, include=None, exclude=None):
    """Fetch (when no content is supplied) and parse the root category page,
    returning the sub-category URL summaries it lists."""
    if content is None:
        from crawlerhttp import crawle
        # Keep re-fetching until the page comes back with HTTP 200.
        result = crawle(rootUrlSum)
        while result.code != 200:
            result = crawle(rootUrlSum)
        content = result.content
    rootPage = ParserClass(content, rootUrlSum, include, exclude)
    return rootPage.parserSubUrlSums()
def calEveryLevelCatNum(urlSums):
s0 = set()
s1 = set()
s2 = set()
s3 = set()
for sort_3 in urlSums:
print sort_3.name,sort_3.url
print seEncode(match55bigoCats(8,sort_3.name))
parentPath = sort_3.parentPath
s0.add(parentPath[0].url)
s1.add(parentPath[1].url)
if len(parentPath) > 2:
s2.add(parentPath[2].url)
s3.add(sort_3.url)
sa = set()
import itertools
for t in itertools.chain(s0, s1, s2, s3):
sa.add(str(getMd5Key(t))[0:16])
print len(sa)
print len(s0), len(s1), len(s2), len(s3)
def testSiteCat(rootUrlSum, ParserClass, content=None, updateDb=False):
    """Dry-run the category tree of one site; persist it only when updateDb is True."""
    sums = getAllCatUrlSums(rootUrlSum, ParserClass, content)
    calEveryLevelCatNum(sums)
    if updateDb:
        createSiteCat(sums, rootUrlSum.name)
def preProcCats(rootUrlSum, ParserClass, content=None):
    '''
    Compare the site's current on-line category list against what is already
    stored, printing exact matches, ambiguous candidates and brand-new
    categories so a human can reconcile them before import.
    '''
    urlSums = getAllCatUrlSums(rootUrlSum, ParserClass, content)#获取网站当前的所有分类列表
    siteId = getSiteIdByName(rootUrlSum.name)
    noMatch = []    # categories with no stored candidate at all
    unDecided = []  # categories with candidates needing manual review
    for sort_3 in urlSums:
        id3 = getCatIdFromRawInfo(siteId, catUrl=sort_3.url)
        if id3 is not None:
            continue  # already mapped in the db, nothing to do
        retArr = match55bigoCats(siteId,sort_3.name)
        if len(retArr) == 0:
            noMatch.append( '|'.join((sort_3.name,sort_3.url)))
            continue
        auRet = []
        for t in retArr:
            id,site_id,self_cat_id,name,url ,cat_base_id = t
            flag = 0
            if site_id == siteId:
                # Same site: an exact URL match (flag=1) or name match is final.
                if url == sort_3.url:
                    flag = 1
                    print '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),str(self_cat_id),str(cat_base_id)))
                    break
                elif name == sort_3.name:
                    print '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),str(self_cat_id),str(cat_base_id)))
                    break
            else:
                # Candidate from another site: record it for manual decision.
                auRet.append( '(%s,%s)' % (str(self_cat_id),name))
        if len(auRet) > 0:
            # NOTE(review): name/id/flag/site_id/self_cat_id/cat_base_id here
            # hold whatever the LAST candidate iteration left behind — this
            # looks unintended; confirm before relying on these columns.
            unDecided.append( '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),
                                        str(self_cat_id),str(cat_base_id), seEncode(auRet))))
    for newCat in noMatch:
        print newCat
    for unDeCat in unDecided:
        print unDeCat
#
from pageparser import getParser
def __buildCatagory(parserDict, root, content=None):
    # Resolve the site's level-0 parser and run category pre-processing.
    parserClass = getParser(0, parserDict)
    # BUG FIX: the original passed a hard-coded None, silently discarding the
    # 'content' argument (buildIcsonCat reads a local HTML file and passes it
    # here). Forward the caller-supplied content instead.
    preProcCats(root, parserClass, content)
# Per-site category builders: each imports that site's parser table and root
# URL summary lazily (import inside the function keeps optional sites from
# breaking module import) and delegates to __buildCatagory.
def build360BuyCat():
    from j360buy.j360pageparser import parserDict,j360buyRoot
    __buildCatagory(parserDict,j360buyRoot)

def buildEfeihuCat():
    from efeihu.efeihupageparser import parserDict,efeihuRoot
    __buildCatagory(parserDict,efeihuRoot)

def buildLusenCat():
    from lusen.lusenpageparser import parserDict,lusenRoot
    __buildCatagory(parserDict,lusenRoot)

def buildGomeCat():
    from gome.gomepageparser import parserDict,gomeRoot
    __buildCatagory(parserDict,gomeRoot)

def buildDangDangCat():
    from dangdang.dangpageparser import parserDict,dangdangRoot
    __buildCatagory(parserDict,dangdangRoot)

def buildNewEggCat():
    from newegg.neweggpageparser import parserDict,newEggRoot
    __buildCatagory(parserDict,newEggRoot)

def buildSuningCat():
    from suning.suningparser import parserDict,sunningRoot
    __buildCatagory(parserDict,sunningRoot)

def buildIcsonCat():
    # icson's catalogue is read from a locally-saved portal page.
    from icson.icsonpageparser import parserDict,icsonRoot
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    fileName = os.path.join(curPath, 'portal.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    __buildCatagory(parserDict,icsonRoot,content)

def buildCoo8Cat():
    from coo8.coo8pageparser import parserDict,coo8Root
    #===========================================================================
    # curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    # fileName = os.path.join(curPath, 'coo8_allcat.htm')
    # with open(fileName, 'r') as fInput:
    #    content = fInput.read()
    #===========================================================================
    __buildCatagory(parserDict,coo8Root)

def buildAmazonCat():
    # Two-level walk of the amazon catalogue with an explicit exclude list.
    from amazon.amazonpageparser import rootUrlSummary,parserDict
    from pageparser import ObuyUrlSummary
    parserClass = getParser(0, parserDict)
    include = [ObuyUrlSummary(name=name) for name in [u'home-appliances']]
    exclude = [ObuyUrlSummary(name=name) for name in [u'video', u'aps', u'stripbooks', u'music', u'apparel', u'electronics', u'audio-visual-education']]
    urlSumsSort1 = getAllCatUrlSums(rootUrlSum=rootUrlSummary,ParserClass=parserClass,exclude=exclude)
    ret = []
    for sort1 in urlSumsSort1:
        print sort1.url
        parserClass1 = getParser(1, parserDict)
        urlSumsSort2 = getAllCatUrlSums(sort1, parserClass1)
        ret.extend(urlSumsSort2)
    calEveryLevelCatNum(ret)
    createSiteCat(ret,rootUrlSummary.name)

def buildAmazonCat_New():
    # Three-level walk restricted to an include list; a level-2 node without
    # children is promoted to a leaf (level 3) itself.
    from amazon.amazonpageparser import rootUrlSummary,parserDict
    from pageparser import ObuyUrlSummary
    parserClass = getParser(0, parserDict)
    include = [ObuyUrlSummary(name=name) for name in [u'appliances',u'communications',u'audio-visual',u'computers',u'office-products',
                u'home-appliances',u'photo-video',u'music-players',u'automotive',u'software']]
    urlSumsSort1 = getAllCatUrlSums(rootUrlSum=rootUrlSummary,ParserClass=parserClass,include=include)
    ret = []
    for sort1 in urlSumsSort1:
        print sort1.name
        parserClass1 = getParser(1, parserDict)
        urlSumsSort2 = getAllCatUrlSums(sort1, parserClass1)
        for sort2 in urlSumsSort2:
            print ' %s' % sort2.name
            parserClass2 = getParser(2, parserDict)
            urlSumsSort3 = getAllCatUrlSums(sort2, parserClass2)
            if not urlSumsSort3:
                sort2.catagoryLevel = 3
                ret.append(sort2)
            else:
                for sort3 in urlSumsSort3:
                    print ' %s' % sort3.name
                ret.extend(urlSumsSort3)
    calEveryLevelCatNum(ret)
    createSiteCat(ret,rootUrlSummary.name)

def buildAllCat():
    # Rebuild the category trees of every supported site in one pass.
    build360BuyCat()
    buildGomeCat()
    buildDangDangCat()
    buildNewEggCat()
    buildSuningCat()
    buildIcsonCat()
    buildCoo8Cat()
if __name__ == '__main__':
    # Only the coo8 build is currently active; the rest are kept for ad-hoc runs.
    #from gome.gomepageparser import parserDict,gomeRoot
    #parserClass = getParser(0, parserDict)
    #urlSums = getAllCatUrlSums(rootUrlSum = gomeRoot, ParserClass=parserClass, content = None)
    #createSiteCat(urls = urlSums, siteName = gomeRoot.name, saveFlag = False)
    #build360BuyCat()
    #buildGomeCat()
    #buildSuningCat()
    #buildAmazonCat_New()
    #buildEfeihuCat()
    #buildLusenCat()
    buildCoo8Cat()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import glob
from dbproc.basedbproc import getConnect,initClientEncode
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def updateProdCat(conn, selfCatId, catBaseId, id):
    '''Bind one site-category row to its own category id and base-category id.'''
    cursor = initClientEncode(conn)
    # Parameterized UPDATE — values are bound by the driver, never interpolated.
    sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,
    `cat_base_id` = %s WHERE `prod_catagory`.`id` =%s '''
    cursor.execute(sqlStr, (selfCatId, catBaseId, id))
    cursor.close()
def seEncode(ustr, encoding='utf-8'):
    '''Normalize a value to an encoded byte string (utf-8 by default) for DB insertion.'''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        # Drop characters the target encoding cannot represent, never raise.
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list, tuple, set)):
        # Containers collapse to a comma-joined recursive encoding.
        return '%s' % ','.join([seEncode(item, encoding) for item in ustr])
    return str(ustr)
def exportSiteProdCat(site_id, level=3):
    # Fetch (name, url, level) rows for every category of *site_id* at *level*.
    # Opens and closes its own connection per call.
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT name, url, LEVEL FROM `prod_catagory` WHERE site_id =%s AND LEVEL =%s '
    param = (site_id,level)
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def buildCatDbMap():# TODO: level-1/level-2 rows are not yet mapped to a category
    # Read pipe-separated 'lusen.3C' mapping files next to the script and push
    # self_cat_id/cat_base_id updates to the db, one row per non-blank line.
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    catBaseId = 1
    conn = getConnect()
    for f in glob.glob1(curPath, 'lusen.3C'):
        print f
        for line in file(f):
            line = line.strip()
            if line == '':
                continue  # skip blank lines
            ret = line.split('|')
            try:
                # Last field is the manually-assigned self-category id;
                # a non-numeric value means "unmapped".
                selfCatId = int(ret[-1])
            except ValueError:
                selfCatId = 0
                catBaseId = 0
            else:
                catBaseId = 1
            # Second-to-last field is the prod_catagory primary key.
            id = ret[-2]
            print selfCatId, catBaseId, id
            updateProdCat(conn, selfCatId, catBaseId, id)
    conn.close()
if __name__ == '__main__':
    #buildCatDbMap()
    # Dump site 1's level-3 categories to a flat file for manual review.
    f = file(r"c:\amazon.cat",'w')
    for t in exportSiteProdCat(site_id=1):
        s = seEncode(t)
        f.write(s)
        f.write(os.linesep)
    f.close()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import glob
from dbproc.basedbproc import getConnect,initClientEncode
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def updateProdCat(conn, selfCatId, catBaseId, id):
    '''Bind one site-category row to its own category id and base-category id.'''
    cursor = initClientEncode(conn)
    # Parameterized UPDATE — values are bound by the driver, never interpolated.
    sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,
    `cat_base_id` = %s WHERE `prod_catagory`.`id` =%s '''
    cursor.execute(sqlStr, (selfCatId, catBaseId, id))
    cursor.close()
def seEncode(ustr, encoding='utf-8'):
    '''Normalize a value to an encoded byte string (utf-8 by default) for DB insertion.'''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        # Drop characters the target encoding cannot represent, never raise.
        return ustr.encode(encoding, 'ignore')
    if isinstance(ustr, (list, tuple, set)):
        # Containers collapse to a comma-joined recursive encoding.
        return '%s' % ','.join([seEncode(item, encoding) for item in ustr])
    return str(ustr)
def exportSiteProdCat(site_id, level=3):
    # Fetch (name, url, level) rows for every category of *site_id* at *level*.
    # Opens and closes its own connection per call.
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT name, url, LEVEL FROM `prod_catagory` WHERE site_id =%s AND LEVEL =%s '
    param = (site_id,level)
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def buildCatDbMap():# TODO: level-1/level-2 rows are not yet mapped to a category
    # Read pipe-separated 'lusen.3C' mapping files next to the script and push
    # self_cat_id/cat_base_id updates to the db, one row per non-blank line.
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    catBaseId = 1
    conn = getConnect()
    for f in glob.glob1(curPath, 'lusen.3C'):
        print f
        for line in file(f):
            line = line.strip()
            if line == '':
                continue  # skip blank lines
            ret = line.split('|')
            try:
                # Last field is the manually-assigned self-category id;
                # a non-numeric value means "unmapped".
                selfCatId = int(ret[-1])
            except ValueError:
                selfCatId = 0
                catBaseId = 0
            else:
                catBaseId = 1
            # Second-to-last field is the prod_catagory primary key.
            id = ret[-2]
            print selfCatId, catBaseId, id
            updateProdCat(conn, selfCatId, catBaseId, id)
    conn.close()
if __name__ == '__main__':
    #buildCatDbMap()
    # Dump site 1's level-3 categories to a flat file for manual review.
    f = file(r"c:\amazon.cat",'w')
    for t in exportSiteProdCat(site_id=1):
        s = seEncode(t)
        f.write(s)
        f.write(os.linesep)
    f.close()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-11
@author: zhongfeng
'''
from itertools import chain
import os
from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode
def createSiteCat(site_id):
    # Build human-readable export lines for every level-3 category of a site:
    # each line is '(ancestorId, ancestorName)|...|name|id|<newline>' with any
    # '|' inside names replaced by a space, encoded as gb18030.
    catDict = {}
    # id -> name lookup over all ancestor levels (0..2).
    for level in xrange(0,3):
        catLevelRet = getCatBySiteIdAndLevel(site_id, level)
        catDict.update(((int(it[0]),it[1]) for it in catLevelRet))
    finalLevelRet = getCatBySiteIdAndLevel(site_id, 3)
    result = []
    for t in finalLevelRet:
        # SECURITY NOTE(review): t[-1] (the stored parentPath column) is
        # eval'ed — acceptable only for trusted db content; ast.literal_eval
        # would be the safer equivalent. Confirm the column always holds a
        # plain list literal.
        tr = [seEncode(it, encoding='gb18030') for it in chain([(pId,catDict[int(pId)]) for pId in eval(t[-1])],(t[1],t[0],os.linesep))]
        result.append( '|'.join([s.replace('|',' ') for s in tr]))
    #result.append(os.linesep)
    return result
if __name__ == '__main__':
    # Export every site's category tree, one file per site, under c:\catinfo.
    baseDir = r'c:\catinfo'
    if not os.path.exists(baseDir):
        os.makedirs(baseDir)
    for en_name,id in getAllWebsiteBaseInfo():
        print en_name,id
        fName = os.path.join(baseDir,en_name)
        result = createSiteCat(id)
        with open(fName,'w') as output:
            output.writelines(result)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-11
@author: zhongfeng
'''
from itertools import chain
import os
from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode
def createSiteCat(site_id):
    # Build human-readable export lines for every level-3 category of a site:
    # each line is '(ancestorId, ancestorName)|...|name|id|<newline>' with any
    # '|' inside names replaced by a space, encoded as gb18030.
    catDict = {}
    # id -> name lookup over all ancestor levels (0..2).
    for level in xrange(0,3):
        catLevelRet = getCatBySiteIdAndLevel(site_id, level)
        catDict.update(((int(it[0]),it[1]) for it in catLevelRet))
    finalLevelRet = getCatBySiteIdAndLevel(site_id, 3)
    result = []
    for t in finalLevelRet:
        # SECURITY NOTE(review): t[-1] (the stored parentPath column) is
        # eval'ed — acceptable only for trusted db content; ast.literal_eval
        # would be the safer equivalent. Confirm the column always holds a
        # plain list literal.
        tr = [seEncode(it, encoding='gb18030') for it in chain([(pId,catDict[int(pId)]) for pId in eval(t[-1])],(t[1],t[0],os.linesep))]
        result.append( '|'.join([s.replace('|',' ') for s in tr]))
    #result.append(os.linesep)
    return result
if __name__ == '__main__':
    # Export every site's category tree, one file per site, under c:\catinfo.
    baseDir = r'c:\catinfo'
    if not os.path.exists(baseDir):
        os.makedirs(baseDir)
    for en_name,id in getAllWebsiteBaseInfo():
        print en_name,id
        fName = os.path.join(baseDir,en_name)
        result = createSiteCat(id)
        with open(fName,'w') as output:
            output.writelines(result)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import ez_setup
import shutil, sys, os, glob
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import py2exe
curpath = os.path.dirname(os.path.abspath(__file__))
def find_py_modules():
    """List module names (basename without '.py') of every script under src/."""
    names = []
    for full in glob.glob(os.path.join(curpath, r'src/*.py')):
        base = os.path.split(full)[-1]
        names.append(os.path.splitext(base)[0])
    return names
def find_data_files(base_dir, files=['logging.conf', 'urls.cfg', 'spider.conf']):
    """Return absolute paths of the given config files that exist under base_dir."""
    # NOTE: the mutable default is kept for interface compatibility; the
    # function never mutates it.
    found = []
    for name in files:
        candidate = os.path.join(curpath, base_dir, name)
        if os.path.exists(candidate):
            found.append(candidate)
    return found
def singleSetUp(site_name):
    # Build one standalone py2exe distribution for the given site's spider
    # into dist/<site_name>, removing any previous build output first.
    dist_dir = 'dist/%s' % site_name
    if os.path.isdir(dist_dir): # remove the previous build result first
        print 'rm %s dist_dir first' % dist_dir
        shutil.rmtree(dist_dir)
    bDir = 'src/%s' % site_name
    setup(
        name = "%sspider" % site_name,
        cmdclass = {'py2exe': py2exe.build_exe.py2exe},
        version = '1.0',
        packages = find_packages(bDir),# include all packages under src
        package_dir = {'':'src'}, # tell distutils packages are under src
        py_modules = find_py_modules(),
        # The per-site entry script becomes the console executable.
        console = ['src/%s/%sspider.py' % (site_name,site_name)],
        zip_safe=True,
        #test_suite = "test.test_enum.suite",
        package_data={'': ["*.*"],},
        options={'py2exe': {'optimize': 2,
                            'compressed': True,
                            'dist_dir': dist_dir, } },
        #data_files = find_data_files(bDir),
        # installed or upgraded on the target machine
        install_requires=['chardet', 'enum', 'BeautifulSoup', 'threadpool'],
        # PyPI metadata
        # metadata for upload to PyPI
        author="zhongfeng",
        author_email="fzhong@travelsky.com",
        description="21obuys Package",
        license="PSF",
        keywords="crawlers",
        classifiers=[
            "Development Status :: 4 - Beta",
            "License :: OSI Approved :: GNU General Public License (GPL)",
            "License :: OSI Approved :: Python Software Foundation License",
            "Programming Language :: Python",
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Operating System :: OS Independent",
            "Intended Audience :: Developers",
        ],
    )
class BuildSpiderExe(object):
    """Drive py2exe packaging for every supported spider site."""
    def __init__(self):
        # Full site list; switch to the single-site list below when debugging
        # one target.
        self.sites=['amazon','coo8','dangdang','gome','icson','j360buy',
                    'newegg','suning','efeihu','lusen']
        #self.sites = ['j360buy']
    def run(self):
        for name in self.sites:
            singleSetUp(name)
        # drop the intermediate "build" folder left behind by setup()
        if os.path.isdir('build'):
            shutil.rmtree('build')
if __name__ == '__main__':
    # Default to the py2exe command when none was given on the command line.
    if len(sys.argv) < 2:
        sys.argv.append('py2exe')
    BuildSpiderExe().run()
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c3"
# Mirror path embeds the running interpreter's major.minor version.
DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good egg checksums; regenerate with "ez_setup.py --md5update <eggs>".
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
}
import sys, os
def _validate_md5(egg_name, data):
    # Verify a downloaded egg against the known-good checksum table; abort
    # (exit code 2) on mismatch — a corrupted or tampered download. Eggs not
    # listed in md5_data pass through unchecked.
    if egg_name in md5_data:
        from md5 import md5
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
            "md5 validation of %s failed! (Possible download problem?)"
            % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    try:
        import setuptools
        # '0.0.1' is the broken placeholder release; refuse to work with it.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    except ImportError:
        # No setuptools at all: bootstrap the egg onto sys.path.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        pkg_resources.require("setuptools>="+version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        print >>sys.stderr, (
        "The required version of setuptools (>=%s) is not available, and\n"
        "can't be installed while this script is running. Please install\n"
        " a more recent version first.\n\n(Currently using %r)"
        ) % (version, e.args[0])
        sys.exit(2)
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to abort / open the firewall.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # setuptools is absent: download the egg and let its own
        # easy_install command install it, cleaning the egg up afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # tell the user to uninstall obsolete version
            use_setuptools(version)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version too old: upgrade via a freshly downloaded egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    from md5 import md5
    # Hash each named egg file and record it in the in-memory table.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Rewrite this very script in place, replacing the md5_data literal.
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update <eggs...>" refreshes the checksum table; anything else
    # is treated as an install/upgrade request.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import ez_setup
import shutil, sys, os, glob
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import py2exe
curpath = os.path.dirname(os.path.abspath(__file__))
def find_py_modules():
    """Return the module names (extension stripped) of all top-level src/*.py files."""
    names = []
    for full_path in glob.glob(os.path.join(curpath, r'src/*.py')):
        base_name = os.path.split(full_path)[-1]
        names.append(os.path.splitext(base_name)[0])
    return names
def find_data_files(base_dir, files=('logging.conf', 'urls.cfg', 'spider.conf')):
    """Return the absolute paths, under curpath/base_dir, of the given data
    files that actually exist on disk.

    base_dir -- directory (relative to this script) holding the config files.
    files    -- file names to look for. The default was a mutable list, a
                classic Python pitfall; it is now a tuple (callers passing
                their own list are unaffected).
    """
    data_files = []
    for fAbsPath in (os.path.join(curpath, base_dir, f) for f in files):
        if os.path.exists(fAbsPath):
            data_files.append(fAbsPath)
    return data_files
def singleSetUp(site_name):
    """Build a standalone py2exe distribution for one spider site.

    Removes any previous dist/<site_name> output, then runs setuptools with
    the py2exe command using src/<site_name>/<site_name>spider.py as the
    console entry script.
    """
    dist_dir = 'dist/%s' % site_name
    if os.path.isdir(dist_dir): # remove the previous build output first
        print 'rm %s dist_dir first' % dist_dir
        shutil.rmtree(dist_dir)
    bDir = 'src/%s' % site_name
    setup(
        name = "%sspider" % site_name,
        cmdclass = {'py2exe': py2exe.build_exe.py2exe},
        version = '1.0',
        packages = find_packages(bDir),# include all packages under src
        package_dir = {'':'src'}, # tell distutils packages are under src
        py_modules = find_py_modules(),
        console = ['src/%s/%sspider.py' % (site_name,site_name)],
        zip_safe=True,
        #test_suite = "test.test_enum.suite",
        package_data={'': ["*.*"],},
        options={'py2exe': {'optimize': 2,
                            'compressed': True,
                            'dist_dir': dist_dir, } },
        #data_files = find_data_files(bDir),
        # installed or upgraded on the target machine
        install_requires=['chardet', 'enum', 'BeautifulSoup', 'threadpool'],
        # PyPI metadata
        # metadata for upload to PyPI
        author="zhongfeng",
        author_email="fzhong@travelsky.com",
        description="21obuys Package",
        license="PSF",
        keywords="crawlers",
        classifiers=[
            "Development Status :: 4 - Beta",
            "License :: OSI Approved :: GNU General Public License (GPL)",
            "License :: OSI Approved :: Python Software Foundation License",
            "Programming Language :: Python",
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Operating System :: OS Independent",
            "Intended Audience :: Developers",
        ],
    )
class BuildSpiderExe(object):
    """Drive py2exe packaging for every supported spider site."""
    def __init__(self):
        # Full site list; switch to the single-site list below when debugging
        # one target.
        self.sites=['amazon','coo8','dangdang','gome','icson','j360buy',
                    'newegg','suning','efeihu','lusen']
        #self.sites = ['j360buy']
    def run(self):
        for name in self.sites:
            singleSetUp(name)
        # drop the intermediate "build" folder left behind by setup()
        if os.path.isdir('build'):
            shutil.rmtree('build')
if __name__ == '__main__':
    # Default to the py2exe command when none was given on the command line.
    if len(sys.argv) < 2:
        sys.argv.append('py2exe')
    BuildSpiderExe().run()
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c3"
# Mirror path embeds the running interpreter's major.minor version.
DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good egg checksums; regenerate with "ez_setup.py --md5update <eggs>".
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
}
import sys, os
def _validate_md5(egg_name, data):
    # Verify a downloaded egg against the known-good checksum table; abort
    # (exit code 2) on mismatch — a corrupted or tampered download. Eggs not
    # listed in md5_data pass through unchecked.
    if egg_name in md5_data:
        from md5 import md5
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
            "md5 validation of %s failed! (Possible download problem?)"
            % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    try:
        import setuptools
        # '0.0.1' is the broken placeholder release; refuse to work with it.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    except ImportError:
        # No setuptools at all: bootstrap the egg onto sys.path.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        pkg_resources.require("setuptools>="+version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        print >>sys.stderr, (
        "The required version of setuptools (>=%s) is not available, and\n"
        "can't be installed while this script is running. Please install\n"
        " a more recent version first.\n\n(Currently using %r)"
        ) % (version, e.args[0])
        sys.exit(2)
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Give the user a chance to abort / open the firewall.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # setuptools is absent: download the egg and let its own
        # easy_install command install it, cleaning the egg up afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # tell the user to uninstall obsolete version
            use_setuptools(version)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version too old: upgrade via a freshly downloaded egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    from md5 import md5
    # Hash each named egg file and record it in the in-memory table.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Rewrite this very script in place, replacing the md5_data literal.
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update <eggs...>" refreshes the checksum table; anything else
    # is treated as an install/upgrade request.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from j360buy.j360_feature import __360buy_FEATURES_MAP__
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_360Buy(CaptchaProfile):
    """Captcha profile that decodes 360buy (JD) price images into text."""
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: build the glyph feature map only once, on first use.
        '''
        if '_inst' not in vars(cls):
            # Convert each ASCII-art feature key into its comparable data form.
            cls.__catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in __360buy_FEATURES_MAP__.iteritems()])
            cls._inst = super(CaptchaProfile_360Buy, cls).__new__(cls)
        return cls._inst
    def filter(self, im):
        # Edge-enhance, then binarise (greyscale -> 1-bit) to clean the image.
        return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    def split(self, im):
        # Fixed crop boxes keyed by overall image (width, height); each box
        # isolates one glyph (a digit or the decimal point).
        # A KeyError here means an image size this profile was never taught.
        matrix = {(48,12) : [(15, 3, 21, 11), (23, 3, 25, 11),(27,3,33,11),(35,3,41,11)],
                  (52,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,33,11),(35,3,41,11),(43,3,49,11)],
                  (65,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,41,11),(43,3,49,11),(51,3,57,11)],
                  (75,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,49,11),(51,3,57,11),(59, 3, 65, 11)],
                  (80,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,53,11),(55,3,57,11),(59, 3, 65, 11),(67,3,73,11)]
                  }
        return [im.crop(box) for box in matrix[im.size]]
    def match(self, im):
        # Fast path: exact lookup of the glyph's binary map.
        imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
        result = self.__catagory_FEATURES_MAP__.get(imageData,None)
        if result != None:
            return result
        # Slow path: no exact match. Print the unknown map (so it can be added
        # to the feature table) and fall back to the nearest known glyph by
        # Levenshtein distance.
        print CaptchaImageAlgorithm.GetBinaryMap(im)
        source = im.getdata()
        algorithm = CaptchaAlgorithm()
        minimal = min(__360buy_FEATURES_MAP__, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
        #print minimal
        return __360buy_FEATURES_MAP__[minimal]
def captcha_360buy(filename):
    # Decode a 360buy price image (path or file-like object) to its text value.
    return captcha(filename, CaptchaProfile_360Buy())
def test():
    # Ad-hoc manual check against a sample image on local disk.
    print captcha_360buy(r'c:\gp359329,2.png')
if __name__ == '__main__':
    # Manual experimentation harness; relies on sample images under c:\.
    im = Image.open(r'c:\1.png')
    im2 = Image.open(r'c:\1.png')
    diff = ImageChops.difference(im, im2)
    im = im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    dt = im.getdata()
    print im.size
    # Crop the individual glyph cells of a 7-glyph (75x12) price image.
    it1 = im.crop((15, 3, 21, 11))
    it2 = im.crop((23, 3, 29, 11))
    it3 = im.crop((31, 3, 37, 11))
    it4 = im.crop((39, 3, 45, 11))
    it5 = im.crop((47, 3, 49, 11))
    it6 = im.crop((51, 3, 57, 11))
    it7 = im.crop((59, 3, 65, 11))
    cia = CaptchaImageAlgorithm()
    s7 = cia.GetBinaryMap(it1)
    print s7
    profile = CaptchaProfile_360Buy()
    print '+++++++++++++++++++++++++++'
    # Repeated decode of the same image: sanity check for determinism.
    for t in range(100):
        print captcha_360buy(r'c:\5.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from j360buy.image_price import captcha_360buy
from crawlerhttp import crawle
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
class J360buyAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.360buy.com/allSort.aspx: extract the complete
    three-level category tree and wrap each node in an ObuyUrlSummary.
    '''
    mainHost = r'http://www.360buy.com'  # prefix for relative category links
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buyAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        """Return the list of level-3 category ObuyUrlSummary objects.

        Levels 1 and 2 are built with isCrawle=False: only the level-3
        (product listing) pages are actually crawled later.
        """
        finalUrlList = []
        allSort = self.soup.find(name='div', attrs={'id':'allsort'})
        for t in allSort.findAll(name='div', attrs={'id':re.compile('JDS_[0-9]+')}):  # level-1 categories
            sort_1 = t.find(name='div', attrs={'class':'mt'})
            name, url = ParserUtils.parserTag_A(sort_1.h2.a)
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(name='div', attrs={'class':'mc'})
            for tt in sort_2(name='dl'):  # level-2 categories
                name, url = ParserUtils.parserTag_A(tt.dt.a)
                url = ''.join((self.mainHost, url))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd(name='em'):  # level-3 categories
                    name, url = ParserUtils.parserTag_A(ttt.a)
                    # NOTE(review): level-2 joins host+href directly while
                    # level-3 inserts a '/' — presumably the hrefs differ in
                    # form; confirm against a live allSort.aspx.
                    url = ''.join((self.mainHost, '/', url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class J360buySort3PageParser(Sort3PageParser):
    '''
    Parser for 360buy level-3 (product listing) pages.
    '''
    pricePageNum = 8  # thread-pool size for fetching price images concurrently
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Listing URLs look like .../737-794-870.html; pagination inserts
        # "-0-0-0-0-0-0-0-1-1-<page>" before the extension ({} filled later).
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        # Total page count is the denominator of the pager's "x/y" label.
        pageSeg = self.soup.find(name='div', attrs={'id':'filter'}).find(attrs={'class':'pagin pagin-m fr'})
        totalPage = int(pageSeg.span.string.split('/')[-1])
        return totalPage
    def __getAdWords(self, plist):
        # Ad titles come from a separate JSONP service; the request parameters
        # are embedded as a dict literal inside the page's inline script.
        # NOTE(review): eval() on page content executes untrusted input —
        # ast.literal_eval (or json) would be safer here.
        adQueryDict = eval(re.compile(r'{.*}').search(str(plist.script)).group())
        baseUrl = 'http://www.360buy.com/JdService.aspx?callback=GetJdwsmentsCallback&action=GetJdwsment'
        url = '&'.join((baseUrl, urllib.urlencode(adQueryDict)))
        result = crawle(url)
        ct = re.compile(r'{.*}').search(result.content)
        if ct is None:
            return []
        jObj = json.loads(ct.group())
        return jObj['html']
    def parserPageInfos(self):
        """Extract a ProductDetails record for every product on the page.

        Prices are rendered as captcha images; they are downloaded by a
        thread pool and decoded with captcha_360buy.
        """
        def getProductPrice(*req):
            # Thread worker: download one price image and record the result.
            priceImgUrl = req[0]
            result = crawle(priceImgUrl)
            proc_normal_result(req, result)
            print 'Get price:%s' % priceImgUrl
            return result
        def proc_normal_result(req, result):
            # On HTTP 200, decode the image and append the completed record.
            args = req
            if result.code == 200:
                prodDetail = args[1]
                resultList = args[2]
                prodDetail.privPrice = captcha_360buy(StringIO(result.content))
                resultList.append(prodDetail)
            else:
                print args[0]
        resultList = []
        plist = self.soup.find(name='div', attrs={'id':'plist'})
        if plist is None:
            raise Exception("Page Error")
        # NOTE(review): this unconditional return makes everything below
        # unreachable, so no prices are ever fetched and an empty list is
        # always returned. It looks like a debugging leftover (or a deliberate
        # kill-switch for the expensive price crawl) — confirm intent before
        # removing it.
        return resultList
        try:
            pool = ThreadPool(self.pricePageNum)
            pid_ad = dict([[int(wa['Wid']), wa['AdTitle']] for wa in self.__getAdWords(plist)])
            for li in plist(name='li', attrs={'sku':re.compile('[0-9]+')}):
                pid = int(li['sku'])
                pName = li.find(name='div', attrs={'class':'p-name'}).a.getText()
                priceImgUrl = li.find(name='div', attrs={'class':'p-price'}).img['src']
                adWords = pid_ad.get(pid, '')
                prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords)
                req = WorkRequest(getProductPrice, [priceImgUrl, prodDetail, resultList, pool], None,
                                  callback=None)
                pool.putRequest(req)
            pool.wait()
        except Exception,e:
            # NOTE(review): "raise e" discards the original traceback;
            # a bare "raise" would preserve it.
            raise e
        finally:
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList
class J360buySort4PageParser(J360buySort3PageParser):
    '''
    Level-4 category pages are plain listing pages: only product details
    are extracted, no further sub-category URLs.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Intentionally a no-op: level-4 pages have no child category pages.
        pass
''' test '''
# Fixture locations used by the manual test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test360BuyAllSortPage():
    # Manual check: parse a saved copy of allSort.aspx and print the tree.
    fileName = os.path.join(testFilePath, 'allSort.aspx')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.360buy.com/allSort.aspx', name='360buy')
    firstPage = J360buyAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        print '/'.join(sort_3.getSavePathL())
        print sort_3.catagoryLevel
def testSort3Page():
    # Manual check: extract level-4 (pagination) URLs from a saved listing page.
    fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4
def testSort3Details():
    # Manual check: extract product details from a saved listing page.
    fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
    sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
    sort3Page.parserPageInfos()
if __name__ == '__main__':
    # Run one manual scenario; toggle the others as needed.
    #test360BuyAllSortPage()
    #testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
# ASCII-art glyph templates for the characters rendered in 360buy price
# images: '#' = set pixel, '_' = blank. Keys are normalised through
# feature_to_data() before lookup; several digits have two empirically
# captured variants. NOTE(review): the first '2' and '4' variants have 7 art
# rows while the rest have 8 — presumably captured that way from real
# images; confirm before editing.
__360buy_FEATURES_MAP__ = {
'''
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
#___##
_###__
'''
:
'0',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
##__##
_###__
'''
:
'0',
'''
__##__
_#_#__
__##__
__##__
__#___
__##__
__#___
_####_
'''
:
'1',
'''
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
__##__
____##
____##
##__#_
_####_
'''
:
'3',
'''
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
____#_
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
_#####
_#____
_##___
_#_##_
____##
____#_
##__##
_###__
'''
:
'5',
'''
__###_
_##___
#_____
#####_
#___##
##__#_
#___##
_###__
'''
:
'6',
'''
######
____#_
___##_
___#__
__##__
__#___
_##___
_#____
'''
:
'7',
'''
_####_
##__##
#___#_
_###__
##__##
##__##
#___#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
_###__
##__##
#___##
##__#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
##__##
_##_##
____##
___#__
_###__
'''
:
'9',
'''
_####_
##__##
#___#_
##__##
_###_#
____##
___#__
_###__
'''
:
'9',
}
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from j360buy.j360_feature import __360buy_FEATURES_MAP__
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_360Buy(CaptchaProfile):
    """Captcha profile that decodes 360buy (JD) price images into text."""
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: build the glyph feature map only once, on first use.
        '''
        if '_inst' not in vars(cls):
            # Convert each ASCII-art feature key into its comparable data form.
            cls.__catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in __360buy_FEATURES_MAP__.iteritems()])
            cls._inst = super(CaptchaProfile_360Buy, cls).__new__(cls)
        return cls._inst
    def filter(self, im):
        # Edge-enhance, then binarise (greyscale -> 1-bit) to clean the image.
        return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    def split(self, im):
        # Fixed crop boxes keyed by overall image (width, height); each box
        # isolates one glyph (a digit or the decimal point).
        # A KeyError here means an image size this profile was never taught.
        matrix = {(48,12) : [(15, 3, 21, 11), (23, 3, 25, 11),(27,3,33,11),(35,3,41,11)],
                  (52,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,33,11),(35,3,41,11),(43,3,49,11)],
                  (65,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,41,11),(43,3,49,11),(51,3,57,11)],
                  (75,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,49,11),(51,3,57,11),(59, 3, 65, 11)],
                  (80,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,53,11),(55,3,57,11),(59, 3, 65, 11),(67,3,73,11)]
                  }
        return [im.crop(box) for box in matrix[im.size]]
    def match(self, im):
        # Fast path: exact lookup of the glyph's binary map.
        imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
        result = self.__catagory_FEATURES_MAP__.get(imageData,None)
        if result != None:
            return result
        # Slow path: no exact match. Print the unknown map (so it can be added
        # to the feature table) and fall back to the nearest known glyph by
        # Levenshtein distance.
        print CaptchaImageAlgorithm.GetBinaryMap(im)
        source = im.getdata()
        algorithm = CaptchaAlgorithm()
        minimal = min(__360buy_FEATURES_MAP__, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
        #print minimal
        return __360buy_FEATURES_MAP__[minimal]
def captcha_360buy(filename):
    # Decode a 360buy price image (path or file-like object) to its text value.
    return captcha(filename, CaptchaProfile_360Buy())
def test():
    # Ad-hoc manual check against a sample image on local disk.
    print captcha_360buy(r'c:\gp359329,2.png')
if __name__ == '__main__':
    # Manual experimentation harness; relies on sample images under c:\.
    im = Image.open(r'c:\1.png')
    im2 = Image.open(r'c:\1.png')
    diff = ImageChops.difference(im, im2)
    im = im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    dt = im.getdata()
    print im.size
    # Crop the individual glyph cells of a 7-glyph (75x12) price image.
    it1 = im.crop((15, 3, 21, 11))
    it2 = im.crop((23, 3, 29, 11))
    it3 = im.crop((31, 3, 37, 11))
    it4 = im.crop((39, 3, 45, 11))
    it5 = im.crop((47, 3, 49, 11))
    it6 = im.crop((51, 3, 57, 11))
    it7 = im.crop((59, 3, 65, 11))
    cia = CaptchaImageAlgorithm()
    s7 = cia.GetBinaryMap(it1)
    print s7
    profile = CaptchaProfile_360Buy()
    print '+++++++++++++++++++++++++++'
    # Repeated decode of the same image: sanity check for determinism.
    for t in range(100):
        print captcha_360buy(r'c:\5.png')
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Crawl 360buy starting from the full category page (level 0), restricted
    # to the electronic/digital/computer top-level categories.
    parserDict = {0:J360buyAllSortParser,3:J360buySort3PageParser,4:J360buySort4PageParser}
    # NOTE(review): sort3, digitRoot and pcare below look like alternative
    # entry points kept for experiments; they are currently unused.
    sort3 = ObuyUrlSummary(url = r'http://www.360buy.com/products/737-964-795.html',name='360buy',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-659-0-0-0-0-0-0-0-1-1-2.html',
                               name='digital',isRecursed = False,catagoryLevel = 4)
    j360buyRoot = ObuyUrlSummary(url = r'http://www.360buy.com/allSort.aspx',name='360buy',
                                 isRecursed = True,catagoryLevel = 0)
    pcare = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-000.html',
                           name='手机',isRecursed = False,catagoryLevel = 2)
    pdigital = ObuyUrlSummary(url = r'http://www.360buy.com/digital.html',name='digital',catagoryLevel = 1)
    pelectronic = ObuyUrlSummary(url = r'http://www.360buy.com/electronic.html',name='electronic',catagoryLevel = 1)
    pcomputer = ObuyUrlSummary(url = r'http://www.360buy.com/computer.html',name='computer',catagoryLevel = 1)
    includes = [pelectronic,pdigital,pcomputer]
    spider = ObuySpider(rootUrlSummary = j360buyRoot,parserDict = parserDict,include = includes,exclude = None,threadNum = 5)
    spider.spide()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
# ASCII-art glyph templates for the characters rendered in 360buy price
# images: '#' = set pixel, '_' = blank. Keys are normalised through
# feature_to_data() before lookup; several digits have two empirically
# captured variants. NOTE(review): the first '2' and '4' variants have 7 art
# rows while the rest have 8 — presumably captured that way from real
# images; confirm before editing.
__360buy_FEATURES_MAP__ = {
'''
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
#___##
_###__
'''
:
'0',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
##__##
_###__
'''
:
'0',
'''
__##__
_#_#__
__##__
__##__
__#___
__##__
__#___
_####_
'''
:
'1',
'''
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
__##__
____##
____##
##__#_
_####_
'''
:
'3',
'''
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
____#_
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
_#####
_#____
_##___
_#_##_
____##
____#_
##__##
_###__
'''
:
'5',
'''
__###_
_##___
#_____
#####_
#___##
##__#_
#___##
_###__
'''
:
'6',
'''
######
____#_
___##_
___#__
__##__
__#___
_##___
_#____
'''
:
'7',
'''
_####_
##__##
#___#_
_###__
##__##
##__##
#___#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
_###__
##__##
#___##
##__#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
##__##
_##_##
____##
___#__
_###__
'''
:
'9',
'''
_####_
##__##
#___#_
##__##
_###_#
____##
___#__
_###__
'''
:
'9',
}
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # maps catagoryLevel -> parser class used by the spider for that level
    parserDict = {0:J360buyAllSortParser,3:J360buySort3PageParser,4:J360buySort4PageParser}
    # NOTE(review): sort3, digitRoot and pcare below are prepared but unused
    # alternative entry points; only j360buyRoot and includes feed the spider.
    sort3 = ObuyUrlSummary(url = r'http://www.360buy.com/products/737-964-795.html',name='360buy',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-659-0-0-0-0-0-0-0-1-1-2.html',
                               name='digital',isRecursed = False,catagoryLevel = 4)
    # level-0 root: the "all categories" page, crawled recursively
    j360buyRoot = ObuyUrlSummary(url = r'http://www.360buy.com/allSort.aspx',name='360buy',
                                 isRecursed = True,catagoryLevel = 0)
    pcare = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-000.html',
                           name='手机',isRecursed = False,catagoryLevel = 2)
    # level-1 whitelist: restrict the crawl to these three departments
    pdigital = ObuyUrlSummary(url = r'http://www.360buy.com/digital.html',name='digital',catagoryLevel = 1)
    pelectronic = ObuyUrlSummary(url = r'http://www.360buy.com/electronic.html',name='electronic',catagoryLevel = 1)
    pcomputer = ObuyUrlSummary(url = r'http://www.360buy.com/computer.html',name='computer',catagoryLevel = 1)
    includes = [pelectronic,pdigital,pcomputer]
    spider = ObuySpider(rootUrlSummary = j360buyRoot,parserDict = parserDict,include = includes,exclude = None,threadNum = 5)
    spider.spide()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re
import time
try:
    # optional py2-only JIT accelerator; speeds up the pixel loops below
    import psyco
    psyco.full()
except ImportError:
    pass  # psyco unavailable -- run unaccelerated
class CaptchaAlgorithm(object):
    '''String-level captcha matching helpers.'''
    def LevenshteinDistance(self, m, n):
        '''Return the edit distance (insertions/deletions/substitutions)
        between sequences m and n, using a rolling two-row DP table.'''
        previous = list(range(len(n) + 1))
        for i, a in enumerate(m, 1):
            current = [i]
            for j, b in enumerate(n, 1):
                substitution = previous[j - 1] + (0 if a == b else 1)
                current.append(min(previous[j] + 1,
                                   current[j - 1] + 1,
                                   substitution))
            previous = current
        return previous[-1]
class CaptchaImageAlgorithm(object):
    '''Pixel-level helpers for segmenting a black-on-white captcha image.

    All methods expect a PIL-style image: im.size == (xsize, ysize) and
    im.load() returning a pixel-access object indexable as pixels[x, y],
    where value 0 means an ink (black) pixel.
    '''
    @staticmethod
    def GetPixelsXEdges(im):
        """Return [(x_start, x_end), ...] column ranges of black runs;
        x_end is the first all-white column after the run (half-open)."""
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: outside a black run, 1: inside one
        edges = []
        for x in range(xsize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for y in range(ysize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(x)
        if state == 1:
            # bug fix: a glyph touching the right border used to leave an
            # odd-length edge list and crash the pairing below (IndexError)
            edges.append(xsize)
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def GetPixelsYEdges(im):
        """Row-wise analogue of GetPixelsXEdges: [(y_start, y_end), ...]."""
        pixels = im.load()
        xsize, ysize = im.size
        state = -1
        edges = []
        for y in range(ysize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for x in range(xsize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(y)
        if state == 1:
            # bug fix: close a run that reaches the bottom border
            edges.append(ysize)
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def StripYEdge(im):
        """Crop away the all-white rows above and below the ink rows."""
        yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
        if not yedges:
            return im  # fix: a fully blank image used to raise IndexError
        y1, y2 = yedges[0][0], yedges[-1][1]
        return im.crop((0, y1, im.size[0], y2))
    @staticmethod
    def GetBinaryMap(im):
        """Render the image as text, one row per line (row-major):
        '#' for ink pixels, '_' otherwise."""
        xsize, ysize = im.size
        pixels = im.load()
        return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in range(xsize)) for y in range(ysize))
    @staticmethod
    def getBitMapIn(im):
        """Flat tuple of pixel values (0 = ink, 255 = background).

        NOTE(review): iterates x in the outer loop (column-major) while
        GetBinaryMap is row-major -- confirm callers expect this order.
        """
        xsize, ysize = im.size
        pixels = im.load()
        return tuple(0 if pixels[x, y] == 0 else 255 for x in range(xsize) for y in range(ysize))
class CaptchaProfile(object):
    '''Abstract per-site recognition strategy: preprocess, segment, match.

    NOTE(review): captcha() calls profile.filter(im), but this base method is
    spelled "fiter"; subclasses apparently define filter() themselves.  The
    name is kept for backward compatibility.
    '''
    def fiter(self, im):
        # bug fix: `raise NotImplemented` raised a TypeError (NotImplemented
        # is not an exception class); NotImplementedError is the correct one.
        raise NotImplementedError
    def split(self, im):
        raise NotImplementedError
    def match(self, im):
        raise NotImplementedError
def feature_to_data(feature):
    """Strip all whitespace from an ASCII-art feature string and map each
    remaining character to a pixel value: '#' -> 0 (ink), anything else -> 255.
    """
    packed = re.sub(r'\s+', '', feature)
    return tuple(0 if ch == '#' else 255 for ch in packed)
def captcha(filename, profile):
    """Recognize the text in a captcha image file.

    Opens *filename*, lets *profile* preprocess (filter) and segment (split)
    the image, then concatenates the per-glyph match results into a string.
    """
    image = Image.open(filename)
    image = profile.filter(image)
    glyphs = profile.split(image)
    return ''.join(profile.match(glyph) for glyph in glyphs)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2006-02-11
抓取核心类,用于抓取页面;
可以支持登陆抓取等;
@author: zhongfeng
'''
from __future__ import with_statement
import urllib
import urllib2
import socket
import gzip
import zlib
import cookielib
from copy import deepcopy
from threadpool import ThreadPool,makeRequests
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from enum import Enum
# global socket timeout (seconds) applied to every request
timeout = 15
socket.setdefaulttimeout(timeout)
# fallback status code used when a failure carries no HTTP code
UnKnownErrCode=700
# default http headers sent with every request
commonHeaders = {"Accept":"*/*", "User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; GTB6.5)"}
# how a UrlSummary should be fetched: plain GET, GET with query data, or POST
CrawlerType = Enum('GET_URL', 'GET_MESSAGE', 'POST_MESSAGE')
# sample proxy mapping; apparently unused by default
proxiesDic = {'http':'61.152.108.19:8080'}
class UrlSummary(object):
    ''' Describes how one URL should be fetched: target url, optional payload,
    request headers and GET/POST mode (CrawlerType). '''
    def __init__(self, url, data=None, headers=None,
                 crawlerType=CrawlerType.GET_URL):
        assert url is not None  # a summary without a URL is meaningless
        if headers is None:
            # copy the shared defaults so per-request header tweaks don't leak
            headers = deepcopy(commonHeaders)
        self.url = url
        self.data = data
        self.headers = headers
        self.crawlerType = crawlerType
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
class CrawlResult(object):
    ''' Outcome of one crawl: final URL, HTTP status code, body content and
    response headers.  Defaults represent an unknown failure. '''
    def __init__(self, url=None, code=UnKnownErrCode, content='', headers=None):
        self.url, self.code = url, code
        self.content, self.headers = content, headers
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
# deflate support
def deflate(data):
    """Decompress *data* that may be raw-deflate or zlib-wrapped.

    Servers sending "Content-Encoding: deflate" disagree on whether the zlib
    header is included, so try the headerless format first and fall back.
    """
    try:
        return zlib.decompress(data, - zlib.MAX_WBITS)  # raw deflate stream
    except zlib.error:
        return zlib.decompress(data)  # standard zlib-wrapped stream
class ContentEncodingProcessor(urllib2.BaseHandler):
    """A handler to add gzip capabilities to urllib2 requests """
    # add headers to requests
    def http_request(self, req):
        # advertise that we can decode compressed response bodies
        req.add_header("Accept-Encoding", "gzip,deflate")
        return req
    # decode
    def http_response(self, req, resp):
        # Transparently unwrap gzip- or deflate-encoded bodies.  The original
        # response is kept in old_resp so its headers/url/code/msg can be
        # copied onto the replacement addinfourl wrapper.
        old_resp = resp
        if resp.headers.get("content-encoding") == "gzip":
            gz = gzip.GzipFile(fileobj=StringIO(resp.read()),
                               mode="r")
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        if resp.headers.get("content-encoding") == "deflate":
            gz = StringIO(deflate(resp.read()))
            resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp
class HTTPRefererProcessor(urllib2.BaseHandler):
    """Automatically fills in a Referer header (defeats hot-link checks)."""
    def __init__(self):
        self.referer = None  # URL of the last response we saw
    def http_request(self, req):
        if not req.has_header("Referer"):
            if self.referer is None:
                # first request: fall back to the target host itself
                self.referer = req.get_host()
            req.add_unredirected_header("Referer", self.referer)
        return req
    def http_response(self, req, resp):
        # remember where we landed so the next request refers back to it
        self.referer = resp.geturl()
        return resp
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
    '''Handles 301/302 redirects; currently a plain delegate, but the hooks
    are kept so the pre-redirect status code could be recorded if needed.'''
    def http_error_301(self, req, fp, code, msg, headers):
        return urllib2.HTTPRedirectHandler.http_error_301(
            self, req, fp, code, msg, headers)
    def http_error_302(self, req, fp, code, msg, headers):
        return urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
    ''' Turns HTTP errors other than 301/302 into a returned response object
    instead of a raised exception, so callers can inspect the status code. '''
    def http_error_default(self, req, fp, code, msg, headers):
        err = urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
        err.code = code
        return err
def __buildCookieProcessor():
    ''' Build a cookie-carrying handler so login sessions persist. '''
    jar = cookielib.CookieJar()
    return urllib2.HTTPCookieProcessor(jar)
def getHandlers(debug = False,cookies = True,proxy = None,extraHandlers = None):
    """Assemble the urllib2 handler chain shared by every opener.

    debug enables urllib2's wire-level debugging; cookies adds a CookieJar
    (needed for logins); proxy is a {'http': 'host:port'} mapping.
    NOTE(review): a plain HTTPRedirectHandler is used here even though
    SmartRedirectHandler exists above -- confirm which one is intended.
    """
    handlers = [
        urllib2.HTTPRedirectHandler(),   # follow 301/302 redirects
        DefaultErrorHandler(),           # other HTTP errors become results
        ContentEncodingProcessor(),      # transparent gzip/deflate decoding
        HTTPRefererProcessor(),          # auto Referer header
    ]
    if proxy is not None:
        handlers.append(urllib2.ProxyHandler(proxy))
    if debug:
        handlers.append(urllib2.HTTPHandler(debuglevel=debug))
    if cookies:
        handlers.append(__buildCookieProcessor())
    if extraHandlers is not None:
        handlers.extend(list(extraHandlers))
    return handlers
def createOpener(handlers = None):
    """Build a urllib2 opener from the given handler chain (defaults to the
    standard chain from getHandlers())."""
    chain = getHandlers() if handlers is None else handlers
    return urllib2.build_opener(*chain)
class CrawlerHttp(object):
''' 提供抓取数据服务的facade接口类 '''
def __init__(self, urlSummary, opener=None):
self.urlSummary = urlSummary
if opener is None:
self.opener = createOpener()
else:
self.opener = opener
def __createRequest(self):
request = None
url = self.urlSummary.url
data = None
if self.urlSummary.data != None:
data = urllib.urlencode(self.urlSummary.data)
if self.urlSummary.crawlerType == CrawlerType.POST_MESSAGE:
request = urllib2.Request(url, data)
elif self.urlSummary.crawlerType == CrawlerType.GET_MESSAGE :
fullUrl = ''.join([url, '?', data])
request = urllib2.Request(fullUrl)
else:
request = urllib2.Request(url)
headers = self.urlSummary.headers
if headers:
for k, v in headers.items():
request.add_header(k, v)
return request
def __getResponseStreamData(self,resp):
dataArr = []
try:
if resp != None:
while True:
data = resp.read(102400)#onetimesize 100k
if not data:
break
dataArr.append(data)
except IOError, e:
raise e
return ''.join(dataArr)
def fetch(self, isGetData = True, islogin = False, retries=0):
'''Fetch data and metadata from a URL, file, stream, or string'''
result = CrawlResult()
resp = None
try:
req = self.__createRequest()
resp = self.opener.open(req)
if isGetData: #如果为false,则不读取,仅获得response的headers等信息
result.content = self.__getResponseStreamData(resp)
except IOError, e:
print 'Couldn\'t fulfill the request.Error code:%s, Reason: %s,URL: %s' % \
(getattr(e,'code',UnKnownErrCode),getattr(e,'reason','Unknown Error'), req.get_full_url())
finally:
if resp != None:
result.headers = getattr(resp, 'headers', None)
result.url = getattr(resp, 'url', '')
result.code = getattr(resp, 'code', UnKnownErrCode)
if islogin:
result.loginResponse = resp #登陆操作时,response不能关闭,待后续操作完成后关闭
else:
resp.close()
if result.code >= 400 and retries != 0:
print 'sleep 3 seconds.try again'
self.fetch(isGetData,islogin,retries - 1)
return result
def crawle(urlSum, debug = False,proxy = None):
    '''One-shot fetch: build a fresh opener (optionally proxied/debugged)
    and fetch urlSum with it.'''
    opener = createOpener(getHandlers(debug = debug, proxy = proxy))
    return crawleDepOpener(urlSum, opener)
def crawleDepOpener(urlSum,opener,reservelogin = False):
    '''Fetch using an existing opener.  urlSum may be a UrlSummary or a bare
    URL string; reservelogin keeps the response open for login sessions.'''
    if not isinstance(urlSum, UrlSummary):
        urlSum = UrlSummary(urlSum)  # bare URL string
    return CrawlerHttp(urlSum, opener).fetch(islogin = reservelogin)
def login(urlSum,debug = False,proxy = None):
    '''Run a login request.  Returns (opener, loginResult) so the
    cookie-carrying opener can be reused for follow-up fetches and the
    kept-open login response can be closed later via logout().'''
    opener = createOpener(getHandlers(debug = debug, proxy = proxy, cookies = True))
    loginResult = crawleDepOpener(urlSum, opener, reservelogin = True)
    return (opener, loginResult)
def logout(loginResult):
    '''Release the response object held open by a login fetch.'''
    resp = loginResult.loginResponse
    return resp.close()
def crawleOnLogin(loginUrlSum,desUrlSum,debug = False,proxy = None):
    '''Log in with loginUrlSum, then fetch desUrlSum with the logged-in
    (cookie-carrying) opener.

    Returns the CrawlResult of the destination fetch, or None when the
    login itself did not return HTTP 200.
    '''
    # bug fix: result used to be unbound (NameError on return) whenever the
    # login response code was not 200
    result = None
    opener,loginResult = login(loginUrlSum,debug,proxy)
    try:
        if loginResult.code == 200:
            result = crawleDepOpener(desUrlSum,opener)
    finally:
        logout(loginResult)  # always release the kept-open login response
    return result
class MutiDownloader(object):
    ''' Multi-threaded (HTTP Range request) download tool.

    NOTE(review): the class name keeps its original spelling ("Muti") for
    backward compatibility with existing callers.
    '''
    def __init__(self, threadNum = 1):
        self.threadNum = threadNum
        # per-part payloads, indexed by part number; filled by save_result
        self.dataAll = [None] * threadNum
    def _getResourceFileSize(self,urlSummary):
        '''Headers-only probe; returns Content-Length or -1 when unknown.'''
        crawler = CrawlerHttp(urlSummary)
        result = crawler.fetch(isGetData = False)
        contentLen = None
        if result.code == 200:
            contentLen = result.headers.get('Content-Length')
        return int(contentLen) if contentLen else -1
    @staticmethod
    def splitBlocks(totalsize, blockNum):
        '''Split totalsize bytes into blockNum contiguous (start, end) ranges
        with inclusive ends; the last block absorbs the remainder.'''
        blocksize = totalsize // blockNum  # // keeps integer math on py2 and py3
        ranges = [(i * blocksize, i * blocksize + blocksize - 1)
                  for i in range(blockNum - 1)]
        ranges.append((blocksize * (blockNum - 1), totalsize - 1))
        return ranges
    @staticmethod
    def downloadPart(urlSum, partNum):
        '''Worker: fetch one byte-range part.'''
        return crawle(urlSum, debug = True)
    def save_result(self, request, result):
        '''Threadpool success callback: store a finished part at its index.'''
        partNum = request.args[1]
        self.dataAll[partNum] = result.content
    # this will be called when an exception occurs within a thread
    @staticmethod
    def handle_exception(request, exc_info):
        if not isinstance(exc_info, tuple):
            # Something is seriously wrong...
            print(request)
            print(exc_info)
            raise SystemExit
        print("**** Exception occured in request #%s: %s" %
              (request.requestID, exc_info))
    def download(self, urlSummary):
        '''Download urlSummary in threadNum parallel byte-range parts.'''
        totalSize = self._getResourceFileSize(urlSummary)
        ranges = MutiDownloader.splitBlocks(totalSize, self.threadNum)
        # bug fix: the old list comprehension collected the None returned by
        # headers.__setitem__, so every request lost its UrlSummary copy
        urlSums = []
        for i in range(self.threadNum):
            part = deepcopy(urlSummary)
            part.headers['Range'] = 'bytes={}-{}'.format(*ranges[i])
            urlSums.append(part)
        # bug fix: args must be [urlSum, partNum] to match downloadPart's
        # signature and save_result's args[1] lookup (was [index, urlSum])
        urlRequests = [([v, k], {}) for k, v in enumerate(urlSums)]
        requests = makeRequests(MutiDownloader.downloadPart, urlRequests,
                                self.save_result, MutiDownloader.handle_exception)
        pool = ThreadPool(self.threadNum)
        for request in requests:
            pool.putRequest(request)
        pool.wait()
        #return ''.join(self.dataAll)
def __detectChardet(crawlResult):
import chardet
if crawlResult.code == 200:
print 'Page %s :Content code is %s' % (crawlResult.url, chardet.detect(crawlResult.content))
if __name__ == '__main__':
    #===========================================================================
    # bookKey = {'url':'search-alias=stripbooks', 'field-keywords':'Java(TM) and JMX: Building Manageable Systems'}
    # firstSearchUrl = UrlSummary(url='http://www.amazon.com/s/ref=nb_sb_noss', data=bookKey, crawlerType=CrawlerType.GET_MESSAGE)
    # bookSearchPageResult = crawle(firstSearchUrl)
    # print bookSearchPageResult.content
    # regx = r'(http://www.amazon.com/[-a-zA-Z]*/[a-z]*/[0-9]*)/ref=sr_1_1'
    #
    # f = file(r'c:/ff.html', 'w')
    # f.write(bookSearchPageResult.content)
    # f.close()
    # amazonUrl = 'http://www.amazon.com/Head-First-Servlets-JSP-Certified/dp/0596516681'
    # import chardet
    # DEBUG = 1
    #===========================================================================
    # smoke test: fetch one category page and snapshot it to disk
    urlSummary = UrlSummary(url="http://www.coo8.com/allcatalog/")
    result = crawle(urlSummary)
    print result.content
    # NOTE(review): r'c:allcatalog.html' is a drive-relative path (no slash
    # after "c:") -- probably meant r'c:/allcatalog.html'.
    with open(r'c:allcatalog.html', 'w') as outputFile:
        outputFile.write(result.content)
#downloader = MutiDownloader()
#downloader.download(urlSummary)
#===========================================================================
# newSmthLoginData = {'id':'dao123mao', 'passwd':'902910','x':'38','y':'1'}
# newsmthUrlSum = UrlSummary(url='http://www.newsmth.net/bbslogin2.php', data=newSmthLoginData, crawlerType=CrawlerType.POST_MESSAGE)
# #opener = login(newsmthUrlSum,debug = True)
# desUrlSum = UrlSummary('http://www.newsmth.net/bbsmailbox.php?path=.DIR&title=%CA%D5%BC%FE%CF%E4')
# result = crawleOnLogin(newsmthUrlSum,desUrlSum,debug = True)
# print result.code, result.content
# with open(r'c:tt1.html', 'w') as outputFile:
# outputFile.write(result.content)
# import re
# regx = r'<span class="tag"><a href="(/tag/.*\?ref_=tag_dpp_cust_itdp_t)" title="([0-9]*) customers tagged this product'
# p = re.compile(regx)
# for t in p.finditer(result.content):
# print t.group(1),t.group(2)
# import os
# print os.sys.path
# bookKey = {'key':u'Java编程思想','catalog':'01'}
# bookKey['key'] = bookKey['key'].encode('gb2312')
# print bookKey['key']
# firstSearchUrl = UrlSummary(url='http://search.dangdang.com/book/search_pub.php',data=bookKey,crawlerType = CrawlerType.GET_MESSAGE)
# bookSearchPageResult = crawle(firstSearchUrl)
# from extract.pageparser import DangDangSearchPageParser ,BookDetailParser
# t = DangDangSearchPageParser(bookSearchPageResult.content)
# urlSummary = t.parserResult()
#
#===========================================================================
#bookDetailPageResult = crawle(urlSummary)
#import chardet
#print chardet.detect(bookDetailPageResult.content)
#bookDetailParser = BookDetailParser(bookDetailPageResult.content)
#bookDetail = bookDetailParser.parserResult()
#from persistence.dbsaver import insertBookDetail
#print insertBookDetail(bookDetail)
#print chardet.detect(bookDetail.contentAbs)
#print bookDetail.contentAbs.decode('GB2312')
#f = file(r'c:/ff.html','w')
#f.write(bookDetailPageResult.content)
#f.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class IcsonAllSortParser(RootCatagoryPageParser):
    '''
    Parses the site-wide category tree (http://sz.icson.com/portal.html)
    into a flat list of level-3 ObuyUrlSummary objects.
    '''
    # prefix used to absolutize relative category URLs
    mainHost = 'http://sz.icson.com/'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk level-1 -> level-2 -> level-3 categories; only the level-3
        summaries are returned.  Levels 1/2 are built with isCrawle=False and
        only serve as the parent chain.'''
        finalUrlList = []
        allSort = self.soup.find(attrs={'id':'protal_list'})
        for t in allSort.findAll(name='div',attrs={'class':'item_hd'}):#level-1 category header
            name,url = ParserUtils.parserTag_A(t.find(name='a'))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='div',attrs={'class':'item_bd'})
            for tt in sort_2(name='dl'):#level-2 category
                name = tt.dt.getText()
                # NOTE(review): the level-2 "url" is host + category *name* --
                # looks like a placeholder rather than a real link; confirm.
                url = ''.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.findAll(name='a'):#level-3 category links
                    name, url = ParserUtils.parserTag_A(ttt)
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class IcsonSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Rebuild the listing URL with a '{}' page-number placeholder before
        # the extension; callers fill in the page index per page.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-6-10-20-0-{}--'
        return '%s%s.%s' % (urlSegs[0].replace('--------',''), pageSeg, urlSegs[1])
    def getTotal(self):
        # Total page count: the link just before the "next page" arrow holds
        # the last page number; no arrow means a single page.
        nextSeg = self.soup.find(name='a',attrs={'class':'page-next'})
        if nextSeg != None:
            t = nextSeg.findPreviousSibling(name='a').getText()
            return int(t)
        else:
            return 1
    def parserPageInfos(self):
        '''Extract one ProductDetails per listed product.

        NOTE(review): soup.find(name='li', ...) returns a single tag, so the
        loop below iterates that tag's *children*; verify the markup makes
        every child a product element (stray text nodes would break .find).
        '''
        plist = self.soup.find(name='li',attrs={'class':'item_list'})
        resultList = []
        for prod in plist:
            pNameSeg = prod.find(attrs={'class':'wrap_info'})
            pName,url = ParserUtils.parserTag_A(pNameSeg.a)
            adWords = pNameSeg.find(name='p',attrs={'class':'hot'})
            # product id = numeric tail of the detail-page URL
            pid = url.rsplit('-',1)[-1].split('.')[0]
            t = prod.find(attrs={'class':'price_icson'})
            if t != None:
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            prodDetail = ProductDetails(productId=pid, privPrice = currentPrice,
                                        name=pName, adWords=adWords)
            resultList.append(prodDetail)
        return resultList
class IcsonSort4PageParser(IcsonSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: product details are still
    extracted (inherited behaviour), but no further sub-URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        '''Leaf level: nothing to enqueue below a level-4 page.'''
        return None
''' test '''
import os
# saved HTML fixtures for the manual tests below live in test_resources/
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testIcsonAllSortPage():
    '''Manual test: parse a saved category-tree page, print the level-3 tree.'''
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='Icson')
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    firstPage = IcsonAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Manual test: enumerate per-page URLs derived from a saved listing page.'''
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    '''Manual test: print product details extracted from a saved listing page.'''
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
def testRegx():
    '''Manual test: extract the total page count via regex from a saved page.'''
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)
if __name__ == '__main__':
#testRegx()
#testIcsonAllSortPage()
testSort3Page()
testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # maps catagoryLevel -> parser class used by the spider for that level
    parserDict = {0:IcsonAllSortParser,3:IcsonSort3PageParser,4:IcsonSort4PageParser}
    # NOTE(review): sort3, digitRoot and pserver are prepared but unused
    # alternative entry points; only IcsonRoot feeds the spider below.
    sort3 = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4002134&store=eq0',name='dangdang',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital',catagoryLevel = 4)
    #spider = ObuySpider(rootUrlSummary = sort3,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    #spider.spide()
    # crawl the whole category tree from the level-0 root
    IcsonRoot = ObuyUrlSummary(url = r'http://category.dangdang.com/',name='dangdang',
                               isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    spider = ObuySpider(rootUrlSummary = IcsonRoot,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    spider.spide()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # NOTE(review): this file is an exact duplicate of the previous spider
    # script -- consider removing one copy.
    # maps catagoryLevel -> parser class used by the spider for that level
    parserDict = {0:IcsonAllSortParser,3:IcsonSort3PageParser,4:IcsonSort4PageParser}
    # sort3, digitRoot and pserver are unused alternative entry points
    sort3 = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4002134&store=eq0',name='dangdang',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital',catagoryLevel = 4)
    #spider = ObuySpider(rootUrlSummary = sort3,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    #spider.spide()
    IcsonRoot = ObuyUrlSummary(url = r'http://category.dangdang.com/',name='dangdang',
                               isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    spider = ObuySpider(rootUrlSummary = IcsonRoot,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    spider.spide()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# setup.py
# Part of 21obuys, a package providing enumerated types for Python.
#
# Copyright © 2007 Ben Finney
# This is free software; you may copy, modify and/or distribute this work
# under the terms of the GNU General Public License, version 2 or later
# or, at your option, the terms of the Python license.
# bootstrap setuptools when it is not installed yet
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
    # NOTE(review): "21obus" is probably a typo for "21obuys" (compare the
    # header, description and keywords) -- confirm before changing, since it
    # is the published distribution name.
    name = "21obus",
    version = '1.0',
    scripts = ['src/360buyspider.py'],
    packages = find_packages('src'), # include all packages under src
    package_dir = {'':'src'},        # tell distutils packages are under src
    #py_modules = [main_module_name],
    # setuptools metadata
    zip_safe = True,
    #test_suite = "test.test_enum.suite",
    #package_data = {
    #    '': ["LICENSE.*"],
    #},
    # runtime dependencies of the crawler package
    install_requires = ['chardet','enum','BeautifulSoup','threadpool'],
    # PyPI metadata
    # metadata for upload to PyPI
    author = "zhongfeng",
    author_email = "fzhong@travelsky.com",
    description = "21obuys Package",
    license = "PSF",
    keywords = "360buy newegg crawlers",
    classifiers = [
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: GNU General Public License (GPL)",
        "License :: OSI Approved :: Python Software Foundation License",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
    ],
)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import time
import random
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, crawle
# source page scraped for candidate proxies (ip:port pairs)
urlsProxy = ["http://proxy.ipcn.org/proxylist.html"]
#urlsProxy = ["http://www.proxycn.com/html_proxy/http-1.html"]
# target site used to benchmark each candidate proxy
desSite = 'http://www.360buy.com'
class ChoiceProxy(object):
    '''Singleton that scrapes a public proxy list once, keeps only the
    proxies that answer desSite quickly, and hands them out at random.'''
    # class-level cache shared by the single instance
    proxyList = []
    def __init__(self):
        pass
    def __new__(cls):
        # classic singleton: the (slow, network-bound) proxy validation runs
        # only on first construction
        if '_inst' not in vars(cls):
            cls.__initProxyList()
            cls._inst = super(ChoiceProxy, cls).__new__(cls)
        return cls._inst
    @classmethod
    def __initProxyList(cls):
        '''Download the proxy-list page and keep every ip:port that passes
        the speed test.'''
        ipcnProxyPageResult = crawle(urlsProxy[0])
        if ipcnProxyPageResult.code == 200:
            #soup = BeautifulSoup(ipcnProxyPageResult.content)
            #proxyContents = soup.find('pre').contents[0]
            p = re.compile(r'(\d+\.\d+\.\d+\.\d+:[0-9]+)')
            for proxyIp in p.findall(ipcnProxyPageResult.content):
                if(cls.__testProxy(proxyIp)):
                    print proxyIp
                    cls.proxyList.append(proxyIp)
    @classmethod
    def __testProxy(cls, proxy):
        '''Accept a proxy only if it fetches desSite with HTTP 200 in <= 10s.'''
        proxyDicts = {'http':proxy}
        start = time.time()
        result = crawle(desSite, proxy = proxyDicts)
        end = time.time()
        estime = end - start
        print proxy, estime
        if result.code != 200 or estime > 10:
            return False
        return True
    @staticmethod
    def choice():
        # may return None when no proxy survived validation
        if len(ChoiceProxy.proxyList) == 0:
            return None
        return random.choice(ChoiceProxy.proxyList)
def choiceHttpProxy():
    '''Return a proxy mapping suitable for getHandlers(proxy=...).

    NOTE(review): when no proxy passed validation this returns
    {'http': None} -- callers should check before handing it to urllib2.
    '''
    return {'http': ChoiceProxy.choice()}
if __name__ == '__main__':
    # smoke test: first call builds the singleton (slow: validates proxies
    # over the network), then draws 10 random picks from the surviving list
    for i in range(10):
        print ChoiceProxy().choice()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class SuningAllSortParser(RootCatagoryPageParser):
    '''
    Parses the full category tree from http://www.suning.com/ into
    ObuyUrlSummary objects; only level-3 entries are returned.
    '''
    # prefix used to absolutize relative category URLs
    mainHost = r'http://www.suning.com'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk level-1 -> level-2 -> level-3 categories; levels 1/2 are
        created with isCrawle=False and only serve as the parent chain.'''
        finalUrlList = []
        allSort = self.soup.find(attrs = {'id':'SNmenuNav'})
        for t in allSort.findAll(name = 'dl'):#level-1 category
            t = t.dt
            name,url = ParserUtils.parserTag_A(t.a)
            url = ''.join((self.mainHost,url))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='dd').find(name='ul',attrs={'class':'sideleft'})
            for tt in sort_2(name='li'):#level-2 category
                name = tt.b.getText().strip()
                # NOTE(review): level-2 url is host + '/' + category *name* --
                # looks like a placeholder rather than a real link; confirm.
                url = '/'.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.div.findAll(name = 'a'):#level-3 category links
                    name, url = ParserUtils.parserTag_A(ttt)
                    url = ''.join((self.mainHost,url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class SuningSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.

    NOTE(review): the selectors and the hard-coded gome.com.cn host below look
    copied from the gome parser rather than written for suning.com -- verify
    against a real page before trusting the output.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Insert a '{}' page-number placeholder before the file extension.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        # "current/total" pager text, e.g. "1/12" -> 12 pages
        pageSeg = self.soup.find(name='div',attrs={'id':'toolbar'}).find(attrs={'class':'thispage'}).getText()
        totalPage = int(pageSeg.split('/')[-1])
        return totalPage
    def parserPageInfos(self):
        '''Extract one ProductDetails per <li> in the product list.'''
        plist = self.soup.find(name='div', attrs={'id':'plist'}).find(name='ul')
        resultList = []
        for li in plist(name='li'):
            pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'p-name'}).a)
            # product id = numeric tail of the detail-page URL
            pid = url.rsplit('/',1)[-1].split('.')[0]
            url = ''.join((r'http://www.gome.com.cn',url))
            price = ParserUtils.getPrice(li.find(name='div', attrs={'class':'p-price'}).getText())
            prodDetail = ProductDetails(productId=pid, privPrice = price,name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
class SuningSort4PageParser(SuningSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: product details are still
    extracted (inherited behaviour), but no further sub-URLs are produced.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        '''Leaf level: nothing to enqueue below a level-4 page.'''
        return None
''' test '''
import os
# saved HTML fixtures for the manual tests below live in test_resources/
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testAllSortPage():
    '''Manual test: print the level-3 category tree parsed from a saved page.'''
    fileName = os.path.join(testFilePath,'SuningAllsort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.Suning.com.cn/allSort.html', name='gome')
    firstPage = SuningAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            pass
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Manual test: enumerate per-page URLs derived from a saved listing page.'''
    fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.Suning.com.cn/products/10000000-10000012-10000070.html',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = SuningSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    '''Manual test: print product details extracted from a saved listing page.'''
    fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = SuningSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    # scratch check: decode a percent-encoded search URL for inspection
    import urllib
    # NOTE(review): the literal below appears mojibake-damaged -- "¤tPage" is
    # almost certainly a corrupted "&currentPage"; restore before reuse.
    url = 'http://localhost/webapp/wcs/stores/servlet/odeSearch?storeId=10052&catalogId=10051&categoryId=20003&langId=-7&ip_state=c0%3ds%253A9%253Bcity_id%253B%253Aeq%253B1001B.s%253A9%253Bcity_id%253B%253Aeq%253B5006F.s%253A9%253Bcity_id%253B%253Aeq%253B5006Z.s%253A9%253Bcity_id%253B%253Ass%253B0000A%26c1%3ds%253A9%253Biphrase%2bbundle%2btaxonomy%2bid%2bfrom%2broot%253B%253Ass%253B%253A20003%26q%3d20%26a0%3diphrase%2bbundle%2btaxonomy%252F%252Fv%253A0%26i%3dsitemap%2bid%26qt%3d1313391335%26qid%3dq8GzGmE5P2Ss3%26vid%3dvSXajhCLXuWWu%26ioe%3dUTF-8%26s2%3dsitemap%2bid%252F%252F1%26qtid%3dn8GzGmE5P2Ss3%26s1%3dpublishTime%252F%252F0%26rid%3dr8OlldtbsEwdf%26s0%3drank%252F%252F0%26t%3d0%26m0%3diphrase%2bbundle%2bid%26mcmode%3dtest&suggestionWordList=&isCatalogSearch=1&isList=0&sortType=0¤tPage=1'
    print urllib.unquote(url)
    #testAllSortPage()
    #testSort3Page()
    #testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-28
@author: zhongfeng
'''
from crawlerhttp import crawle
from logfacade import LoggerFactory
from threadpool import ThreadPool, WorkRequest
from urlparse import urlparse
import os
import time
import threading
from threading import stack_size
stack_size(32768*32)
logger = LoggerFactory.getLogger()
def __procSubUrlRequests(parser,result,spider):
    '''Enqueue every sub-URL found on the page for further crawling.'''
    subUrlSums = parser.parserSubUrlSums()
    if subUrlSums is None:
        return
    for subUrlSum in subUrlSums:
        print('SubUrlSum put Q: %s ,level: %s'
              % (subUrlSum.url, subUrlSum.catagoryLevel))
        _putSpideRequest(subUrlSum, spider)
def __procPageInfos(parser,urlsum):
    '''Parse product details off the page and log one line per product to a
    per-site logger (named after the URL's second hostname label).'''
    resultList = parser.parserPageInfos()
    if resultList is None:
        return
    siteName = urlparse(urlsum.url).hostname.split('.')[1]
    siteLogger = LoggerFactory.getLogger(logName=siteName)
    for parserResult in resultList:
        siteLogger.info(parserResult.logstr())
def _putSpideRequest(urlsum, spider):
    '''Wrap *urlsum* in a WorkRequest and submit it to the spider's pool.'''
    work = WorkRequest(
        main_spide,
        [urlsum, spider],
        None,
        callback=proc_result,
        exc_callback=handle_exception,
    )
    spider.pool.putRequest(work)
def __saveErrorPage(url, content):
    '''Dump *content* to error_page/<site>/<date>/<site>_<timestamp>.html
    next to this module so failed pages can be inspected later.'''
    module_dir = os.path.dirname(os.path.abspath(__file__))
    site = urlparse(url).hostname.split('.')[1]
    day_dir = os.path.join(module_dir, 'error_page', site,
                           time.strftime("%Y-%m-%d"))
    if not os.path.exists(day_dir):
        os.makedirs(day_dir)
    file_name = '%s_%s.html' % (site, time.strftime("%Y-%m-%d_%H-%M-%S"))
    with open(os.path.join(day_dir, file_name), 'w') as output:
        output.write(content)
def handle_exception(request, exc_info):
    '''threadpool exc_callback: report a failed work request.

    A well-formed *exc_info* is the (type, value, traceback) tuple; anything
    else means the pool itself is broken, so dump what we have and abort.
    '''
    if isinstance(exc_info, tuple):
        print("**** Exception occured in request #%s: %s" % (request.requestID, exc_info))
        return
    # Something is seriously wrong...
    print(request)
    print(exc_info)
    raise SystemExit
def proc_result(request, result):
    '''threadpool result callback — intentionally a no-op.

    Result handling lives in proc_normal_result(), which main_spide()
    invokes directly inside the worker thread (so retries are enqueued
    before the pool considers the request finished).  This callback is
    kept only because _putSpideRequest() still registers it on every
    WorkRequest.  The previous commented-out implementation (an earlier,
    callback-driven version of proc_normal_result) has been removed as
    dead code.
    '''
    pass
def __reinqueue_proc(urlsum, result, spider):
    '''Give a failed URL exactly one retry.

    stat == 0 marks a URL that has never failed: record the HTTP code and
    re-enqueue it.  A non-zero stat means this was already the retry, so
    only log the final failure.
    '''
    if urlsum.stat != 0:
        logger.error("Failed %s:%d" % (urlsum.url, result.code))
        return
    urlsum.stat = result.code
    logger.info("urlsum reinqueue:%s" % urlsum.url)
    _putSpideRequest(urlsum, spider)
def proc_normal_result(reqArgs, result):
    '''Handle one crawl result synchronously (called from main_spide).

    *reqArgs* is the [urlsum, spider] argument list of the work request.
    Success path: instantiate the parser registered for the URL's category
    level, enqueue its sub-URLs and/or log its page details.  Every failure
    path (empty body, non-200 code, parser exception) goes through
    __reinqueue_proc for a single retry.

    Fixes vs. the original: the empty-content check was duplicated (the
    second copy inside the 200-branch was unreachable); `except E, e` is
    py2-only syntax (`as e` works on 2.6+ and 3.x); `e.message` is
    deprecated — the exception itself renders the same text.
    '''
    urlsum = reqArgs[0]
    spider = reqArgs[1]
    # An empty body is treated as a failed fetch regardless of the code.
    if result.content == '':
        __reinqueue_proc(urlsum, result, spider)
        return
    if result.code == 200:
        print("**** Result from request #%s: %d" % (urlsum.url, result.code))
        ParserClass = spider.parserDict.get(urlsum.catagoryLevel, None)
        if ParserClass is None:
            # No parser registered for this category level: nothing to do.
            return
        parser = ParserClass(result.content, urlsum, urlsum.include,
                             urlsum.exclude)
        try:
            if urlsum.isRecursed:
                __procSubUrlRequests(parser, result, spider)
            if spider.procDetails:
                __procPageInfos(parser, urlsum)
        except Exception as e:
            logger.error('ParserException.Reason:%s,URL:%s' % (e, urlsum.url))
            __saveErrorPage(urlsum.url, result.content)
            __reinqueue_proc(urlsum, result, spider)
    else:
        __reinqueue_proc(urlsum, result, spider)
def main_spide(*req):
    '''Worker entry point: fetch req[0] (an ObuyUrlSummary), process the
    crawl result in-thread, and return it to the pool.'''
    print("(active worker threads: %i)" % (threading.activeCount() - 1,))
    result = crawle(req[0])
    proc_normal_result(req, result)
    return result
class ObuySpider(object):
    '''Drives a crawl: seeds the thread pool with a root URL summary and
    blocks until every enqueued request has been processed.'''
    def __init__(self,rootUrlSummary = None,parserDict =None,threadNum = 5,
                 procDetails = True,include = None,exclude = None):
        # rootUrlSummary: seed ObuyUrlSummary.  NOTE(review): despite the
        # None default it must always be supplied — the include/exclude
        # assignments below dereference it unconditionally.
        # parserDict: maps catagoryLevel -> parser class.
        self.rootUrlSummary = rootUrlSummary
        self.parserDict = parserDict
        self.procDetails = procDetails # whether to parse page detail records
        self.rootUrlSummary.include = include
        self.rootUrlSummary.exclude = exclude
        self.pool = ThreadPool(threadNum)
    def spide(self):
        '''Enqueue the root URL and wait for the pool to drain.'''
        _putSpideRequest(self.rootUrlSummary,self)
        self.pool.wait()
if __name__ == '__main__':
    # Running this module directly only initializes the default logger;
    # concrete crawls are launched from the per-site main scripts.
    logger = LoggerFactory.getLogger()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaAlgorithm(object):
    '''String-level helpers for captcha matching.'''
    def LevenshteinDistance(self, m, n):
        '''Return the edit distance between sequences *m* and *n* using the
        classic dynamic-programming table (unit-cost insert, delete and
        substitute).'''
        rows = [[i] for i in range(len(m) + 1)]
        rows[0] = list(range(len(n) + 1))
        for i, mi in enumerate(m):
            for j, nj in enumerate(n):
                delete = rows[i][j + 1] + 1
                insert = rows[i + 1][j] + 1
                substitute = rows[i][j] + (0 if mi == nj else 1)
                rows[i + 1].append(min(delete, insert, substitute))
        return rows[-1][-1]
class CaptchaImageAlgorithm(object):
    '''Pixel-level helpers for segmenting a binarized captcha image.

    All methods duck-type on PIL's interface: im.load() returns a 2-D
    pixel-access object and im.size == (width, height).  A pixel value of
    0 is "ink"; anything else is background.
    '''
    @staticmethod
    def _scan_edges(weights, limit):
        '''Turn a per-column/row ink-count sequence into (start, stop)
        intervals of consecutive non-blank positions.

        Bug fix vs. the original state machine: an interval still open at
        the end of the image (a glyph touching the border) is closed at
        *limit* instead of leaving an odd edge list that raised IndexError
        when paired.
        '''
        edges = []
        inside = False
        for pos, weight in enumerate(weights):
            if weight > 0 and not inside:
                inside = True
                edges.append(pos)
            elif weight == 0 and inside:
                inside = False
                edges.append(pos)
        if inside:
            edges.append(limit)
        return [(edges[k], edges[k + 1]) for k in range(0, len(edges), 2)]
    @staticmethod
    def GetPixelsXEdges(im):
        '''Return [(x_start, x_stop), ...] column intervals containing ink.'''
        pixels = im.load()
        xsize, ysize = im.size
        weights = [sum(1 for y in range(ysize) if pixels[x, y] == 0)
                   for x in range(xsize)]
        return CaptchaImageAlgorithm._scan_edges(weights, xsize)
    @staticmethod
    def GetPixelsYEdges(im):
        '''Return [(y_start, y_stop), ...] row intervals containing ink.'''
        pixels = im.load()
        xsize, ysize = im.size
        weights = [sum(1 for x in range(xsize) if pixels[x, y] == 0)
                   for y in range(ysize)]
        return CaptchaImageAlgorithm._scan_edges(weights, ysize)
    @staticmethod
    def StripYEdge(im):
        '''Crop away blank top/bottom margins, keeping the full width.'''
        yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
        y1, y2 = yedges[0][0], yedges[-1][1]
        return im.crop((0, y1, im.size[0], y2))
    @staticmethod
    def GetBinaryMap(im):
        '''Render the image as text, row-major: '#' = ink, '_' = background.'''
        xsize, ysize = im.size
        pixels = im.load()
        return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_'
                                 for x in range(xsize))
                         for y in range(ysize))
    @staticmethod
    def getBitMapIn(im):
        '''Flatten the image to a tuple of 0/255 values.

        NOTE(review): iteration is column-major (x outer, y inner), unlike
        row-major GetBinaryMap — preserved as-is since stored match
        features were presumably built with this order; confirm before
        changing.
        '''
        xsize, ysize = im.size
        pixels = im.load()
        return tuple(0 if pixels[x, y] == 0 else 255
                     for x in range(xsize) for y in range(ysize))
class CaptchaProfile(object):
    '''Abstract per-site captcha recipe: preprocess, segment, recognize.

    Subclasses must implement filter(), split() and match() — captcha()
    calls exactly those three.

    Fixes vs. the original: `raise NotImplemented` raised a TypeError
    (NotImplemented is a constant, not an exception) and the preprocess
    hook was misspelled `fiter` even though captcha() calls `filter`.
    The correctly-named hook is declared here; `fiter` is kept as a
    backward-compatible alias.
    '''
    def filter(self, im):
        '''Preprocess *im* (e.g. binarize/denoise) and return the result.'''
        raise NotImplementedError
    def fiter(self, im):
        # Backward-compatible alias for the original misspelled hook name.
        raise NotImplementedError
    def split(self, im):
        '''Cut *im* into one sub-image per character; return an iterable.'''
        raise NotImplementedError
    def match(self, im):
        '''Recognize a single-character image; return its string value.'''
        raise NotImplementedError
def feature_to_data(feature):
    '''Convert an ASCII-art feature template into a flat pixel tuple.

    All whitespace is stripped, then '#' becomes 0 (ink) and every other
    character becomes 255 (background).  The original ran two regex
    passes ([\\t\\s] then [\\r\\n]); \\s already matches tabs and newlines,
    so a single pass is equivalent.
    '''
    feature = re.sub(r'\s', '', feature)
    return tuple(0 if ch == '#' else 255 for ch in feature)
def captcha(filename, profile):
    '''Recognize the captcha image stored in *filename* using *profile*.

    Pipeline: open -> profile.filter (preprocess) -> profile.split
    (per-character segmentation) -> profile.match on each piece; the
    matched characters are concatenated into the final string.
    '''
    image = Image.open(filename)
    image = profile.filter(image)
    pieces = profile.split(image)
    return ''.join(profile.match(piece) for piece in pieces)
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c3"
# Egg download location, keyed by the running interpreter's major.minor version.
DEFAULT_URL = "http://cheeseshop.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests of released setuptools eggs, used by
# _validate_md5() to detect corrupted or tampered downloads.  The table is
# rewritten in place by update_md5() (run with --md5update).
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
}
import sys, os
def _validate_md5(egg_name, data):
    '''Check *data* against the known digest for *egg_name* (if any);
    exit with status 2 on mismatch, otherwise return *data* unchanged.'''
    if egg_name in md5_data:
        from md5 import md5  # legacy py2 module; predates hashlib
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    try:
        import setuptools
        # 0.0.1 is the marker version of the obsolete "setuptools" stub
        # package; it cannot be upgraded in place, so abort.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
                "You have an obsolete version of setuptools installed. Please\n"
                "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    except ImportError:
        # No setuptools at all: fetch the matching egg and bootstrap from it.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    import pkg_resources
    try:
        # Verify that whatever we ended up with satisfies the requirement.
        pkg_resources.require("setuptools>="+version)
    except pkg_resources.VersionConflict, e:
        # XXX could we install in a subprocess here?
        print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first.\n\n(Currently using %r)"
        ) % (version, e.args[0])
        sys.exit(2)
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename
    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn the user and give them `delay` seconds to abort
                # before any network access happens.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Always close both handles, even when validation exits.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: download an egg, run easy_install straight
        # out of it, then delete the temporary egg.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # tell the user to uninstall obsolete version
            use_setuptools(version)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed setuptools is too old: upgrade it via easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    from md5 import md5
    # Recompute the digest of each named egg file.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    # Splice the refreshed table back into this script's own source file
    # by replacing the body of the md5_data literal in place.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # "--md5update <eggs...>" refreshes the embedded digest table;
    # any other arguments are passed through to the installer.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class GomeAllSortParser(RootCatagoryPageParser):
    '''
    Collect every category from http://www.gome.com.cn/allSort.html and
    build ObuyUrlSummary objects for the level-3 (leaf) categories.
    '''
    mainHost = r'http://www.gome.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk the #allsort tree and return the list of leaf URL summaries.'''
        finalUrlList = []
        allSort = self.soup.find(attrs = {'id':'allsort'})
        for t in allSort.findAll(name = 'div',attrs = {'class':'m'}):# level-1 category block
            name,url = ParserUtils.parserTag_A(t.find(attrs = {'class':'mt'}).h2.a)
            url = ''.join((self.mainHost,url))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs = {'class':'mc'})
            for tt in sort_2(name='dl'):# level-2 categories
                if tt.dt.a is not None:
                    # A level-2 entry that is itself a link has no level-3
                    # children, so it is promoted directly to a leaf (level 3).
                    # NOTE(review): parserTag_A(tt.a) grabs the first <a>
                    # anywhere under the <dl>; presumably the same node as
                    # the tt.dt.a checked above — confirm.
                    name, url = ParserUtils.parserTag_A(tt.a)
                    sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                    sort_2_urlsum.catagoryLevel = 3
                    finalUrlList.append(sort_2_urlsum)
                    continue
                # Link-less level-2 heading: synthesize a placeholder URL
                # from the heading text (node is never crawled).
                name = tt.dt.getText().strip()
                url = '/'.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd.findAll(name = 'em'):# level-3 categories
                    name, url = ParserUtils.parserTag_A(ttt.a)
                    url = ''.join((self.mainHost,url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class GomeSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (product listing) pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Build a str.format() pattern for paging URLs: a page-number slot
        is spliced in before the file extension, yielding
        "...-0-0-0-0-0-0-0-1-1-{}.html".'''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        '''Read the total page count from the "current/total" pager widget.'''
        pageSeg = self.soup.find(name='div',attrs={'id':'toolbar'}).find(attrs={'class':'thispage'}).getText()
        totalPage = int(pageSeg.split('/')[-1])
        return totalPage
    def parserPageInfos(self):
        '''Extract one ProductDetails per <li> in the #plist product list.'''
        plist = self.soup.find(name='div', attrs={'id':'plist'}).find(name='ul')
        resultList = []
        for li in plist(name='li'):
            pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'p-name'}).a)
            # Product id is the file name of the detail-page URL.
            pid = url.rsplit('/',1)[-1].split('.')[0]
            url = ''.join((r'http://www.gome.com.cn',url))
            price = ParserUtils.getPrice(li.find(name='div', attrs={'class':'p-price'}).getText())
            prodDetail = ProductDetails(productId=pid, privPrice = price,name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
class GomeSort4PageParser(GomeSort3PageParser):
    '''Level-4 pages are plain listing pages: product details are extracted
    via the inherited behaviour, but no further sub-URLs are produced.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort4PageParser, self).__init__(
            dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Listing pages are leaves of the crawl: nothing to enqueue.
        return None
''' test '''
import os
# Fixture pages for the manual checks below live in ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath,'gomeAllsort.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome')
firstPage = GomeAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
pass
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Manual check: print the level-4 paging URLs derived from a saved
    level-3 listing fixture.'''
    fixture = os.path.join(testFilePath, '10000000-10000012-10000070.html')
    with open(fixture, 'r') as handle:
        page_html = handle.read()
    summary = ObuyUrlSummary(
        url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
        parentPath=[('test')], catagoryLevel=3)
    parser = GomeSort3PageParser(page_html, summary)
    for page_sum in parser.getSort4PageUrlSums():
        print(page_sum.url)
def testSort3Details():
    '''Manual check: print the product records parsed from a saved
    level-3 listing fixture.'''
    fixture = os.path.join(testFilePath, '10000000-10000012-10000070.html')
    with open(fixture, 'r') as handle:
        page_html = handle.read()
    summary = ObuyUrlSummary(
        url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
        parentPath=[('test')], catagoryLevel=3)
    for product in GomeSort3PageParser(page_html, summary).parserPageInfos():
        print(product.logstr())
if __name__ == '__main__':
    # Run all three manual fixture checks.
    testAllSortPage()
    testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Parser registry: category level -> parser class.
    level_parsers = {0: GomeAllSortParser, 3: GomeSort3PageParser, 4: GomeSort4PageParser}
    # Alternative seeds kept for manual experiments (unused below).
    sort3_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html', name='gome',
                                isRecursed=True, catagoryLevel=3)
    listing_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070-0-0-0-0-0-0-0-1-1-3.html',
                                  name='手机', catagoryLevel=4)
    # Actual crawl seed: the full category-tree page.
    all_sort_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome',
                                   isRecursed=True, catagoryLevel=0)
    tv_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/tv.html',
                             name='电视', catagoryLevel=1)
    gome_spider = ObuySpider(rootUrlSummary=all_sort_seed, parserDict=level_parsers,
                             include=None, exclude=None, threadNum=5)
    gome_spider.spide()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Parser registry: category level -> parser class.
    level_parsers = {0: GomeAllSortParser, 3: GomeSort3PageParser, 4: GomeSort4PageParser}
    # Alternative seeds kept for manual experiments (unused below).
    sort3_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html', name='gome',
                                isRecursed=True, catagoryLevel=3)
    listing_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070-0-0-0-0-0-0-0-1-1-3.html',
                                  name='手机', catagoryLevel=4)
    # Actual crawl seed: the full category-tree page.
    all_sort_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome',
                                   isRecursed=True, catagoryLevel=0)
    tv_seed = ObuyUrlSummary(url=r'http://www.gome.com.cn/tv.html',
                             name='电视', catagoryLevel=1)
    gome_spider = ObuySpider(rootUrlSummary=all_sort_seed, parserDict=level_parsers,
                             include=None, exclude=None, threadNum=5)
    gome_spider.spide()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from spider import ObuySpider
from amazon.amazonpageparser import AmazonSort3JsonParser, AmazonSort1Parser,\
AmazonSort2PageParser
from amazon.amazonpageparser import AmazonAllSortParser
if __name__ == '__main__':
parserDict = {0:AmazonAllSortParser,1:AmazonSort1Parser,2:AmazonSort2PageParser,3:AmazonSort3JsonParser}
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort1 = ObuyUrlSummary(url = ur'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dappliances&field-keywords=&x=17&y=11',name='amazon',
parentPath=[rootObuyUrlSummary],isRecursed = True,catagoryLevel = 1)
sort2 = ObuyUrlSummary(url = ur'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A80207071%2Cn%3A!80208071%2Cn%3A81948071&bbn=80208071&ie=UTF8&qid=1313135682&rnid=80208071',name='amazon',
parentPath=[rootObuyUrlSummary,sort1],isRecursed = True,catagoryLevel = 2)
spider = ObuySpider(rootUrlSummary = rootObuyUrlSummary,parserDict = parserDict,include =None,exclude = None,threadNum = 5)
spider.spide()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from spider import ObuySpider
from amazon.amazonpageparser import AmazonSort3JsonParser, AmazonSort1Parser,\
AmazonSort2PageParser
from amazon.amazonpageparser import AmazonAllSortParser
if __name__ == '__main__':
parserDict = {0:AmazonAllSortParser,1:AmazonSort1Parser,2:AmazonSort2PageParser,3:AmazonSort3JsonParser}
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort1 = ObuyUrlSummary(url = ur'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dappliances&field-keywords=&x=17&y=11',name='amazon',
parentPath=[rootObuyUrlSummary],isRecursed = True,catagoryLevel = 1)
sort2 = ObuyUrlSummary(url = ur'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A80207071%2Cn%3A!80208071%2Cn%3A81948071&bbn=80208071&ie=UTF8&qid=1313135682&rnid=80208071',name='amazon',
parentPath=[rootObuyUrlSummary,sort1],isRecursed = True,catagoryLevel = 2)
spider = ObuySpider(rootUrlSummary = rootObuyUrlSummary,parserDict = parserDict,include =None,exclude = None,threadNum = 5)
spider.spide()
| Python |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from copy import deepcopy
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Collect all categories from http://www.amazon.cn/gp/site-directory and
    build ObuyUrlSummary objects for them.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort2UrlSums(self):
        '''Walk the site directory: each popover grouping is a level-1
        category; each link in its sibling <div>s is a level-2 category.'''
        finalUrlList = []
        allSort = self.soup.find(attrs={"id":"siteDirectory"})
        for t in allSort.findAll(name='div', attrs={"class":"popover-grouping"}):# level-1 category
            name = t.find(name='div', attrs={"class":"popover-category-name"}).h2.getText()
            # NOTE(review): the level-1 "url" is mainHost + category *name*,
            # not a real link — it only serves as a tree node (isCrawle=False).
            url = ''.join((self.mainHost, name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSiblings(name='div')
            for tt in sort_2:# level-2 categories
                name, url = ParserUtils.parserTag_A(tt.a)
                url = ''.join((self.mainHost,url))
                # Skip the aggregate "所有..." ("all ...") shortcut entries.
                if name.startswith(u'所有'):
                    continue
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        '''Apply include/exclude filtering to the raw level-2 list.'''
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2Parser(RootCatagoryPageParser):
    '''
    Extract level-3 category links from a level-2 Amazon category page.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the widget heading contains "分类" ("category").
        return catName.find(u'分类') >= 0
    def getBaseSort3UrlSums(self):
        '''Collect .title links from each "unified_widget blurb" section.

        NOTE(review): the scan stops (`break`) at the first widget whose
        heading contains "分类", without processing it; if the intent was
        to *skip only* non-category widgets this looks inverted (expected
        `continue`) — confirm against a saved page before changing.
        '''
        finalUrlList = []
        allSort3 = self.soup.findAll(name='div', attrs={"class":"unified_widget blurb"})
        for alls3 in allSort3:
            if self.__isCat(alls3.h2.getText()):
                break
            for t in alls3.findAll(name='div',attrs={'class':'title'}):
                name, url = ParserUtils.parserTag_A(t.a)
                url = ''.join((self.mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
class AmazonSort3PageParser(Sort3PageParser):
    '''
    Parser for level-3 (search-result listing) pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        '''Extract one ProductDetails per result_<n> block: current price
        from the .newPrice span, original price from its <strike> ('0.00'
        when absent).  The product name doubles as the product id.'''
        resultList = []
        for prod in self.soup.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            pName, url = ParserUtils.parserTag_A(prod.find(name='div', attrs={'class':'title'}).a)
            pid = pName
            currentPrice = ParserUtils.getPrice(prod.find(name='div',attrs={'class':'newPrice'}).span.getText())
            bypastSeg = prod.find(name='div',attrs={'class':'newPrice'}).strike
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                        name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
    def __getNextPageUrl(self):
        '''Rebuild the next-page URL: Amazon pages via an AJAX fragment, so
        the '/gp/search...' href of #pagnNextLink is spliced onto the base
        of the current URL as a '#...' fragment.  Returns None on the
        last page.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            print url
            url = url.replace(r'/gp/search','#')
            baseUrl = self.rootUrlSummary.url.rsplit('#')[0]
            fullUrl = ''.join((baseUrl,url))
        return fullUrl
    def parserSubUrlSums(self):
        '''Return a single-element list holding the next page's summary (a
        deep copy of the current one with the url swapped), or [] when on
        the last page.'''
        result = self.__getNextPageUrl()
        if result is None:
            return []
        else:
            urlSum = deepcopy(self.rootUrlSummary)
            urlSum.url = result
            return [urlSum]
''' test '''
import os
# Fixture pages for the manual checks below live in ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'amazonSite.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/site-directory/ref=topnav_sad', name='Amazon')
include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
name='服务器', catagoryLevel=2)]
firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
for sort_2 in firstPage.parserSubUrlSums():
#for index, urlsum in enumerate(sort_3.parentPath):
#print '\t' * index, str(urlsum.getUrlSumAbstract())
print sort_2.url , sort_2.catagoryLevel
def testSort2Page():
    '''Manual check: print the level-3 category URLs parsed from a saved
    level-2 fixture page.'''
    fixture = os.path.join(testFilePath, '888465051.html')
    with open(fixture, 'r') as handle:
        page_html = handle.read()
    level2_sum = ObuyUrlSummary(url=r'http://www.amazon.cn/%E7%94%B5%E8%84%91%E5%8F%8A%E9%85%8D%E4%BB%B6/b/ref=sd_allcat_pc?ie=UTF8&node=888465051',
                                parentPath=[('test')], catagoryLevel=2)
    parser = AmazonSort2Parser(page_html, level2_sum)
    for level3 in parser.parserSubUrlSums():
        print(level3.url)
def testSort3Page():
    '''Manual check: print the next-page summaries derived from a saved
    level-3 result fixture.'''
    fixture = os.path.join(testFilePath, 'computer.html')
    with open(fixture, 'r') as handle:
        page_html = handle.read()
    level3_sum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                parentPath=[('test')], catagoryLevel=3)
    parser = AmazonSort3PageParser(page_html, level3_sum)
    for nxt in parser.parserSubUrlSums():
        print(nxt.url)
def testSort3Details():
    '''Manual check: print the product records parsed from a saved level-3
    result fixture.'''
    fixture = os.path.join(testFilePath, 'computer.html')
    with open(fixture, 'r') as handle:
        page_html = handle.read()
    level3_sum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                parentPath=[('test')], catagoryLevel=3)
    for product in AmazonSort3PageParser(page_html, level3_sum).parserPageInfos():
        print(product.logstr())
def testComment():
    '''Manual check: extract and print every HTML comment node in the
    fixture (Amazon hides lazy-loaded results inside comments).'''
    from BeautifulSoup import BeautifulSoup, Comment
    fixture = os.path.join(testFilePath, 'computer.html')
    with open(fixture, 'r') as handle:
        soup = BeautifulSoup(handle.read())
    for node in soup.findAll(text=lambda text: isinstance(text, Comment)):
        print(node.extract())
def testJson():
    '''Manual check for Amazon's AJAX paging payload: the response is a
    '&&&'-separated list of JSON objects; 'results-btf' carries an HTML
    fragment of further results, while 'results-atf-next' carries a
    fragment wrapped inside an HTML comment.'''
    import json
    fileName = os.path.join(testFilePath, 'watch_json.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    segList = content.split('&&&')
    # Collapse whitespace and drop empty segments before JSON-decoding.
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            # Plain HTML fragment: parse it directly.
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            soup = BeautifulSoup(jsonRet)
            # The fragment is wrapped in an HTML comment; extract() unwraps
            # it so it can be re-parsed as normal markup.
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort3PageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Only the JSON-paging check is currently enabled.
    #testAllSortPage()
    #testSort2Page()
    #testSort3Page()
    #testSort3Details()
    #testComment()
    testJson()
| Python |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from copy import deepcopy
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Build level-1 ObuyUrlSummary objects from www.amazon.cn by reading the
    search-scope dropdown (instead of the site directory).
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort1UrlSums(self):
        '''Each <option> of #searchDropdownBox holds a search alias; turn it
        into an empty-keyword search URL for that category.'''
        finalUrlList = []
        allSort = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        for t in allSort.findAll(name='option'):# level-1 categories
            searchAias = t['value']
            # Category name is the value after '=' in e.g. 'search-alias=baby'.
            name = searchAias.split('=')[-1]
            # 'aps' is the "all departments" pseudo-category; skip it.
            if name == 'aps':
                continue
            url = base_url.format(urllib.quote(searchAias))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary)
            finalUrlList.append(sort_1_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        '''Apply include/exclude filtering to the raw level-1 list.'''
        result = self.__getBaseSort1UrlSums()
        return self.filterUrlList(result)
class AmazonSort1Parser(RootCatagoryPageParser):
    '''
    Extract all level-2 categories from a level-1 search-results page.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort1Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the sidebar heading contains "类别" ("category").
        return catName.find(u'类别') >= 0
    def __getBaseSort2UrlSums(self):
        '''In the #refinements sidebar, locate the <h2> labelled 类别 and
        collect every link of the following <ul> as a level-2 category.'''
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        #refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
        #allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
        for catSeg in sort2(name='h2'):
            if self.__isCat(catSeg.getText().strip()):
                break
        # NOTE(review): relies on the loop variable after `break`; if no
        # heading matches, the last <h2> is used silently — confirm that
        # every refinements sidebar contains a 类别 heading.
        allSort2Seg = catSeg.findNextSibling(name='ul')
        for t in allSort2Seg.findAll(name='a'):
            name, url = ParserUtils.parserTag_A(t)
            url = ''.join((self.mainHost,url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
            finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        '''Apply include/exclude filtering to the raw level-2 list.'''
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2PageParser(Sort3PageParser):
    '''
    Second-level category page parser: extracts product summaries from the
    result grid and derives the ajax URL of the next result page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parseProduct(self, prod):
        '''Build a ProductDetails from one result <div>; returns None when
        the cell has no title block.'''
        titleSeg = prod.find(name='div', attrs={'class':'title'})
        if titleSeg is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        pid = pName  # NOTE(review): the product name doubles as its id -- confirm uniqueness
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            bypastSeg = priceSeg.strike  # <strike> holds the crossed-out old price
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                    name=pName, adWords='')
        return prodDetail
    def parserPageInfos(self):
        '''Collect ProductDetails from the visible result cells and from the
        extra cells Amazon hides inside the "results-atf-next" HTML comment.'''
        resultList = []
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        # The next batch of results ships inside an HTML comment; re-parse the
        # comment text as its own soup to pick those products up as well.
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    def __nextPagePattern(self):
        # NOTE(review): 'fromHash=§ion=' looks like a mangled '&section=' --
        # verify this template against a live request before relying on it.
        return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
    def __getNextPageUrl(self):
        '''Derive the ajax URL of the next result page from the pagnNextLink
        anchor; returns None on the last page.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # last 'n:...' component of the rh query parameter is the page type id
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
            ref = url.replace(r'/gp/search/','')
            # parentPath[1] is the first-level category; its name is the tab alias
            tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,tab,pageTypeID)
        return fullUrl
    def parserSubUrlSums(self):
        '''Return the next-page ObuyUrlSummary (level 3) or [] on the last page.'''
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            urlSum = self.buildSort_4(nextPageUrl)
            urlSum.catagoryLevel = 3
            return [urlSum]
class AmazonSort3JsonParser(Parser):
    '''
    Parse the '&&&'-separated JSON fragments returned by Amazon's ajax
    search pager and delegate the embedded HTML to AmazonSort2PageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3JsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # Default the three fragments so a response missing one of the keys
        # no longer leaves the attribute undefined (the original raised
        # AttributeError later in parserPageInfos/parserSubUrlSums).
        self.pageNextSeg = None
        self.resultsBtf = None
        self.resultsAtf = None
        # Fragments are separated by '&&&'; collapse whitespace and drop blanks.
        segList = [' '.join(seg.split()).strip() for seg in self.dataStr.split('&&&')]
        segList = [seg for seg in segList if seg != '']
        for jsonObj in (json.loads(seg) for seg in segList):
            if 'pagination' in jsonObj:  # 'in' instead of deprecated has_key()
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if 'results-btf' in jsonObj:
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if 'results-atf-next' in jsonObj:
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']
    def parserPageInfos(self):
        '''Return ProductDetails from both HTML fragments; missing ones are skipped.'''
        result = []
        for seg in (self.resultsBtf, self.resultsAtf):
            if seg is not None:
                result.extend(AmazonSort2PageParser(seg, self.rootUrlSummary).parserPageInfos())
        return result
    def parserSubUrlSums(self):
        '''Return the next-page summaries, or [] when no pagination fragment came back.'''
        if self.pageNextSeg is None:
            return []
        return AmazonSort2PageParser(self.pageNextSeg, self.rootUrlSummary).parserSubUrlSums()
''' test '''
# Fixture directory: saved Amazon pages/JSON used by the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'amazon.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
name='服务器', catagoryLevel=2)]
firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
for sort_1 in firstPage.parserSubUrlSums():
#for index, urlsum in enumerate(sort_3.parentPath):
#print '\t' * index, str(urlsum.getUrlSumAbstract())
print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
fileName = os.path.join(testFilePath, 'toys_games.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[('test')], catagoryLevel=1)
sort2Page = AmazonSort1Parser(content, sort_1_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
def testSort2Page():
fileName = os.path.join(testFilePath, 'amazon_2011-08-12_15-58-49.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort2Page = AmazonSort2PageParser(content, sort_2_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
for product in sort2Page.parserPageInfos():
print product.logstr()
def testSort3Details():
fileName = os.path.join(testFilePath, 'toys_games_1.json')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort3Page = AmazonSort3JsonParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testComment():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Exploratory check of the raw '&&&'-separated ajax JSON fragments:
    prints the pagination fragment and the products parsed out of the
    'results-btf' / 'results-atf-next' HTML payloads.'''
    import json
    fileName = os.path.join(testFilePath, 'toys_games_1.json')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # Fragments are separated by '&&&'; collapse whitespace and drop blanks.
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('pagination'):
            print jsonObj['pagination']['data']['value']
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort2PageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            # The product cells live inside an HTML comment of this fragment.
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonSort2PageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests; uncomment the one to run against the saved fixtures.
    # testAllSortPage()
    #testSort1Page()
    testSort2Page()
    #testSort3Details()
    #testComment()
    #testJson()
#/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051
#/gp/search/ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=131311239
#ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=1313112393&tab=toys-and-games&pageTypeID=1982054051&fromHash=&fromRH=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051§ion=BTF&fromApp=undefined&fromPage=undefined&version=2
#ref=sr_pg_3?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=3&bbn=647071051&ie=UTF8&qid=1313112553&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_2%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D2%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112393§ion=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2
#ref=sr_pg_5?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=5&bbn=647071051&ie=UTF8&qid=1313112793&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_4%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D4%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112677§ion=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2 | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from dangpageparser import DangDangAllSortParser,DangDangSort3PageParser,DangDangSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider for that level.
    parserDict = {0: DangDangAllSortParser, 3: DangDangSort3PageParser, 4: DangDangSort4PageParser}
    # Alternative entry points kept from earlier runs (currently unused):
    sort3 = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4002134&store=eq0', name='dangdang',
                           isRecursed=True, catagoryLevel=3)
    digitRoot = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital', catagoryLevel=4)
    dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang',
                                  isRecursed=True, catagoryLevel=0)
    pserver = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉', catagoryLevel=2)
    crawler = ObuySpider(rootUrlSummary=dangdangRoot, parserDict=parserDict, include=None, exclude=None, threadNum=10)
    crawler.spide()
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from dangpageparser import DangDangAllSortParser,DangDangSort3PageParser,DangDangSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider for that level.
    parserDict = {0: DangDangAllSortParser, 3: DangDangSort3PageParser, 4: DangDangSort4PageParser}
    # Alternative entry points kept from earlier runs (currently unused):
    sort3 = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4002134&store=eq0', name='dangdang',
                           isRecursed=True, catagoryLevel=3)
    digitRoot = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital', catagoryLevel=4)
    dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang',
                                  isRecursed=True, catagoryLevel=0)
    pserver = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉', catagoryLevel=2)
    crawler = ObuySpider(rootUrlSummary=dangdangRoot, parserDict=parserDict, include=None, exclude=None, threadNum=10)
    crawler.spide()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class DangDangAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://category.dangdang.com/ and flatten its three category
    levels into third-level ObuyUrlSummary objects (books are skipped).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk the level-1 blocks, their level-2 <li> entries and the level-3
        anchors below each entry; return only the level-3 summaries.'''
        sort3Sums = []
        mainBody = self.soup.find(attrs={'class':'categories_mainBody'})
        for levelOneSeg in mainBody.findAll(name='div', attrs={'id':re.compile(r'[a-z]*')}):
            levelOneName = levelOneSeg['id']
            if levelOneName == 'book':
                # book categories are intentionally not crawled
                continue
            levelOneUrl = r'http://category.dangdang.com/' + levelOneName
            levelOneSum = self.buildSort_N(levelOneUrl, levelOneName, self.rootUrlSummary, isCrawle=False)
            detailSeg = levelOneSeg.find(attrs={'class': levelOneName + '_details'})
            for levelTwoSeg in detailSeg(name='li'):
                levelTwoName, levelTwoUrl = ParserUtils.parserTag_A(levelTwoSeg.a)
                levelTwoSum = self.buildSort_N(levelTwoUrl, levelTwoName, levelOneSum, isCrawle=False)
                for anchor in levelTwoSeg.a.findNextSiblings(name='a'):
                    levelThreeName, levelThreeUrl = ParserUtils.parserTag_A(anchor)
                    # 'store=eq0' is appended to every level-3 listing URL
                    # (presumably restricts to dangdang's own store -- confirm).
                    levelThreeUrl = levelThreeUrl + '&store=eq0'
                    sort3Sums.append(self.buildSort_N(levelThreeUrl, levelThreeName, levelTwoSum))
        return sort3Sums
class DangDangSort3PageParser(Sort3PageParser):
    '''
    Third-level page parser: derives pagination URLs and product details
    from a dangdang listing page (handles two page layouts).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Return a format template appending "&p=<page>" to the listing URL.'''
        pageSeg = 'p={}'
        return '%s&%s' % (self.rootUrlSummary.url,pageSeg)
    def getTotal(self):
        '''Return the total page count, parsed from the "共N页" text; falls
        back to an alternative layout and finally to 1 page.'''
        regx = u'共([0-9]*)页'
        p = re.compile(regx)
        s = self.soup.find(name='span',attrs = {'id':'all_num'})
        if s is None: # fallback layout (see fixture dangdang_2011-08-04_10-00-04.html)
            st = self.soup.find(name='input',attrs = {'id':'jumpto'})
            if st != None:
                s = st.findNextSibling(name='span')
        if s is None:
            return 1
        pageNum = s.getText()
        return int(p.search(pageNum).group(1))
    def parserPageInfos(self):
        '''Return a ProductDetails per product cell; supports both the
        "mode_goods" grid layout and the flat "listitem " layout.'''
        plist = self.soup.find(name='ul',attrs={'class':'mode_goods clearfix'})
        resultList = []
        if plist is None:
            # note the trailing space in 'listitem ' -- it matches the page markup
            prodSeg = self.soup.findAll(attrs = {'class':'listitem '})
        else:
            prodSeg = plist.findAll(name='li')
        for prod in prodSeg:
            pNameSeg = prod.find(attrs={'class':'name'})
            if pNameSeg is None:
                pNameSeg = prod.find(attrs={'class':'title'})
            pName,url = ParserUtils.parserTag_A(pNameSeg.a)
            pid = url.rsplit('=',1)[-1]
            # price_d = discounted (current) price, price_m = market (old) price
            # NOTE(review): the 0.00 float defaults differ from getPrice's
            # return type (string elsewhere) -- confirm downstream handling.
            t = prod.find(attrs={'class':'price_d'})
            if t != None :
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00
            t = prod.find(attrs={'class':'price_m'})
            if t != None:
                pastPrice = ParserUtils.getPrice(t.getText())
            else:
                pastPrice = 0.00
            prodDetail = ProductDetails(productId=pid, privPrice = currentPrice,pubPrice=pastPrice,
                                        name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
class DangDangSort4PageParser(DangDangSort3PageParser):
    '''
    Fourth-level (plain listing) page: only products are extracted, so the
    sub-URL scan is a no-op.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Listing pages never yield further category URLs.
        return None
''' test '''
import os
# Fixture directory with saved dangdang pages for the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
fileName = os.path.join(testFilePath,'dangcat.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
name='奶粉',catagoryLevel = 2)
firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
parentPath=[('test')], catagoryLevel=3)
sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
parentPath=[('test')], catagoryLevel=3)
sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
def testRegx():
regx = u'共([0-9]*)页'
p = re.compile(regx)
fileName = os.path.join(testFilePath,'4001011.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
content = content.decode('gb18030','ignore')
print p.search(s).group(1)
if __name__ == '__main__':
    # Manual smoke tests; uncomment the ones to run against saved fixtures.
    #testRegx()
    #testDangDangAllSortPage()
    testSort3Page()
    testSort3Details()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-07-11
日志工厂类
@author: zhongfeng
'''
import logging.config
import os
class LoggerFactory(object):
    '''
    Singleton wrapper around logging.config: the first use loads the
    'logging.conf' file sitting next to this module, later uses reuse
    that configuration.
    '''
    # Lazily-created singleton whose construction triggers configuration.
    _loggerFac = None
    def __init__(self):
        pass
    def __new__(cls):
        '''Singleton: configure logging from logging.conf exactly once.'''
        if '_inst' not in vars(cls):
            modPath = os.path.dirname(__file__)
            logCfg = os.path.join(modPath, 'logging.conf')
            logging.config.fileConfig(logCfg)
            cls._inst = super(LoggerFactory, cls).__new__(cls)
        return cls._inst
    @classmethod
    def getLogger(cls, logName='root'):
        '''Return the named logger, configuring logging on first call.
        A class may be passed instead of a name; its __name__ is used then.'''
        if cls._loggerFac is None:  # was '== None'; identity test is the correct idiom
            cls._loggerFac = LoggerFactory()
        if isinstance(logName, type):  # a class was passed -- use its name
            logName = logName.__name__
        return logging.getLogger(logName)
    def __del__(self):
        # Flush and close all handlers when the factory is garbage collected.
        logging.shutdown()
def testMutiThread():
from threadpool import ThreadPool,WorkRequest
def printlog(msg):
logger = LoggerFactory.getLogger()
logger.info('-'.join([msg,ctime()]))
urls = (r'http://www.360buy.com/product/{}.html'.format(str(proid)) for proid in xrange(1,14000))
#print urls
#requests = makeRequests(printlog,urls)
print "Creating thread pool with 3 worker threads."
main = ThreadPool(3)
[main.putRequest(WorkRequest(printlog,[url])) for url in urls ]
main.wait()
if __name__ == '__main__':
    from time import ctime
    # Quick manual check: emit ten records through the '360buy' logger.
    for t in range(10):
        logger = LoggerFactory.getLogger('360buy')
        logger.info(' %d this is a test %s' % ( t, ctime() ))
| Python |
import os, marshal, thread
# Filename used for index files, must not contain numbers
INDEX_FILENAME = 'index'

class Empty(Exception):
    """Raised by PersistentQueue.get() when the queue holds no items."""
    pass

class PersistentQueue:
    """Disk-backed FIFO queue.

    Items live in fixed-size marshalled segment files inside the directory
    'name'; only the head and tail segments are cached in memory.  All
    public methods are serialized through a single lock.
    """
    def __init__(self, name, cache_size=512, marshal=marshal):
        """
        Create a persistent FIFO queue named by the 'name' argument.

        The number of cached queue items at the head and tail of the queue
        is determined by the optional 'cache_size' parameter.  By default
        the marshal module is used to (de)serialize queue items, but you
        may specify an alternative serialize module/instance with the
        optional 'marshal' argument (e.g. pickle).
        """
        assert cache_size > 0, 'Cache size must be larger than 0'
        self.name = name
        self.cache_size = cache_size
        self.marshal = marshal
        self.index_file = os.path.join(name, INDEX_FILENAME)
        self.temp_file = os.path.join(name, 'tempfile')
        self.mutex = thread.allocate_lock()
        self._init_index()

    def _init_index(self):
        # Read (or create) the index file holding "<head> <tail>" segment
        # numbers, then load both boundary segments into memory.
        if not os.path.exists(self.name):
            os.mkdir(self.name)
        if os.path.exists(self.index_file):
            index_file = open(self.index_file)
            # (removed a leftover debug print of the index path here)
            self.head, self.tail = map(lambda x: int(x),
                                       index_file.read().split(' '))
            index_file.close()
        else:
            self.head, self.tail = 0, 1
        def _load_cache(cache, num):
            # Bind segment file 'num' to attribute 'cache' ([] when empty/new).
            name = os.path.join(self.name, str(num))
            mode = 'rb+' if os.path.exists(name) else 'wb+'
            cachefile = open(name, mode)
            try:
                setattr(self, cache, self.marshal.load(cachefile))
            except EOFError:
                setattr(self, cache, [])
            cachefile.close()
        _load_cache('put_cache', self.tail)
        _load_cache('get_cache', self.head)
        assert self.head < self.tail, 'Head not less than tail'

    def _sync_index(self):
        # Atomically rewrite the index via a temp file + rename.
        assert self.head < self.tail, 'Head not less than tail'
        index_file = open(self.temp_file, 'w')
        index_file.write('%d %d' % (self.head, self.tail))
        index_file.close()
        if os.path.exists(self.index_file):
            os.remove(self.index_file)
        os.rename(self.temp_file, self.index_file)

    def _split(self):
        # Flush the tail cache to its segment file and start a new tail.
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)
        self.tail += 1
        if len(self.put_cache) <= self.cache_size:
            self.put_cache = []
        else:
            self.put_cache = self.put_cache[:self.cache_size]
        self._sync_index()

    def _join(self):
        # Advance the head to the next segment (or adopt the tail cache
        # directly when head and tail meet).
        current = self.head + 1
        if current == self.tail:
            self.get_cache = self.put_cache
            self.put_cache = []
        else:
            get_file = open(os.path.join(self.name, str(current)), 'rb')
            self.get_cache = self.marshal.load(get_file)
            get_file.close()
        try:
            # Best effort only -- the stale head segment may already be gone.
            os.remove(os.path.join(self.name, str(self.head)))
        except OSError:  # was a bare 'except:'; only file errors are expected
            pass
        self.head = current
        if self.head == self.tail:
            self.head = self.tail - 1
        self._sync_index()

    def _sync(self):
        # Persist the index plus both boundary segments (atomic renames).
        self._sync_index()
        get_file = os.path.join(self.name, str(self.head))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.get_cache, temp_file)
        temp_file.close()
        if os.path.exists(get_file):
            os.remove(get_file)
        os.rename(self.temp_file, get_file)
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)

    def __len__(self):
        """
        Return number of items in queue.
        """
        self.mutex.acquire()
        try:
            # Full middle segments hold cache_size items each; the two
            # boundary segments are counted from their in-memory caches.
            return (((self.tail-self.head)-1)*self.cache_size) + \
                   len(self.put_cache) + len(self.get_cache)
        finally:
            self.mutex.release()

    def sync(self):
        """
        Synchronize memory caches to disk.
        """
        self.mutex.acquire()
        try:
            self._sync()
        finally:
            self.mutex.release()

    def put(self, obj):
        """
        Put the item 'obj' on the queue.
        """
        self.mutex.acquire()
        try:
            self.put_cache.append(obj)
            if len(self.put_cache) >= self.cache_size:
                self._split()
        finally:
            self.mutex.release()

    def get(self):
        """
        Get an item from the queue.
        Throws Empty exception if the queue is empty.
        """
        self.mutex.acquire()
        try:
            if len(self.get_cache) > 0:
                return self.get_cache.pop(0)
            else:
                self._join()
                if len(self.get_cache) > 0:
                    return self.get_cache.pop(0)
                else:
                    raise Empty
        finally:
            self.mutex.release()

    def close(self):
        """
        Close the queue.  Implicitly synchronizes memory caches to disk.
        No further accesses should be made through this queue instance.
        """
        self.mutex.acquire()
        try:
            self._sync()
            if os.path.exists(self.temp_file):
                try:
                    os.remove(self.temp_file)
                except OSError:  # best-effort cleanup
                    pass
        finally:
            self.mutex.release()
## Tests
if __name__ == "__main__":
    # Manual demo of PersistentQueue.
    # NOTE(review): the first loop dequeues before anything was enqueued --
    # it raises Empty on a fresh 'test' directory and presumably relies on
    # leftover state from a previous run; confirm before using.
    # NOTE(review): sleep(1) per item makes the enqueue loop take ~17 min.
    ELEMENTS = 1000
    p = PersistentQueue('test', 1)
    print 'Enqueueing %d items, cache size = %d' % (ELEMENTS,
                                                    p.cache_size)
    for a in range(ELEMENTS/2):
        print p.get()
    from time import sleep
    for a in range(ELEMENTS):
        sleep(1)
        print a
        p.put(str(a))
    p.sync()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    p.sync()
    p.close()
| Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from neweggpageparser import NewEggAllSortParser,NewEggSort3PageParser,NewEggSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider for that level.
    parserDict = {0: NewEggAllSortParser, 3: NewEggSort3PageParser, 4: NewEggSort4PageParser}
    # Alternative entry points kept from earlier runs (currently unused):
    sort3 = ObuyUrlSummary(url=u'http://www.newegg.com.cn/SubCategory/1046.htm?pageSize=96', name='newegg',
                           isRecursed=True, catagoryLevel=3)
    digitRoot = ObuyUrlSummary(url=u'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital', catagoryLevel=4)
    newEggRoot = ObuyUrlSummary(url=u'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
    pserver = ObuyUrlSummary(url=u'http://www.newegg.com.cn/Category/536.htm',
                             name='服务器', catagoryLevel=2)
    crawler = ObuySpider(rootUrlSummary=newEggRoot, parserDict=parserDict, include=None, exclude=None, threadNum=5)
    crawler.spide()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
class NewEggAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.newegg.com.cn/CategoryList.htm and flatten its three
    category levels into third-level ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.newegg.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk level-1 headings (id="pdNN"), the <dt> level-2 entries of the
        <dl> following each, and the <dd> level-3 anchors below those.'''
        sort3Sums = []
        catRoot = self.soup.find(name='div', attrs={'class':'allCateList'})
        for levelOneSeg in catRoot.findAll(attrs={'id':re.compile('pd[0-9]+')}):
            levelOneName = levelOneSeg.getText()
            levelOneUrl = r'http://www.newegg.com.cn/CategoryList.htm' + '#' + levelOneSeg['id']
            levelOneSum = self.buildSort_N(levelOneUrl, levelOneName, self.rootUrlSummary, isCrawle=False)
            for levelTwoSeg in levelOneSeg.findNextSibling(name='dl')(name='dt'):
                levelTwoName, levelTwoUrl = ParserUtils.parserTag_A(levelTwoSeg.a)
                levelTwoSum = self.buildSort_N(levelTwoUrl, levelTwoName, levelOneSum, isCrawle=False)
                for anchor in levelTwoSeg.findNextSibling(name='dd').findAll(name='a'):
                    levelThreeName, levelThreeUrl = ParserUtils.parserTag_A(anchor)
                    # Ask for the largest page size so fewer pages need fetching.
                    levelThreeUrl = levelThreeUrl + '?pageSize=96'
                    sort3Sums.append(self.buildSort_N(levelThreeUrl, levelThreeName, levelTwoSum))
        return sort3Sums
class NewEggSort3PageParser(Sort3PageParser):
    '''
    Third-level page parser: derives pagination URLs and product details
    from a newegg listing page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Return a format template inserting "-<page>" before the ".htm" suffix.'''
        base, suffix = self.rootUrlSummary.url.rsplit('.', 1)
        return '%s%s.%s' % (base, '-{}', suffix)
    def getTotal(self):
        '''Return the page count shown as "current/total" in the page navigator.'''
        navText = self.soup.find(name='div', attrs={'class':'pageNav'}).find(name='ins').getText()
        return int(navText.split('/')[-1])
    def parserPageInfos(self):
        '''Return a ProductDetails for every product cell of the item grid.'''
        details = []
        itemGrid = self.soup.find(attrs={'id':'itemGrid1'})
        for cell in itemGrid.findAll(attrs={'class':'itemCell noSeller'}):
            prodName, prodUrl = ParserUtils.parserTag_A(cell.find(name='p', attrs={'class':'info'}).a)
            # the numeric product id precedes '.htm' in the detail-page URL
            prodId = prodUrl.rsplit('/', 1)[-1].split('.')[0]
            salePrice = ParserUtils.getPrice(cell.find(attrs={'class':'current'}).strong.getText())
            originalSeg = cell.find(attrs={'class':'bypast'})  # crossed-out old price
            originalPrice = '0.00' if originalSeg is None else ParserUtils.getPrice(originalSeg.getText())
            details.append(ProductDetails(productId=prodId, privPrice=salePrice, pubPrice=originalPrice,
                                          name=prodName, adWords=''))
        return details
class NewEggSort4PageParser(NewEggSort3PageParser):
    '''
    Fourth-level (plain listing) page: only products are extracted, so the
    sub-URL scan is a no-op.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Listing pages never yield further category URLs.
        return None
''' test '''
import os
# Fixture directory with saved newegg pages for the manual tests below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testNewEggAllSortPage():
fileName = os.path.join(testFilePath,'CategoryList.htm')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
include = [ ObuyUrlSummary(url = r'http://http://www.newegg.com.cn/Category/536.htm',
name='服务器',catagoryLevel = 2)]
firstPage = NewEggAllSortParser(content, rootUrlSum,include = include)
for sort_3 in firstPage.getBaseSort3UrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print '\t'*index,str(urlsum.getUrlSumAbstract())
print sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'newegg_2011-08-12_17-32-49.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1043.htm?pageSize=96',
parentPath=[('test')], catagoryLevel=3)
sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'newegg_2011-08-12_17-32-49.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1043.htm?pageSize=96',
parentPath=[('test')], catagoryLevel=3)
sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests; uncomment the ones to run against saved fixtures.
    #testNewEggAllSortPage()
    #testSort3Page()
    testSort3Details()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.