import re
import sys
import threading
import time

import MySQLdb
import win32com.client
from bs4 import BeautifulSoup
from selenium import webdriver
def mydebug():
    """Debug helper: shut down the shared webdriver session and exit.

    Relies on the module-level `driver` created in the main script.
    """
    driver.quit()
    # sys.exit raises SystemExit instead of relying on the builtin exit(),
    # which is only guaranteed to exist in interactive sessions.
    sys.exit(0)
def catchdate(s):
    """Extract purchase-date fragments from one JD review page.

    Parameters:
        s: raw HTML source of a review page.

    Returns:
        A list of the `.contents` found after the <br> inside each
        <div class="date-buy"> element.

    Side effect: increments the module-level `nowtimes` progress counter
    once per page processed.
    """
    global nowtimes
    # Explicit parser avoids bs4's "no parser specified" warning and keeps
    # results stable across environments.
    soup = BeautifulSoup(s, 'html.parser')
    z = []
    # Each review's purchase date sits in a <div class="date-buy">.
    for obj in soup.find_all('div', class_='date-buy'):
        try:
            tmp = obj.find('br').contents
        except AttributeError:
            # No <br> in this div: obj.find() returned None — skip it.
            continue
        if tmp:
            z.append(tmp)
    nowtimes += 1
    return z
def gettimes(n, t):
    """Return a progress string for n completed out of t total requests.

    Parameters:
        n: number of pages fetched so far.
        t: total number of requests planned (kept as a float by the caller
           so the division is non-truncating).

    Returns:
        A string like "当前进度为:42%" (percentage truncated to an int).
    """
    return '当前进度为:' + str(int(100 * n / t)) + '%'
# ----------------------------| Program start |----------------------------
# Book category ids mapped to their category names (currently informational).
cate = {3273: '历史', 3279: '心理学', 3276: '政治军事', 3275: '国学古籍',
        3274: '哲学宗教', 3277: '法律', 3280: '文化', 3281: '社会科学'}

# Resume support: skip books until bookid `num1` is seen, and start that
# book at page `num2` instead of page 0.
num1 = input('bookid:')
num2 = int(input('pagenumber:'))

# Category links total 17355 * 20 = 347100 requests; float so the progress
# division in gettimes() is non-truncating.
totaltimes = 347100.0
nowtimes = 0

# Start the webdriver (IE; PhantomJS/Chrome alternatives kept for reference).
# driver = webdriver.PhantomJS()
driver = webdriver.Ie(r'c:\python27\scripts\iedriverserver')
# driver = webdriver.Chrome(r'c:\python27\scripts\chromedriver')

# Connect to the local MySQL database that lists the review pages to crawl.
try:
    conn = MySQLdb.connect(host='localhost', user='root', passwd='', db='jd')
except Exception as e:
    print(e)
    sys.exit()

cursor = conn.cursor()
cursor.execute('select * from booknew order by pagenumber desc')
alldata = cursor.fetchall()

flag = 0   # set to 1 once the resume bookid has been reached
flag2 = 0  # set to 1 once the resume page offset has been consumed
# Example review URL: http://club.jd.com/review/10178500-1-154.html
if alldata:
    for rec in alldata:
        # rec[0] -> bookid, rec[1] -> cateid, rec[2] -> pagenumber
        if rec[0] != str(num1) and flag == 0:
            continue  # still before the resume point
        flag = 1
        for p in range(num2, rec[2]):
            if flag2 == 0:
                # Only the resumed book starts at num2; later books start at 0.
                num2 = 0
                flag2 = 1
            p += 1  # review pages are 1-based
            link = ('http://club.jd.com/review/' + rec[0] + '-1-'
                    + str(p) + '.html')
            # Fetch the rendered page and extract the purchase dates.
            driver.get(link)
            html = driver.page_source
            buydate = catchdate(html)
            # Store each date. Parameterized query avoids SQL injection and
            # quoting bugs; column order matches the values (the original
            # concatenated form swapped cateid and bookid).
            for z in buydate:
                try:
                    cursor.execute(
                        'insert into ljj (id, bookid, cateid, date) '
                        'values (null, %s, %s, %s)',
                        (rec[0], rec[1], z[0]))
                except Exception as e:
                    print(e)
            conn.commit()
            print(gettimes(nowtimes, totaltimes))

driver.quit()
cursor.close()
conn.close()