#coding=gbk
import os
import sys
import re
import time
import urllib2
def perror_and_exit(message, status = -1):
    """Write an error line to stderr, then terminate the process with *status*."""
    err_line = message + '\n'
    sys.stderr.write(err_line)
    sys.exit(status)
def get_text_from_html_tag(html):
    """Return the text content of the first HTML element in *html*.

    Finds the shortest span between a '>' and the following '</'
    (e.g. '<td> foo </td>' -> '> foo </'), drops the surrounding
    '>' and '</' via [1:-2], and strips whitespace.

    NOTE(review): the regex literal was destroyed in the pasted source
    (only 'r>.*?' survived); reconstructed as r">.*?</" to match the
    [1:-2] slice that is still visible. Raises IndexError when *html*
    contains no such span (original behavior preserved).
    """
    pattern_text = re.compile(r">.*?</", re.DOTALL | re.MULTILINE)
    return pattern_text.findall(html)[0][1:-2].strip()
# NOTE(review): this function was mangled by a blog copy/paste. String
# quotes and regex literals were stripped, and the whole fetch/parse
# middle section (the urllib2.urlopen retry loop and the <td>-matching
# code that fills match_td, company_name, company_properties and
# company_icp) is missing. Left byte-identical; it must be reconstructed
# against the original article before it can run.
def parse_alexa(url):
# Build the ICP-lookup query URL; quotes were stripped — presumably
# "http://icp.alexa.cn/index.php?q=%s" % url. TODO confirm.
url_alexa = http://icp.alexa.cn/index.php?q=%s % url
print url_alexa
#handle exception
# Retry counter for the fetch loop below.
times = 0
# NOTE(review): loop header fused with the tail of an unrelated slice
# expression; the loop body (fetch, sleep-on-failure, table parsing)
# is lost here.
while times ) + 1:]
# Extract fields from the matched table cells. match_td is defined in
# the missing section above — presumably the list of <td>...</td>
# fragments from the result row. TODO confirm indices 4/5/7.
company_website_name = get_text_from_html_tag(match_td[4])
company_website_home_page = get_text_from_html_tag(match_td[5])
# Keep only the text after the last '>' (quote around '>' was stripped).
company_website_home_page = company_website_home_page[company_website_home_page.rfind(>) + 1:]
company_detail_url = get_text_from_html_tag(match_td[7])
# NOTE(review): regex literal mangled — presumably r'href=".*?"' with
# re.DOTALL | re.MULTILINE (lowercase flag names here would not run).
pattern_href = re.compile(rhref=\.*?\, re.dotall | re.multiline)
match_href = pattern_href.findall(company_detail_url)
if len(match_href) == 0:
# No link found: fall back to an empty string (literal stripped).
company_detail_url =
else:
# Strip the surrounding href=" ... " from the first match.
company_detail_url = match_href[0][len(href=\):-1]
# Return the record as a flat list, one element per output column.
return [url, company_name, company_properties, company_icp, company_website_name, company_website_home_page, company_detail_url]
pass
if __name__ == "__main__":
    # Read one URL per line from stdin, look each up via parse_alexa,
    # and append the record to out.txt as a tab-separated line.
    # NOTE(review): quotes in this block were stripped by the paste;
    # reconstructed as "out.txt", "\t", "\n". Fixes vs. original:
    # - open() context manager instead of the py2-only file() builtin,
    #   which was never closed;
    # - strip the trailing newline from each stdin line so it is not
    #   embedded in the query URL.
    with open("out.txt", "w") as fw:
        for url in sys.stdin:
            fw.write("\t".join(parse_alexa(url.strip())) + "\n")
#coding=gbk
import os
import sys
import re
import time
import urllib2
def perror_and_exit(message, status = -1):
    """Report *message* on stderr and abort with exit code *status* (default -1)."""
    sys.stderr.write(message + '\n')
    raise SystemExit(status)
def get_text_from_html_tag(html):
    """Return the text content of the first HTML element in *html*.

    Matches the shortest '>...</' span, drops the delimiters via the
    [1:-2] slice, and strips surrounding whitespace.

    NOTE(review): the regex literal was destroyed in the pasted source;
    reconstructed as r">.*?</" from the surviving [1:-2] slice. Raises
    IndexError when no element text is found (original behavior).
    """
    pattern_text = re.compile(r">.*?</", re.DOTALL | re.MULTILINE)
    return pattern_text.findall(html)[0][1:-2].strip()
# NOTE(review): duplicated copy of parse_alexa, equally mangled by the
# blog copy/paste. Quotes and regex literals were stripped and the
# fetch/parse middle section (urllib2 retry loop; the code that fills
# match_td, company_name, company_properties, company_icp) is missing.
# Left byte-identical pending reconstruction from the original article.
def parse_alexa(url):
# Query URL — presumably "http://icp.alexa.cn/index.php?q=%s" % url.
url_alexa = http://icp.alexa.cn/index.php?q=%s % url
print url_alexa
#handle exception
# Retry counter for the (missing) fetch loop.
times = 0
# NOTE(review): loop header fused with the tail of a slice expression;
# loop body lost in the paste.
while times ) + 1:]
# Pull fields out of the matched <td> fragments; match_td comes from
# the missing section above. TODO confirm indices 4/5/7.
company_website_name = get_text_from_html_tag(match_td[4])
company_website_home_page = get_text_from_html_tag(match_td[5])
# Keep only the text after the last '>' (the quote was stripped).
company_website_home_page = company_website_home_page[company_website_home_page.rfind(>) + 1:]
company_detail_url = get_text_from_html_tag(match_td[7])
# NOTE(review): mangled regex — presumably r'href=".*?"'; lowercase
# flag names as written would raise AttributeError.
pattern_href = re.compile(rhref=\.*?\, re.dotall | re.multiline)
match_href = pattern_href.findall(company_detail_url)
if len(match_href) == 0:
# No link: fall back to an empty string (literal stripped).
company_detail_url =
else:
# Strip the href=" prefix and trailing quote from the first match.
company_detail_url = match_href[0][len(href=\):-1]
# One flat list per record, one element per output column.
return [url, company_name, company_properties, company_icp, company_website_name, company_website_home_page, company_detail_url]
pass
if __name__ == "__main__":
    # Read one URL per line from stdin, look each up via parse_alexa,
    # and append the record to out.txt as a tab-separated line.
    # NOTE(review): quotes stripped by the paste — reconstructed as
    # "out.txt", "\t", "\n"; the '[python] view plaincopyprint?' residue
    # and stray pass statements were blog artifacts and are dropped.
    # Fixes vs. original: open() context manager instead of the
    # py2-only, never-closed file() builtin; strip the trailing newline
    # from each stdin line before building the query URL.
    with open("out.txt", "w") as fw:
        for url in sys.stdin:
            fw.write("\t".join(parse_alexa(url.strip())) + "\n")
            # Throttle between fetches to reduce the chance of an IP
            # ban (per the author's note below; it may still happen).
            time.sleep(2)
每次抓取都会sleep 2s,防止ip被封,实际上即使sleep了ip过一段时间还是会被封
由于是结构化抓取,当网站格式变化此程序将无法使用