#交代:代码凌乱,新手一个,论坛都是高手,我也是鼓了很大勇气,发出来就是被批评和进步的
from urllib import request
from urllib import request
from urllib import error
import io
import sys
import re
from functools import reduce
import xlrd,xlwt
import numpy as np
import time
from xlutils.copy import copy
from openpyxl import Workbook
from openpyxl import load_workbook
#python 35
#
class Spider():
    """Scrape article bodies by id from a mobile site and write them into Excel.

    Class attributes are module-load-time state shared by all instances;
    several are leftovers from an abandoned xlwt-based write path.
    """
    # Leftover from the abandoned xlwt write path; never used by go().
    new_excel_file = xlwt.Workbook(encoding='utf-8')
    # Opened at class-definition time but never read anywhere in this class —
    # NOTE(review): presumably leftover; confirm before removing.
    readbook1 = xlrd.open_workbook(r'C:\\Users\\Administrator\\Desktop\\mingshi1.xlsx')
    # Base URL (redacted by the author); the article id is appended as the query value.
    url = 'http://m******?id='
    # Captures the inner HTML of <div id="artDiv" ...>; [\s\S]*? is a dot-all
    # non-greedy match so the body may span multiple lines.
    one_pattern = '<div id="artDiv" style="border:0; background-color:#fff; font-size:14px;line-height:180%">([\s\S]*?)</div>'
    # Column counter for the commented-out xlwt write path; go() shadows it with a local.
    lieshu=0
    #one1_pattern = '>([\s\S]*?)<'
def fetch_content(self,url1):#正则匹配
while(1):
try:
r = request.urlopen(url1)
htmls = r.read()
htmls = str(htmls,encoding='utf-8')
#print(htmls)
return htmls
break
except error.HTTPError as e:
print(e.code)
self.Error_input()
except error.URLError as e:
print(e.reason)
self.Error_input()
def Error_input(self):#遇到URL或者HTTP错误提示
Error_if = input("Some Error, (enter)here we go?**************************(everything) Exit")
if Error_if:
sys.exit()
else:
pass
def analysis(self,htmls):#处理正则后的数据
one_html = re.findall(Spider.one_pattern,htmls)
#print (one_html)
if one_html:
for html in one_html:
content = html
# content = re.findall(Spider.one_pattern,html)
print(len(content))
if len(content):
t1 = reduce(lambda x,y:x+y,content)
else:
t1 = ' '
else:
t1= ' '
#print(t)
return t1
    def go(self):
        """Read article ids from one workbook, fetch each article over HTTP,
        and write the extracted body text into a column of another workbook.

        Phase 1 reads ids with xlrd; phase 2 writes results with openpyxl
        (the earlier xlwt path was abandoned — see the commented block below).
        """
        # --- Phase 1: read ids from column 0, rows start..end-1 ---
        # NOTE(review): doubled backslashes inside an r-string produce literal
        # '\\' separators; Windows tolerates them, but r'C:\Users\...' was
        # probably intended.
        readbook = xlrd.open_workbook(r'C:\\Users\\Administrator\\Desktop\\mingshi.xlsx')
        table = readbook.sheets()[0]
        start=1     # first data row (row 0 presumably a header — confirm)
        end=639     # one past the last row to read (hard-coded row count)
        list_values=[]
        # Collect the first cell of every row; the inner range(1) loop reads
        # exactly one column but is written as a loop.
        for x in range(start,end):
            values=[]
            row =table.row_values(x)
            for i in range(1):
                values.append(row[i])
            list_values.append(values)
        # List of 1-element numpy rows; each y below is a 1-element array.
        datamatrix=list(np.array(list_values))
        #print(type(datamatrix))
        """
        循环写xlw
        """
        # --- Phase 2 setup: open the target workbook with openpyxl ---
        # NOTE(review): loaded from mingshi11111.xlsx but saved below as
        # mingshi111111.xlsx (one more '1') — confirm this is intentional.
        wb = load_workbook(r'C:\\mingshi11111.xlsx')
        ws=wb.active
        hangshu = 1     # current output row index
        lieshu = 5      # output column index (shadows the class attribute)
        rows=[]
        # Materialize all worksheet rows so cells can be addressed by index;
        # requires the target sheet to already contain enough rows.
        for row in ws.iter_rows():
            rows.append(row)
        """
        #抛弃使用xlsw库,缺点:写xlw会有大小限制,超过限制会出错
        # 写xls w for循环外
        # book1 = xlrd.open_workbook(r'路径')
        # book2 = copy(book1)#拷贝一份原来的excel
        # sheet = book2.get_sheet(0)#获取第几个sheet页,book2现在的是xlutils里的方法,不是xlrd的
        """
        # --- Main loop: build each URL from the id, fetch, extract, write ---
        for y in datamatrix:
            # int(y) collapses the 1-element numpy row to a plain id.
            url1=("http://m******px?id="+str(int(y)))
            #url1=("http://m******x?id=20131210120041954")
            #
            print(('*')*127)
            print("url:"+url1)     # show which article is being fetched
            print(('*')*127)
            htmls = self.fetch_content(url1)     # download the page HTML
            # NOTE(review): analysis() is called here and again below — this
            # first call's result is discarded (redundant work).
            self.analysis(htmls)
            """
            openpyxl_start
            """
            # Write the extracted text into the current row/column.
            print('正在写入第'+str(hangshu)+'行')
            if hangshu == 639:     # stop once the hard-coded row limit is reached
                break
            else:
                rows[hangshu][lieshu].value = self.analysis(htmls)     # row hangshu, column lieshu
                # Saving inside the loop is slow but keeps progress on a crash.
                wb.save("C:\\mingshi111111.xlsx")
                print('已写入第'+str(hangshu)+'行')
                hangshu+=1
            #
            """
            写xls w 循环内
            """
            """
            if lieshu==639: #写循环次数
                break
            else:
                sheet.write(lieshu, 5, self.analysis(htmls))
                book2.save('c:\\ms.xls')
                lieshu+=1
            """
            """
            注释
            """
            print(('*')*127)
            print('sleep 1秒')
            print(('*')*127)
            # Throttle requests to be polite to the server.
            time.sleep(1)
# Run the scrape only when executed as a script; importing this module for
# reuse/testing no longer triggers network and file I/O as a side effect.
if __name__ == '__main__':
    spider = Spider()
    spider.go()