Python crawler example: fetching and parsing data
This article walks through a Python crawler that fetches and parses data, shared for your reference. The details are as follows:

Target: the Dangdang search page http://search.dangdang.com/?key=python&act=input&page_index=1

Goal: collect book information. The spider is written in an object-oriented style and demonstrates several different parsing and storage approaches. The libraries it relies on:
```python
import requests
import re
import csv
import pymysql
from bs4 import BeautifulSoup
from lxml import etree
```
The class implementation:
```python
class DDSpider(object):
    # Constructor arguments: search keyword and number of pages
    def __init__(self, key='python', page=1):
        self.url = 'http://search.dangdang.com/?key=' + key + '&act=input&page_index={}'
        self.page = page
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                                      'Chrome/53.0.2785.116 Safari/537.36'}

    # Private method: build the list of page URLs
    def __my_url(self):
        my_url = []
        if self.page < 1:
            my_page = 2
        else:
            my_page = self.page + 1
        # one URL per page
        for i in range(1, my_page):
            my_url.append(self.url.format(i))
        return my_url

    # Private method: request one page
    def __my_request(self, url, parser_type):
        response = requests.get(url=url, headers=self.headers)
        if response.status_code == 200:
            return self.__my_parser(response.text, parser_type)
        else:
            return None

    # Private method: parse the page (1 regex, 2 bs4, 3 xpath)
    def __my_parser(self, html, my_type=1):
        if my_type == 1:
            pattern = re.compile(
                '<p.*?class=["\']name["\'].*?name=["\']title["\'].*?'
                '<a.*?title=["\'](.*?)["\'].*?href=["\'](.*?)["\'].*?'
                'name=["\']itemlist-title["\'].*?'
                '<p class=["\']detail["\'].*?>(.*?)</p>.*?'
                '<span.*?class=["\']search_now_price["\'].*?>(.*?)</span>.*?'
                '<p.*?class=["\']search_book_author["\'].*?><span>.*?'
                '<a.*?name=["\']itemlist-author["\'].*?title=["\'](.*?)["\'].*?</span>',
                re.S)
            result = re.findall(pattern, html)
        elif my_type == 2:
            soup = BeautifulSoup(html, 'lxml')
            result = []
            titles = soup.find_all('a', attrs={'name': 'itemlist-title'})
            prices = soup.find_all('span', attrs={'class': 'search_now_price'})
            authors = soup.find_all('a', attrs={'name': 'itemlist-author'})
            descs = soup.find_all('p', attrs={'class': 'detail'})
            for i in range(len(titles)):
                title = titles[i].attrs['title']
                url = titles[i].attrs['href']
                price = prices[i].get_text()
                author = authors[i].attrs['title']
                desc = descs[i].get_text()
                result.append((title, url, desc, price, author))
        else:
            html = etree.HTML(html)
            li_all = html.xpath('//div[@id="search_nature_rg"]/ul/li')
            result = []
            for i in range(len(li_all)):
                li = '//div[@id="search_nature_rg"]/ul/li[{}]'.format(i + 1)
                title = html.xpath(li + '/p[@class="name"]/a/@title')
                url = html.xpath(li + '/p[@class="name"]/a/@href')
                price = html.xpath(li + '//span[@class="search_now_price"]/text()')
                author_num = html.xpath(li + '/p[@class="search_book_author"]/span[1]/a')
                if len(author_num) != 0:
                    # the author is wrapped in an <a> tag
                    author = html.xpath(li + '/p[@class="search_book_author"]/span[1]/a[1]/@title')
                else:
                    # no <a> tag; the author is bare text
                    author = html.xpath(li + '/p[@class="search_book_author"]/span[1]/text()')
                desc = html.xpath(li + '/p[@class="detail"]/text()')
                result.append((' '.join(title), ' '.join(url), ' '.join(desc),
                               ' '.join(price), ' '.join(author)))
        return result

    # Private method: save the data (1 txt, 2 csv, 3 mysql)
    def __my_save(self, data, save_type=1):
        for value in data:
            if save_type == 1:
                with open('ddw.txt', 'a+', encoding='utf-8') as f:
                    f.write('[Title]: {} [Author]: {} [Price]: {} '
                            '[Description]: {} [Link]: {}\n'.format(
                                value[0], value[4], value[3], value[2], value[1]))
            elif save_type == 2:
                with open('ddw.csv', 'a+', newline='', encoding='utf-8-sig') as f:
                    writer = csv.writer(f)
                    # convert the tuple to a list and write one row
                    writer.writerow(list(value))
            else:
                # fill in your own database name and INSERT statement here
                conn = pymysql.connect(host='127.0.0.1', user='root', passwd='',
                                       db='', port=3306, charset='utf8')
                cursor = conn.cursor()
                sql = ''
                cursor.execute(sql)
                conn.commit()
                cursor.close()
                conn.close()

    # Public method: run the whole pipeline
    def my_run(self, parser_type=1, save_type=1):
        my_url = self.__my_url()
        for value in my_url:
            result = self.__my_request(value, parser_type)
            self.__my_save(result, save_type)
```
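The MySQL branch above deliberately leaves the database name and the SQL statement blank. As a minimal sketch of what a filled-in version could look like, assuming a local database named `ddw` with a `books(title, url, desc_text, price, author)` table (both names are hypothetical):

```python
# Hypothetical fill-in for the save_type == 3 branch; assumes a local
# MySQL database `ddw` with a table books(title, url, desc_text, price, author).
# `value` is the current record tuple from the loop in __my_save.
conn = pymysql.connect(host='127.0.0.1', user='root', passwd='',
                       db='ddw', port=3306, charset='utf8')
cursor = conn.cursor()
# A parameterized INSERT lets pymysql handle quoting and escaping
sql = ('INSERT INTO books (title, url, desc_text, price, author) '
       'VALUES (%s, %s, %s, %s, %s)')
cursor.execute(sql, (value[0], value[1], value[2], value[3], value[4]))
conn.commit()
cursor.close()
conn.close()
```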
Calling the spider class to fetch the data:
```python
if __name__ == '__main__':
    # instantiate the spider
    dd = DDSpider('python', 0)
    # my_run(parser_type, save_type)
    #   parser_type: 1 regex, 2 bs4, 3 xpath
    #   save_type:   1 txt,   2 csv, 3 mysql
    dd.my_run(2, 1)
```
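After a run with save_type=2, the CSV rows can be read back with the standard library to spot-check the results; a quick sketch:

```python
import csv

# Spot-check the rows written by __my_save with save_type == 2
with open('ddw.csv', encoding='utf-8-sig', newline='') as f:
    for row in csv.reader(f):
        # row order matches the parser tuples: (title, url, desc, price, author)
        print(row[0], '->', row[3])
```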
To sum up:

1. Overall, the regex approach feels the leanest and yields the shortest code, but the regular expression itself is relatively complex and hard to get right.
2. bs4 and xpath both require some familiarity with the page's HTML structure, and extracting several fields per record is comparatively tedious (a concrete three-way example follows).
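To make the comparison concrete, here is the same field (a book title) extracted three ways from a tiny, made-up HTML fragment:

```python
import re
from bs4 import BeautifulSoup
from lxml import etree

# A made-up fragment that mimics one Dangdang search-result entry
snippet = ('<p class="name"><a name="itemlist-title" '
           'title="Python Basics" href="/x">Python Basics</a></p>')

# 1) regex: compact, but the pattern is hard to read and maintain
title_re = re.search(r'name="itemlist-title"\s+title="(.*?)"', snippet).group(1)

# 2) bs4: verbose but readable; you navigate by tag and attribute
title_bs = BeautifulSoup(snippet, 'lxml').find(
    'a', attrs={'name': 'itemlist-title'})['title']

# 3) xpath: concise once you know the node path
title_xp = etree.HTML(snippet).xpath('//a[@name="itemlist-title"]/@title')[0]

print(title_re, title_bs, title_xp)  # Python Basics, three times
```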
Hopefully this article is helpful to anyone doing Python programming.