"""
Created on Wed Nov 23 00:03:30 2022
@author: fch
"""
import time
'''
Site URL: https://m.7160.top/rentiyishu/
'''
import random
from pyquery import PyQuery as pq
import requests
import os
from joblib import Parallel, delayed
import socket
socket.setdefaulttimeout(10)
# Suppress the InsecureRequestWarning raised by requests for unverified HTTPS
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.DEFAULT_RETRIES = 5  # increase the connection retry count
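# Note: requests.DEFAULT_RETRIES is not a documented requests setting; a more
# reliable way to retry failed connections is to mount an HTTPAdapter with a
# urllib3 Retry policy on a Session. Minimal sketch (the helper below is
# illustrative only and is not called by this script):
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_retry_session(total_retries=5, backoff=0.5):
    """Build a requests.Session that retries failed connections."""
    session = requests.Session()
    retry = Retry(total=total_retries, backoff_factor=backoff,
                  status_forcelist=[500, 502, 503, 504])
    session.mount('http://', HTTPAdapter(max_retries=retry))
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session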
IP_list = []
with open('./IP代理池.txt', 'r') as f:
    for line in f:
        line = line.strip('\n')  # strip the trailing newline
        IP_list.append(line)
'''
random.choice returns the proxy entry as a string, so it is converted back to a
dict with eval (ast.literal_eval is a safer alternative; see the sketch below).
'''
# proxie = eval(random.choice(IP_list))
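# Safer alternative (illustrative): ast.literal_eval only evaluates Python
# literals, so it cannot execute arbitrary code the way eval can. Assumes each
# line of the proxy file is a dict literal such as "{'http': 'http://1.2.3.4:8080'}".
import ast

def pick_proxy(ip_list):
    """Randomly pick one proxy line and parse it into a dict."""
    return ast.literal_eval(random.choice(ip_list))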
user_agent_list = [
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)',
'UCWEB7.0.2.37/28/999',
'NOKIA5700/ UCWEB7.0.2.37/28/999',
'Openwave/ UCWEB7.0.2.37/28/999',
'Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999',
'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25',
]
def Res_url(url):
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
headers = {'User-Agent': random.choice(user_agent_list),
'Referer': 'https://m.7160.top/rentiyishu/',
"Connection": "close"}
try:
s=requests.session()
s.keep_alive = False
r = s.get(url, headers=headers, proxies=eval(random.choice(IP_list)), timeout=(5, 10),verify=False,allow_redirects=False)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print(url)
return 'over'
'''
Scrape the link and name of every gallery on the index page and return them as lists.
'''
def Parse_page(html):
page_url_list,fil_name_list=[],[]
doc = pq(html)
items = doc('.listUll2 li').items()
for item in items:
page_url_list.append(item('div a').attr('href'))
fil_name_list.append(item('p').text())
return page_url_list,fil_name_list
def Get_img(page_url, file_name):
num = 1
base_url = page_url.split('.')
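    # Illustrative example: a gallery page such as '.../12345.html' is paginated
    # as '.../12345_2.html', '.../12345_3.html', ..., so the URL is rebuilt from
    # its dot-separated parts with '_<num>' inserted before the extension.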
while True:
if num == 1:
url = page_url
else:
url = base_url[0] + '.' + base_url[1] + '.' + base_url[2] + '_' + str(num) + '.' + base_url[3]
time.sleep(1)
html = Res_url(url)
if html != 'over':
doc = pq(html)
img_url = doc('.ArticleBox img').attr('src')
img_basename = doc('.ArticleBox img').attr('alt')
            if img_basename is None:
                img_name = str(num)  # fall back to the page index when alt text is missing
            else:
                img_name = img_basename.replace('/', '')
            if img_url is None:
                num += 1
            else:
                Save_img(img_url, file_name, img_name)
                num += 1
        else:
            break  # exit the while loop once the page no longer exists
def Save_img(img_url, file_name, img_name):
time.sleep(1)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
headers = {'User-Agent': random.choice(user_agent_list),
'Referer': 'https://m.7160.top/rentiyishu/',
"Connection": "close"}
houzui = img_url.split('.')[-1]
addr = './images_3/' + file_name
if not os.path.isdir(addr):
os.makedirs(addr)
    r = requests.get(img_url, headers=headers, proxies=eval(random.choice(IP_list)), verify=False)
    if r.status_code == 200:
        with open('./images_3/{}/{}.{}'.format(file_name, img_name, houzui), 'wb') as f:
            f.write(r.content)
    else:
        print('Image URL does not exist')
def main():
page_num = 1
base_url = 'https://m.7160.top/rentiyishu/index'
while True:
if page_num == 1:
url = base_url + '.html'
else:
url = base_url + '_' + str(page_num) + '.html'
html = Res_url(url)
if html != 'over':
            print('Saving page', page_num)
page_url_list, file_name_list=Parse_page(html)
            '''
            Run Get_img in parallel; n_jobs is the number of concurrent workers (5 here).
            '''
Parallel(n_jobs=5)(delayed(Get_img)(page_url,file_name) for page_url,file_name in zip(page_url_list,file_name_list))
        else:
            print('Finished')
            return 'Finished'
page_num += 1
main()
'''
Site URL: http://www.meinuzi.com/
'''
import random
from pyquery import PyQuery as pq
import requests
proxies = [
    {'http': 'http://122.140.5.115:9999'},
    {'http': 'http://113.101.96.66:8080'},
    {'http': 'http://113.124.86.24:9999'},
    {'http': 'http://121.13.252.58:41564'},
    {'http': 'http://61.216.185.88:60808'},
    {'http': 'http://58.20.184.187:9091'},
    {'http': 'http://183.236.123.242:8060'},
    {'http': 'http://116.9.163.205:58080'},
    {'http': 'http://222.74.73.202:42055'},
    {'http': 'http://183.247.202.208:30001'},
    {'http': 'http://39.108.101.55:1080'},
    {'http': 'http://47.105.91.226:8118'},
]
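# Note: requests looks up proxies by lowercase URL scheme, so the keys must be
# 'http'/'https'; a single entry such as {'http': 'http://host:port'} only
# applies to plain-http requests.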
proxie=random.choice(proxies)
user_agent_list = ['Chrome/86.0.4240.198',
'Chrome/101.0.4951.64',
'Chrome/96.0.4664.45',
'Chrome/94.0.4606.41'
]
headers = {'User-Agent': random.choice(user_agent_list),
'Referer':'http://www.meinuzi.com'}
'''
Request the url and return the HTML text.
'''
def Req_url(url):
try:
r=requests.get(url,headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding=r.apparent_encoding
return r.text
except:
# print('ERROR!')
return 'over'
'''
Parse the index page and find the URL of each image page.
'''
def Parse_page(html):
doc=pq(html)
items=doc('.m-list-main li').items()
i=1
for item in items:
page_url=item('.u-img a').attr('href')
name=item('.u-img img').attr('alt')
print("保存第",i,'张图')
Get_img(page_url,name)
i+=1
'''
Request each image page and find the url of the image on it.
'''
def Get_img(page_url,name):
html=Req_url(page_url)
doc=pq(html)
img_baseurl=doc('.g-mn .m-list-content img').attr('src')
Save_img(img_baseurl,name)
'''
Save the image to a folder using the image url.
'''
def Save_img(img_baseurl,name):
img_url=headers['Referer']+img_baseurl
data=requests.get(img_url,headers=headers,proxies=proxie,timeout=15).content
houzui=img_url.split('.')[-1]
with open('./images_1/{}.{}'.format(name,houzui),'wb') as f:
f.write(data)
def main():
    num = 7  # start scraping from page num
    '''
    When Req_url returns 'over', the page does not exist, i.e. every page has been scraped.
    '''
while True:
page_url='http://www.meinuzi.com/index_'+str(num)+'.html'
html = Req_url(page_url)
if html != 'over':
            '''
            The page may exist but contain no image data (the tag holds only a newline).
            '''
doc=pq(html)
item = doc('.m-list-main ul').text()
            if item != '':
                print('********** Page', num, '**********')
Parse_page(html)
num+=1
            else:
                print('Finished')
                return 'Finished'
        else:
            print('Finished')
            return 'Finished'
main()
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 19 22:44:37 2022
@author: fch
"""
import random
from pyquery import PyQuery as pq
import requests
import os
import time
import json
proxies = [
    {'http': 'http://122.140.5.115:9999'},
    {'http': 'http://113.101.96.66:8080'},
    {'http': 'http://113.124.86.24:9999'},
    {'http': 'http://121.13.252.58:41564'},
    {'http': 'http://61.216.185.88:60808'},
    {'http': 'http://58.20.184.187:9091'},
    {'http': 'http://183.236.123.242:8060'},
    {'http': 'http://116.9.163.205:58080'},
    {'http': 'http://222.74.73.202:42055'},
    {'http': 'http://183.247.202.208:30001'},
    {'http': 'http://39.108.101.55:1080'},
    {'http': 'http://47.105.91.226:8118'},
]
proxie=random.choice(proxies)
user_agent_list = ['Chrome/86.0.4240.198',
'Chrome/101.0.4951.64',
'Chrome/96.0.4664.45',
'Chrome/94.0.4606.41'
]
headers = {'User-Agent': random.choice(user_agent_list)}
list1 = [
"Chrome/68.0.3440.106",
"Chrome/67.0.3396.99",
"Chrome/64.0.3282.186",
"Chrome/62.0.3202.62",
"Chrome/45.0.2454.101"
]
header={'User-Agent': random.choice(list1),
'Referer':'http://81.68.202.74/',
'sec-ch-ua': 'Google Chrome',
'sec-ch-ua-platform':'Windows'
}
base_url = 'http://81.68.202.74/datu/page/'
# verify=False can be passed to requests.get to skip SSL certificate verification (not used below)
def Base_page(Page_num):
url = base_url + str(Page_num)
try:
r = requests.get(url, headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print('ERROR!!!')
def Page_url(fir_html):
doc = pq(fir_html)
items = doc('.posts-wrapper .entry-media').items()
num = 1
for item in items:
page_url=item('a').attr('href')
fil_name=item('.lazyload').attr('alt').replace(":","")
# print(fil_name,page_url)
        print('------ Item', num, '------')
Parse_img(page_url, fil_name)
num+=1
def Parse_img(page_url,fil_name):
time.sleep(0.25)
i=1
r = requests.get(page_url, headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding = r.apparent_encoding
sec_html=r.text
doc=pq(sec_html)
items=doc('.entry-content p img').items()
    for item in items:
        img_info = {
            'url': item.attr('src'),
            'name': item.attr('title')
        }
        print('Saving image', i)
        i += 1
        time.sleep(1.25)
        Save_img(img_info, fil_name)
def Save_img(dic,fil_name):
url=dic['url']
# time.sleep(0.25)
data = requests.get(url,headers=header,proxies=proxie).content
name=dic['name']
houzui=url.split('.')[-1]
addr='./images_2/'+fil_name
if not os.path.isdir(addr):
os.makedirs(addr)
with open('./images_2/{}/{}.{}'.format(fil_name,name,houzui),'wb') as f:
f.write(data)
def main():
    # range(1, 22) is half-open, so this scrapes pages 1 through 21
    for Page_num in range(1, 22):
        print('************* Page', Page_num, '*************')
fir_html = Base_page(Page_num)
Page_url(fir_html)
print("结束!")
main()
from urllib.parse import urlencode
import requests
import re
def Get_page(page_num):
headers={
'Host':'so.toutiao.com',
'user-agent':'Mozilla/5.0',
'cookie':'tt=acd600b5ac865033f0ee83a63ed44675; '
}
base_url='https://so.toutiao.com/search?'
params = {
'keyword': '街拍',
'pd': 'atlas',
'dvpf': 'pc',
'aid': '4916',
'page_num': page_num,
'search_json': '%7B%22from_search_id%22%3A%2220220104115420010212192151532E8188%22%2C%22origin_keyword%22%3A%22%E8%A1%97%E6%8B%8D%22%2C%22image_keyword%22%3A%22%E8%A1%97%E6%8B%8D%22%7D',
'rawJSON': 1,
'search_id': '2022062517173701021219402539E36546'
}
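    # Caution: the 'search_json' value above is already percent-encoded, so
    # urlencode() below will encode it a second time (e.g. '%7B' becomes '%257B').
    # If the request stops matching results, the raw JSON string may need to be
    # used instead and left for urlencode to escape once.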
url=base_url+urlencode(params)
try:
r=requests.get(url,headers=headers)
if r.status_code==200:
return r.json()
except:
print('Failed!')
def get_images(json):
images = json['rawData']['data']
for image in images:
yield {
'name': image['text'],
'url':image['img_url']
}
def save_image(item):
    data = requests.get(item['url']).content
    # replace " and : with ',' so the file path is not misinterpreted
    name = item['name'].replace(':', ',')
    name = name.replace('"', ',')
    name = name.replace('?', '')
    name = name.split('|')[-1]  # if '|' appears, keep only the last segment
    houzui = item['url'].split('.')[-1]  # file extension, e.g. jpeg or png
    with open("./images/{}.{}".format(name, houzui), 'wb') as f:
        f.write(data)
def main():
    for page_num in range(1, 20):
        result = Get_page(page_num)
        for d in get_images(result):
            save_image(d)
main()
import pymysql
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
import pymongo
from selenium.common.exceptions import TimeoutException
'''Connect to MongoDB'''
client=pymongo.MongoClient("mongodb://*********:27017")
db=client.test
col=db.taobao
'''Connect to MySQL'''
db=pymysql.Connect(host='*********',user='root',password='*********',port=3306,db='xiaomishop')
cursor=db.cursor()
table='taobao'
'''Initial url'''
kw = 'iPad'  # search keyword
url='https://s.taobao.com/search?q='+kw
driver = webdriver.Chrome()  # the browser driver object
wait = WebDriverWait(driver, 50)  # maximum wait time in seconds
def Get_page(page_num):
    '''
    Fetch page page_num of the search results (html).
    :param page_num: page number to load
    :return: None
    '''
    print('=' * 25, 'Scraping page', page_num, '=' * 25)
try:
driver.get(url)
        input = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'J_Input')))  # wait until the page-number input is present
        submit = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'J_Submit')))  # wait until the confirm button is clickable
input.clear()
input.send_keys(page_num)
submit.click()
Get_product()
    except TimeoutException:
Get_page(page_num)
def Get_product():
    '''
    Get every product's name, price, image, purchase count, shop name and
    shop location from the current page.
    :return: None
    '''
html=driver.page_source
doc=pq(html)
items=doc('.J_MouserOnverReq').items()
for item in items:
product={
'name': item.find('.ctx-box .row-2 a').text(),
'pic_img':item.find('.pic-box-inner .J_Itemlist_Pic_657878599145').attr('src'),
'price':item.find('.ctx-box .row-1 strong').text(),
'nums':item.find('.ctx-box .row-1 .deal-cnt').text(),
'shop':item.find('.ctx-box .row-3 .shop span').text(),
'location':item.find('.ctx-box .row-3 .location').text()
}
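        # Note: '.J_Itemlist_Pic_657878599145' appears to be tied to a single item
        # id, so 'pic_img' will likely be None for other products; a more generic
        # selector (e.g. '.pic-box-inner img') may be needed here.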
# write_to_mongodb(product)
# write_to_mysql(product)
print(product)
def write_to_mongodb(dic):
    '''
    Save the result dict to MongoDB.
    :param dic:
    :return: None
    '''
col.insert_one(dic)
def write_to_mysql(dic):
    '''
    Write the result dict to MySQL.
    :param dic:
    :return: None
    '''
keys=','.join(dic.keys())
values=','.join(['%s']*len(dic))
sql='insert into {table}({keys}) values ({values})'.format(table=table,keys=keys,values=values)
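    # For example (illustrative), dic = {'name': 'iPad', 'price': '3999'} builds:
    #   insert into taobao(name,price) values (%s,%s)
    # and the values are passed separately so pymysql escapes them safely.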
try:
if cursor.execute(sql,tuple(dic.values())):
print('successful')
db.commit()
except:
print('failed')
db.rollback()
if __name__ == '__main__':
for page_num in range(1,101):
Get_page(page_num)
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver import ActionChains
'''Basic usage'''
# create the browser object
browser=webdriver.Chrome()
try:
    '''Visit a page'''
browser.get('https://www.baidu.com')
input=browser.find_element(By.ID,'kw')
input.send_keys('python')
input.send_keys(Keys.ENTER)
wait=WebDriverWait(browser,10)
wait.until(EC.presence_of_element_located((By.ID,'content_left')))
print('='*50)
print(browser.current_url)
print('=' * 50)
print(browser.get_cookies())
print('=' * 50)
# print(browser.page_source)
browser.quit()
except:
browser.close()
'''Interacting with nodes'''
driver=webdriver.Chrome()
try:
driver.get('https://www.taobao.com')
input=driver.find_element(By.ID,'q')
input.send_keys('手机')
    # input.clear()  # clear the input box
button=driver.find_element(By.CLASS_NAME,'btn-search')
button.click()
driver.close()
except:
driver.close()
'''Action chains'''
driver=webdriver.Chrome()
try:
driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    driver.switch_to.frame('iframeResult')  # switch into the iframe
source=driver.find_element(By.ID,'draggable')
target=driver.find_element(By.ID,'droppable')
actions=ActionChains(driver)
actions.drag_and_drop(source,target)
actions.perform()
except:
driver.close()
'''Execute JavaScript, e.g. scroll to the bottom of the page'''
driver=webdriver.Chrome()
try:
driver.get('https://www.zhihu.com/explore')
driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
driver.execute_script('alert("To Bottom")')
except:
print('error')
driver.close()
'''Getting attributes'''
driver=webdriver.Chrome()
try:
driver.get('https://www.zhihu.com/explore')
logo=driver.find_element(By.ID,'special')
print(logo)
print(logo.get_attribute('class'))
driver.close()
except:
print('error')
driver.close()
'''Getting text'''
driver=webdriver.Chrome()
try:
driver.get('https://www.zhihu.com/explore')
res=driver.find_element(By.CLASS_NAME,'ExploreSpecialCard-contentTitle')
print(res.text)
driver.close()
except:
print('error')
driver.close()
'''Getting the id, location, tag name and size'''
driver=webdriver.Chrome()
try:
driver.get('https://www.zhihu.com/explore')
res=driver.find_element(By.CLASS_NAME,'ExploreSpecialCard-contentTitle')
    print('id', res.id)
    print('location', res.location)
    print('tag name', res.tag_name)
    print('size', res.size)
driver.close()
except:
print('error')
driver.close()
'''Explicit (conditional) waits'''
driver=webdriver.Chrome()
try:
driver.get('https://www.taobao.com')
    # create a WebDriverWait object with the maximum wait time
wait=WebDriverWait(driver,10)
    # call until() with a wait condition; EC.presence_of_element_located means the node has appeared
input=wait.until(EC.presence_of_element_located((By.ID,'q')))
button=wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'.btn-search')))
print(input,button)
driver.close()
except:
print('error')
driver.close()
'''Forward and back'''
driver=webdriver.Chrome()
try:
driver.get('https://www.taobao.com/')
driver.get('https://www.baidu.com/')
driver.get('https://www.python.org/')
    driver.back()  # go back
time.sleep(1)
    driver.forward()  # go forward
driver.close()
except:
print('error')
driver.close()