"""
Created on Wed Nov 23 00:03:30 2022
@author: fch
"""
import time
'''
Target site: https://m.7160.top/rentiyishu/
'''
import random
from pyquery import PyQuery as pq
import requests
import os
from joblib import Parallel, delayed
import socket
socket.setdefaulttimeout(10)
# Suppress the certificate warning requests raises when verify=False
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.DEFAULT_RETRIES = 5  # increase the number of connection retries
IP_list = []
with open('./IP代理池.txt', 'r') as f:
    for line in f:
        line = line.strip('\n')  # strip the trailing newline
        IP_list.append(line)
'''
Each line read back from the pool file is a string; eval() turns it into a proxy dict again.
'''
# proxie = eval(random.choice(IP_list))
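# A safer alternative to eval() for those lines is ast.literal_eval, which only
# accepts Python literals. A minimal sketch, assuming each line of IP代理池.txt
# is one repr()'d proxy dict (this helper is not called anywhere below):
def parse_proxy_line(line):
    from ast import literal_eval
    return literal_eval(line)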
user_agent_list = [
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)',
'UCWEB7.0.2.37/28/999',
'NOKIA5700/ UCWEB7.0.2.37/28/999',
'Openwave/ UCWEB7.0.2.37/28/999',
'Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999',
'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25',
]
def Res_url(url):
    '''Fetch url through a random proxy with a random User-Agent; return the page text, or 'over' on failure.'''
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    headers = {'User-Agent': random.choice(user_agent_list),
               'Referer': 'https://m.7160.top/rentiyishu/',
               'Connection': 'close'}
    try:
        s = requests.session()
        s.keep_alive = False
        r = s.get(url, headers=headers, proxies=eval(random.choice(IP_list)),
                  timeout=(5, 10), verify=False, allow_redirects=False)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        print(url)
        return 'over'
'''
Scrape every gallery link and title from an index page and return them as two lists.
'''
def Parse_page(html):
    page_url_list, fil_name_list = [], []
    doc = pq(html)
    items = doc('.listUll2 li').items()
    for item in items:
        page_url_list.append(item('div a').attr('href'))
        fil_name_list.append(item('p').text())
    return page_url_list, fil_name_list
def Get_img(page_url, file_name):
    '''Walk the numbered pages of one gallery and save every image found.'''
    num = 1
    base_url = page_url.split('.')
    while True:
        if num == 1:
            url = page_url
        else:
            # pages after the first follow the pattern <name>_<num>.html
            url = base_url[0] + '.' + base_url[1] + '.' + base_url[2] + '_' + str(num) + '.' + base_url[3]
        time.sleep(1)
        html = Res_url(url)
        if html != 'over':
            doc = pq(html)
            img_url = doc('.ArticleBox img').attr('src')
            img_basename = doc('.ArticleBox img').attr('alt')
            if img_basename is None:
                img_name = str(num)  # fall back to the page number when the alt text is missing
            else:
                img_name = img_basename.replace('/', '')
            if img_url is None:
                num += 1
            else:
                Save_img(img_url, file_name, img_name)
                num += 1
        else:
            break  # no more pages in this gallery
def Save_img(img_url, file_name, img_name):
    time.sleep(1)
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    headers = {'User-Agent': random.choice(user_agent_list),
               'Referer': 'https://m.7160.top/rentiyishu/',
               'Connection': 'close'}
    houzui = img_url.split('.')[-1]  # file extension, e.g. jpg
    addr = './images_3/' + file_name
    if not os.path.isdir(addr):
        os.makedirs(addr)
    r = requests.get(img_url, headers=headers, proxies=eval(random.choice(IP_list)), verify=False)
    if r.status_code == 200:
        with open('./images_3/{}/{}.{}'.format(file_name, img_name, houzui), 'wb') as f:
            f.write(r.content)
    else:
        print('Image URL does not exist')
def main():
    page_num = 1
    base_url = 'https://m.7160.top/rentiyishu/index'
    while True:
        if page_num == 1:
            url = base_url + '.html'
        else:
            url = base_url + '_' + str(page_num) + '.html'
        html = Res_url(url)
        if html != 'over':
            print('Saving page', page_num)
            page_url_list, file_name_list = Parse_page(html)
            '''
            Parallel download: n_jobs=5 runs 5 workers at once.
            '''
            Parallel(n_jobs=5)(delayed(Get_img)(page_url, file_name)
                               for page_url, file_name in zip(page_url_list, file_name_list))
        else:
            print('Finished')
            return 'Finished'
        page_num += 1
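# The Parallel(...) call above relies on joblib, whose default backend runs
# worker processes rather than threads. For an I/O-bound download job a plain
# thread pool from the standard library is an alternative; a minimal sketch,
# not called by this script:
def download_all_with_threads(page_url_list, file_name_list, workers=5):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=workers) as pool:
        pool.map(Get_img, page_url_list, file_name_list)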
main()
import requests
from bs4 import BeautifulSoup
import time
'''
Number of pages of the free-proxy site to crawl; range(1, num) covers pages 1-50.
'''
num = 51
list_ip = []
list_port = []
list_type=[]
list_headers_ip = []
def check_ip(list_ip):
    '''Return the proxies from list_ip that can successfully fetch the test URL.'''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36 Edg/91.0.864.71',
        'Connection': 'close',
        'Referer': 'https://m.7160.top/rentiyishu/'
    }
    # url = 'https://www.baidu.com'  # Baidu could also serve as the reachability test
    url = 'https://pic.99ym.cn/d/file/202009/mz1kqud4v2i.jpg'
    can_use = []
    for ip in list_ip:
        try:
            # fetch the test URL through the proxy, giving up after 3 seconds
            response = requests.get(url=url, headers=headers, proxies=ip, timeout=3, verify=False)
            if response.status_code == 200:
                can_use.append(ip)
        except Exception as e:
            print(e)
    return can_use
for start in range(1, num):
    url = 'https://www.kuaidaili.com/free/inha/{}/'.format(start)  # 15 entries per page
    print('Processing url:', url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36 Edg/91.0.864.71'}
    response = requests.get(url=url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    ip = soup.select('#list > table > tbody > tr > td:nth-child(1)')
    port = soup.select('#list > table > tbody > tr > td:nth-child(2)')
    proxy_type = soup.select('#list > table > tbody > tr > td:nth-child(4)')
    for i in ip:
        list_ip.append(i.get_text())
    for i in port:
        list_port.append(i.get_text())
    for i in proxy_type:
        list_type.append(i.get_text())
    time.sleep(0.5)  # slow down so pages are not requested too quickly and scraped incompletely
# Shape of one proxy entry: {'http': 'http://119.14.253.128:8088'}
# (requests matches the key against the lowercased URL scheme, so the key must be lowercase)
for i in range(len(list_ip)):
    if list_type[i] == 'HTTP':
        proxies = {
            'http': 'http://' + list_ip[i] + ':' + list_port[i]
        }
    else:
        proxies = {
            'https': 'https://' + list_ip[i] + ':' + list_port[i]
        }
    list_headers_ip.append(proxies)
can_use = check_ip(list_headers_ip)
print('Usable proxies:', can_use)
print('Number of usable proxies:', len(can_use))
with open('./IP代理池.txt', 'w') as f:
    for i in can_use:
        f.write(str(i) + '\n')
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 19 22:44:37 2022
@author: fch
"""
import random
from pyquery import PyQuery as pq
import requests
import os
import time
import json
# hardcoded fallback proxies; lowercase keys so requests matches them against the URL scheme
proxies = [
    {'http': 'http://122.140.5.115:9999'},
    {'http': 'http://113.101.96.66:8080'},
    {'http': 'http://113.124.86.24:9999'},
    {'http': 'http://121.13.252.58:41564'},
    {'http': 'http://61.216.185.88:60808'},
    {'http': 'http://58.20.184.187:9091'},
    {'http': 'http://183.236.123.242:8060'},
    {'http': 'http://116.9.163.205:58080'},
    {'http': 'http://222.74.73.202:42055'},
    {'http': 'http://183.247.202.208:30001'},
    {'http': 'http://39.108.101.55:1080'},
    {'http': 'http://47.105.91.226:8118'},
]
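# The list above is hardcoded; the pool file produced by the kuaidaili script
# could be loaded instead. A sketch, assuming IP代理池.txt sits next to this
# script with one repr()'d dict per line (this helper is not called below):
def load_proxies_from_pool(path='./IP代理池.txt'):
    from ast import literal_eval
    with open(path, 'r') as f:
        return [literal_eval(line.strip()) for line in f if line.strip()]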
proxie=random.choice(proxies)
user_agent_list = ['Chrome/86.0.4240.198',
'Chrome/101.0.4951.64',
'Chrome/96.0.4664.45',
'Chrome/94.0.4606.41'
]
headers = {'User-Agent': random.choice(user_agent_list)}
list1 = [
"Chrome/68.0.3440.106",
"Chrome/67.0.3396.99",
"Chrome/64.0.3282.186",
"Chrome/62.0.3202.62",
"Chrome/45.0.2454.101"
]
header={'User-Agent': random.choice(list1),
'Referer':'http://81.68.202.74/',
'sec-ch-ua': 'Google Chrome',
'sec-ch-ua-platform':'Windows'
}
base_url = 'http://81.68.202.74/datu/page/'
# verify=False would suppress SSL certificate warnings
def Base_page(Page_num):
    '''Fetch list page Page_num and return its HTML (None on failure).'''
    url = base_url + str(Page_num)
    try:
        r = requests.get(url, headers=headers, proxies=proxie)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        print('ERROR!!!')
def Page_url(fir_html):
    '''Collect every gallery link and title on a list page and hand them to Parse_img.'''
    doc = pq(fir_html)
    items = doc('.posts-wrapper .entry-media').items()
    num = 1
    for item in items:
        page_url = item('a').attr('href')
        fil_name = item('.lazyload').attr('alt').replace(":", "")
        # print(fil_name, page_url)
        print('------ item:', num, '------')
        Parse_img(page_url, fil_name)
        num += 1
def Parse_img(page_url, fil_name):
    '''Open one gallery page and save every image in its body.'''
    time.sleep(0.25)
    i = 1
    r = requests.get(page_url, headers=headers, proxies=proxie)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    sec_html = r.text
    doc = pq(sec_html)
    items = doc('.entry-content p img').items()
    for item in items:
        img_info = {
            'url': item.attr('src'),
            'name': item.attr('title')
        }
        print('Saving image', i)
        i += 1
        time.sleep(1.25)
        Save_img(img_info, fil_name)
def Save_img(dic, fil_name):
    url = dic['url']
    # time.sleep(0.25)
    data = requests.get(url, headers=header, proxies=proxie).content
    name = dic['name']
    houzui = url.split('.')[-1]  # file extension, e.g. jpg or png
    addr = './images_2/' + fil_name
    if not os.path.isdir(addr):
        os.makedirs(addr)
    with open('./images_2/{}/{}.{}'.format(fil_name, name, houzui), 'wb') as f:
        f.write(data)
def main():
    # range is half-open, so this covers pages 1 to 21
    for Page_num in range(1, 22):
        print('************* Page', Page_num, '*************')
        fir_html = Base_page(Page_num)
        Page_url(fir_html)
    print('Finished!')
main()
from urllib.parse import urlencode
import requests
import re
def Get_page(page_num):
    '''Request one page of Toutiao image-search results and return the parsed JSON (None on failure).'''
    headers = {
        'Host': 'so.toutiao.com',
        'user-agent': 'Mozilla/5.0',
        'cookie': 'tt=acd600b5ac865033f0ee83a63ed44675; '
    }
    base_url = 'https://so.toutiao.com/search?'
    params = {
        'keyword': '街拍',
        'pd': 'atlas',
        'dvpf': 'pc',
        'aid': '4916',
        'page_num': page_num,
        'search_json': '%7B%22from_search_id%22%3A%2220220104115420010212192151532E8188%22%2C%22origin_keyword%22%3A%22%E8%A1%97%E6%8B%8D%22%2C%22image_keyword%22%3A%22%E8%A1%97%E6%8B%8D%22%7D',
        'rawJSON': 1,
        'search_id': '2022062517173701021219402539E36546'
    }
    url = base_url + urlencode(params)
    try:
        r = requests.get(url, headers=headers)
        if r.status_code == 200:
            return r.json()
    except Exception:
        print('Failed!')
def get_images(json):
    '''Yield the name and URL of every image in one page of results.'''
    images = json['rawData']['data']
    for image in images:
        yield {
            'name': image['text'],
            'url': image['img_url']
        }
def save_image(dic):
    data = requests.get(dic['url']).content
    # replace " and : with ',' so the name is a valid file path
    name = dic['name'].replace(':', ',')
    name = name.replace('"', ',')
    name = name.replace('?', '')
    name = name.split('|')[-1]  # if the name contains '|', keep only the last part
    houzui = dic['url'].split('.')[-1]  # file extension, e.g. jpeg or png
    with open('./images/{}.{}'.format(name, houzui), 'wb') as f:
        f.write(data)
def main():
    for page_num in range(1, 20):
        result = Get_page(page_num)
        if result is None:  # the request failed, skip this page
            continue
        for item in get_images(result):
            save_image(item)
main()
Installing, using and configuring tesseract (and fixing its setup problems): https://blog.csdn.net/Alexa_/article/details/121192132
Fixing the Tesserocr error "RuntimeError: Failed to init API, possibly an invalid tessdata path:": https://blog.csdn.net/Aiy5249/article/details/118055261
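Both notes come down to telling the OCR wrapper where the Tesseract binary and tessdata live. A minimal sketch with pytesseract (the install path below is only an example and must match the local machine):

    import pytesseract
    from PIL import Image
    # point pytesseract at the locally installed tesseract.exe (example path)
    pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
    print(pytesseract.image_to_string(Image.open('captcha.png')))  # 'captcha.png' is a placeholder file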
import pymysql
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
import pymongo
from selenium.common.exceptions import TimeoutException
'''Connect to MongoDB'''
client = pymongo.MongoClient("mongodb://*********:27017")
mongo_db = client.test
col = mongo_db.taobao
'''Connect to MySQL (kept in a separate variable so it does not clash with the MongoDB handle)'''
db = pymysql.Connect(host='*********', user='root', password='*********', port=3306, db='xiaomishop')
cursor = db.cursor()
table = 'taobao'
'''Start URL'''
kw = 'iPad'  # search keyword
url = 'https://s.taobao.com/search?q=' + kw
driver = webdriver.Chrome()  # browser driver instance
wait = WebDriverWait(driver, 50)  # maximum wait time in seconds
def Get_page(page_num):
    '''
    Jump to result page page_num via the page-number input box and scrape it.
    :param page_num: page number to load
    :return: None
    '''
    print('=' * 25, 'Crawling page', page_num, '=' * 25)
    try:
        driver.get(url)
        input = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'J_Input')))  # wait until the page-number box is present
        submit = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'J_Submit')))  # wait until the confirm button is clickable
        input.clear()
        input.send_keys(page_num)
        submit.click()
        Get_product()
    except TimeoutException:  # selenium raises TimeoutException, not the built-in TimeoutError
        Get_page(page_num)
def Get_product():
    '''
    Collect the name, price, image, sales count, shop name and shop location
    of every product on the current page.
    :return: None
    '''
    html = driver.page_source
    doc = pq(html)
    items = doc('.J_MouserOnverReq').items()
    for item in items:
        product = {
            'name': item.find('.ctx-box .row-2 a').text(),
            'pic_img': item.find('.pic-box-inner .J_Itemlist_Pic_657878599145').attr('src'),
            'price': item.find('.ctx-box .row-1 strong').text(),
            'nums': item.find('.ctx-box .row-1 .deal-cnt').text(),
            'shop': item.find('.ctx-box .row-3 .shop span').text(),
            'location': item.find('.ctx-box .row-3 .location').text()
        }
        # write_to_mongodb(product)
        # write_to_mysql(product)
        print(product)
def write_to_mongodb(dic):
    '''
    Save one result dict to MongoDB.
    :param dic: product dict
    :return: None
    '''
    col.insert_one(dic)
def write_to_mysql(dic):
    '''
    Write one result dict to MySQL, building the column list and placeholders
    from the dict keys.
    :param dic: product dict
    :return: None
    '''
    keys = ','.join(dic.keys())
    values = ','.join(['%s'] * len(dic))
    sql = 'insert into {table}({keys}) values ({values})'.format(table=table, keys=keys, values=values)
    try:
        if cursor.execute(sql, tuple(dic.values())):
            print('successful')
            db.commit()
    except Exception:
        print('failed')
        db.rollback()
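# For the product dict built in Get_product, the statement above expands to
#   insert into taobao(name,pic_img,price,nums,shop,location) values (%s,%s,%s,%s,%s,%s)
# and the dict values are passed separately as query parameters.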
if __name__ == '__main__':
    for page_num in range(1, 101):
        Get_page(page_num)