'''
Count the words in the following list and the number of times each one appears.
'''
a=['apple', 'banana', 'apple', 'tomato', 'orange', 'apple', 'banana', 'watermelon']
dic={}
for key in a:
dic[key]=dic.get(key,0) +1
print(dic)
# Equivalent to (reset dic first, otherwise the previous counts would be doubled):
dic={}
for key in a:
    if key in dic:
        dic[key]=dic[key]+1
    else:
        dic[key]=1
print(dic)
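# A shorter alternative (a sketch using the standard library's collections.Counter):
from collections import Counter
print(dict(Counter(a)))  # same word-to-count mapping as above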
'''
Sort the dictionaries inside a list (e.g. given the following list object, sort the elements of alist by age in ascending order).
'''
alist=[{"name":"a", "age":20}, {"name":"b", "age":30}, {"name":"c", "age":25}]
# Method 1: bubble sort
for i in range(1,len(alist)):
    for j in range(0,len(alist)-i):
        if alist[j].get('age')>alist[j+1].get('age'):
            alist[j],alist[j+1]=alist[j+1],alist[j]
print(alist)
# Method 2: list.sort() with a key function
alist.sort(key=lambda x: x['age'])
print(alist)
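# A non-mutating alternative (a sketch): sorted() returns a new list, and operator.itemgetter
# can stand in for the lambda as the key function.
from operator import itemgetter
print(sorted(alist, key=itemgetter('age')))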
# Source: https://blog.csdn.net/weixin_45044554/article/details/125547411
'''
numpy
'''
import numpy as np
array001=np.array([1,2,3,4,5,6,7,8,9,10,11,12])
array002=np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
array003=array001.reshape(3,-1) # reshape to 3 rows; the number of columns is inferred
# array003=array001.reshape(-1,4) # reshape to 4 columns; the number of rows is inferred
# print(array003)
# array004=np.append(array001,[-1,-2,-3,-4])
array004=np.append(array002,[[-1],[-2],[-3]],axis=1) # append a column
# print(array004)
array005=array002.T
# print(array005)
array006=np.delete(array005,1,axis=0) # axis=0 deletes a row, axis=1 deletes a column
# print(array006)
array002[1:2,1:2]=0
# print(array002)
arr1 = np.array([1,2,3])
arr2 = np.array([4,5,6])
# print('Stacked vertically:\n',np.vstack((arr1,arr2)),
#       '\nStacked horizontally:\n',np.hstack((arr1,arr2)))
# np.mat() converts objects of other data structures (lists, arrays) to the matrix type
array1 = [1,2,3]
array2 = [6,7,8]
array3 = [11,12,17]
matrix = np.mat([array1,array2,array3])
# print('Data structure type:',type(matrix))
# print(matrix)
# Matrix concatenation: np.c_[] joins along columns (the argument order determines the result); np.r_[] joins along rows
mat1 = np.mat([[1,2],[3,4]])
mat2 = np.mat([4,5])
matrix_r = np.c_[mat1,mat2.T]
# print('mat2 appended to the right of the original matrix\n',matrix_r)
matrix_l = np.c_[mat2.T,mat1]
# print('mat2 appended to the left of the original matrix\n',matrix_l)
matrix_u = np.r_[np.mat([array1]),matrix]
# print('Matrix joined above the original matrix\n',matrix_u)
# Matrices are often converted to/from lists and arrays
# print('Matrix to list:\n',matrix.tolist(),
#       '\nMatrix to array:\n',np.array(matrix))
'''
pandas
'''
import pandas as pd
sdata = {'Ohio':35000,'Texas':71000,'Oregon':16000,'Utah':5000}
s0 = pd.Series(sdata)
# print('Series created from a dict\n',s0)
# print('Data structure type:',type(s0))
s1 = pd.Series([6,1,2,9])
# print('Series created from a list\n',s1)
s1=pd.Series([6,1,2,9],index=['a','b','c','d'])
# print(s1)
# Query
# print('Series values\n',s0.values)
# print('Series index\n',s0.index)
# print('Look up by position',s0[2])
# print('Look up by index label',s0['Utah'])
# print('Slice by position\n',s0[:2])
# print('Look up several index labels\n',s0[['Ohio','Oregon']])
# Add
s2=pd.concat([s1, pd.Series([12],index=['e'])]) # Series.append() was removed in pandas 2.0; pd.concat() does the same job
# print(s2)
# Delete
s3=s1.drop('a')
# print(s3)
# Modify
s1['a']=4
# print(s1)
# Series-specific operations
# Sort
# print(s1.sort_values()) # ascending sort
# Median of the series
# print(s1)
# print('Median: '+str(s1.median()))
# print('Values greater than the median\n',s1[s1>s1.median()])
# Series arithmetic
s2 = pd.Series([4,3,5,8],index=['a','b','c','d'])
# print(s2+s1)
# Time series
s3 = pd.Series([100,150,200])
# print('The generated series is:\n',s3)
idx = pd.date_range(start='2019-9',freq='M',periods=3) # freq is the frequency; 'M' means month-end (the last day of each month)
# print('\nThe generated DatetimeIndex is:\n',idx)
s3.index = idx
# print('\nThe resulting time series is:\n',s3)
'''pd.date_range(
start=None,      # start date
end=None,        # end date
periods=None,    # number of periods to generate
freq=None,       # frequency/interval; pd.date_range() defaults to calendar days, pd.bdate_range() to business days
tz=None,         # time zone
normalize=False, # whether to normalize start/end to midnight
name=None,       # name of the resulting DatetimeIndex
closed=None,     # whether the endpoints are included
)'''
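# A few more frequency examples (a sketch; the dates below are only illustrative):
idx_daily = pd.date_range(start='2019-09-01', periods=3)             # default freq: calendar days
idx_weekly = pd.date_range(start='2019-09-01', periods=3, freq='W')  # weekly
idx_bdays = pd.bdate_range(start='2019-09-02', periods=3)            # business days only
# print(idx_daily, idx_weekly, idx_bdays, sep='\n')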
# DataFrame
# pd.DataFrame(data,columns,index)
dic1 = {'name':['Tom','Lily','Cindy','Petter'],'no':['001','002','003','004'],'age':[16,16,15,16],'gender':['m','f','f','m']}
df1 = pd.DataFrame(dic1)
# print('Data structure type',type(df1))
df1.index.name = 'id' # index.name names the index
# print(df1)
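# A sketch of the columns/index parameters from the pd.DataFrame(data, columns, index)
# signature noted above (the values here are only illustrative):
df_demo = pd.DataFrame([['Tom', 16], ['Lily', 16]],
                       columns=['name', 'age'],
                       index=['001', '002'])
# print(df_demo)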
# Query
'''
Get column labels: df.columns
Get row index:     df.index
Get values:        df.values
'''
column = df1.no
row = df1.loc[3]
# print(df1)
# print('\nColumn data\n',column,'\nRow data\n',row)
# Add
# print('Before:\n',df1)
# DataFrame.append() was removed in pandas 2.0; pd.concat() adds the row instead
df2 = pd.concat([df1, pd.DataFrame([{'name':'Stark','no':'005','age':15,'gender':'m'}])], ignore_index=True) # ignore_index=True continues the index at 4; otherwise the new row keeps its own index 0
# print('Row added:\n',df2)
df2['new_Col'] = [1,2,3,4,5]
# print('Column added:\n',df2)
# Delete
df3 = df1.copy()
# print('Data before processing\n',df3)
df3b = df3.drop(['name'],axis=1)
# print('DataFrame after dropping a column\n',df3b)
df3c = df3.drop([2])
# print('DataFrame after dropping a row\n',df3c)
# Merge / modify
# Merge by column
df4 = pd.DataFrame({'address':['school','home','school','school','home']})
df5 = pd.concat([df2,df4],axis=1)
# print('df2 before the merge\n',df2)
# print('df4 before the merge\n',df4)
# print('df5 after the merge\n',df5)
# Merge by row
df6 = pd.DataFrame({'name':['Tony'],'no':['005'],'age':[16],'gender':['m']})
df7 = pd.concat([df1,df6],axis=0,ignore_index=True)
# print('df1 before the merge\n',df1)
# print('df6 before the merge\n',df6)
# print('df7 after the merge\n',df7)
"""
Created on Wed Nov 23 00:03:30 2022
@author: fch
"""
import time
'''
Site URL: https://m.7160.top/rentiyishu/
'''
import random
from pyquery import PyQuery as pq
import requests
import os
from joblib import Parallel, delayed
import socket
socket.setdefaulttimeout(10)
# Suppress the requests certificate warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.DEFAULT_RETRIES = 5 # intended to increase the number of connection retries (note: requests itself does not read this attribute)
IP_list=[]
with open('./IP代理池.txt','r') as f:
    for line in f:
        line = line.strip('\n') # strip the newline
        IP_list.append(line)
'''
random.choice returns a string read from the file; eval converts it back to a dict.
'''
# proxie = eval(random.choice(IP_list))
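# A slightly safer alternative (a sketch): ast.literal_eval only evaluates Python literals,
# so a malformed line in the proxy file cannot execute arbitrary code the way eval could.
# import ast
# proxie = ast.literal_eval(random.choice(IP_list))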
user_agent_list = [
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)',
'UCWEB7.0.2.37/28/999',
'NOKIA5700/ UCWEB7.0.2.37/28/999',
'Openwave/ UCWEB7.0.2.37/28/999',
'Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999',
'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25',
]
def Res_url(url):
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
headers = {'User-Agent': random.choice(user_agent_list),
'Referer': 'https://m.7160.top/rentiyishu/',
"Connection": "close"}
try:
s=requests.session()
s.keep_alive = False
r = s.get(url, headers=headers, proxies=eval(random.choice(IP_list)), timeout=(5, 10),verify=False,allow_redirects=False)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print(url)
return 'over'
'''
Scrape the link addresses and names of all image pages from the index page and return them as lists.
'''
def Parse_page(html):
page_url_list,fil_name_list=[],[]
doc = pq(html)
items = doc('.listUll2 li').items()
for item in items:
page_url_list.append(item('div a').attr('href'))
fil_name_list.append(item('p').text())
return page_url_list,fil_name_list
def Get_img(page_url, file_name):
num = 1
base_url = page_url.split('.')
while True:
if num == 1:
url = page_url
else:
url = base_url[0] + '.' + base_url[1] + '.' + base_url[2] + '_' + str(num) + '.' + base_url[3]
time.sleep(1)
html = Res_url(url)
if html != 'over':
doc = pq(html)
img_url = doc('.ArticleBox img').attr('src')
img_basename = doc('.ArticleBox img').attr('alt')
            if img_basename is None:
                img_name = str(num) # fall back to the image number when the alt text is missing
            else:
                img_name = img_basename.replace('/','')
            if img_url is None:
                num += 1
            else:
                Save_img(img_url, file_name, img_name)
                num += 1
        else:
            break # exit the while loop
def Save_img(img_url, file_name, img_name):
time.sleep(1)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
headers = {'User-Agent': random.choice(user_agent_list),
'Referer': 'https://m.7160.top/rentiyishu/',
"Connection": "close"}
houzui = img_url.split('.')[-1]
addr = './images_3/' + file_name
if not os.path.isdir(addr):
os.makedirs(addr)
    r = requests.get(img_url, headers=headers, proxies=eval(random.choice(IP_list)), verify=False)
    if r.status_code==200:
        with open('./images_3/{}/{}.{}'.format(file_name, img_name, houzui), 'wb') as f:
            f.write(r.content)
    else:
        print('Image URL does not exist')
def main():
page_num = 1
base_url = 'https://m.7160.top/rentiyishu/index'
while True:
if page_num == 1:
url = base_url + '.html'
else:
url = base_url + '_' + str(page_num) + '.html'
html = Res_url(url)
if html != 'over':
            print('Saving page', page_num)
            page_url_list, file_name_list=Parse_page(html)
            '''
            Parallelism: n_jobs=5 runs 5 jobs concurrently.
            '''
Parallel(n_jobs=5)(delayed(Get_img)(page_url,file_name) for page_url,file_name in zip(page_url_list,file_name_list))
        else:
            print('Program finished')
            return 'Program finished'
page_num += 1
main()
import requests
from bs4 import BeautifulSoup
import time
'''
Number of pages on the proxy-IP site.
'''
num=51
list_ip = []
list_port = []
list_type=[]
list_headers_ip = []
def check_ip(list_ip):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36 Edg/91.0.864.71',
'Connection': 'close',
'Referer': 'https://m.7160.top/rentiyishu/'
}
# url = 'https://www.baidu.com' # e.g. use Baidu to test whether an IP works
url = 'https://pic.99ym.cn/d/file/202009/mz1kqud4v2i.jpg'
can_use = []
for ip in list_ip:
try:
            response = requests.get(url=url, headers=headers, proxies=ip, timeout=3, verify=False) # test the proxy against the URL with a 3-second timeout
if response.status_code == 200:
can_use.append(ip)
except Exception as e:
print(e)
return can_use
for start in range(1, num):
    url = 'https://www.kuaidaili.com/free/inha/{}/'.format(start) # 15 entries per page; pages 1 to num-1 are scraped
    print("Processing url: ", url)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36 Edg/91.0.864.71'}
response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
ip = soup.select('#list > table > tbody > tr > td:nth-child(1)')
port = soup.select('#list > table > tbody > tr > td:nth-child(2)')
type = soup.select('#list > table > tbody > tr > td:nth-child(4)')
for i in ip:
list_ip.append(i.get_text())
for i in port:
list_port.append(i.get_text())
for i in type:
list_type.append(i.get_text())
    time.sleep(0.5) # throttle requests so no data is missed
# Proxy format: 'http':'http://119.14.253.128:8088'
# requests looks proxies up by lowercase scheme key, so 'http'/'https' keys are used here
for i in range(len(list_ip)):
    if list_type[i]=='HTTP':
        proxies = {
            'http':'http://'+list_ip[i]+':'+list_port[i]
        }
    else:
        proxies = {
            'https': 'https://' + list_ip[i] + ':' + list_port[i]
        }
    list_headers_ip.append(proxies)
can_use = check_ip(list_headers_ip)
print('Usable proxy IPs:', can_use)
print('Number of usable proxy IPs:', len(can_use))
with open('./IP代理池.txt','w') as f:
    for i in can_use:
        f.write(str(i)+'\n')
'''
Site URL: http://www.meinuzi.com/
'''
import random
from pyquery import PyQuery as pq
import requests
# requests matches proxies by lowercase scheme key, so 'http' is used here
proxies=[
    {'http':'http://122.140.5.115:9999'},
    {'http':'http://113.101.96.66:8080'},
    {'http':'http://113.124.86.24:9999'},
    {'http':'http://121.13.252.58:41564'},
    {'http':'http://61.216.185.88:60808'},
    {'http':'http://58.20.184.187:9091'},
    {'http':'http://183.236.123.242:8060'},
    {'http':'http://116.9.163.205:58080'},
    {'http':'http://222.74.73.202:42055'},
    {'http':'http://183.247.202.208:30001'},
    {'http':'http://39.108.101.55:1080'},
    {'http':'http://47.105.91.226:8118'},
]
proxie=random.choice(proxies)
user_agent_list = ['Chrome/86.0.4240.198',
'Chrome/101.0.4951.64',
'Chrome/96.0.4664.45',
'Chrome/94.0.4606.41'
]
headers = {'User-Agent': random.choice(user_agent_list),
'Referer':'http://www.meinuzi.com'}
'''
Request the URL and return the HTML.
'''
def Req_url(url):
try:
r=requests.get(url,headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding=r.apparent_encoding
return r.text
except:
# print('ERROR!')
return 'over'
'''
Parse the index page and find the URLs of the image pages.
'''
def Parse_page(html):
doc=pq(html)
items=doc('.m-list-main li').items()
i=1
for item in items:
page_url=item('.u-img a').attr('href')
name=item('.u-img img').attr('alt')
print("保存第",i,'张图')
Get_img(page_url,name)
i+=1
'''
Parse the image page's URL and find the URL of each image.
'''
def Get_img(page_url,name):
html=Req_url(page_url)
doc=pq(html)
img_baseurl=doc('.g-mn .m-list-content img').attr('src')
Save_img(img_baseurl,name)
'''
Save the image to a folder using the URL obtained above.
'''
def Save_img(img_baseurl,name):
img_url=headers['Referer']+img_baseurl
data=requests.get(img_url,headers=headers,proxies=proxie,timeout=15).content
houzui=img_url.split('.')[-1]
with open('./images_1/{}.{}'.format(name,houzui),'wb') as f:
f.write(data)
def main():
    num = 7 # start from page num
    '''
    When Req_url returns 'over', the page does not exist, i.e. every page has been scraped.
    '''
while True:
page_url='http://www.meinuzi.com/index_'+str(num)+'.html'
html = Req_url(page_url)
if html != 'over':
            '''
            The page may exist but contain no image data (the tag holds only whitespace).
            '''
            doc=pq(html)
            item = doc('.m-list-main ul').text()
            if item != '':
                print('********** Page', num, '**********')
                Parse_page(html)
                num+=1
            else:
                print('Program finished')
                return 'Program finished'
        else:
            print('Program finished')
            return 'Program finished'
main()
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 19 22:44:37 2022
@author: fch
"""
import random
from pyquery import PyQuery as pq
import requests
import os
import time
import json
# requests matches proxies by lowercase scheme key, so 'http' is used here
proxies=[
    {'http':'http://122.140.5.115:9999'},
    {'http':'http://113.101.96.66:8080'},
    {'http':'http://113.124.86.24:9999'},
    {'http':'http://121.13.252.58:41564'},
    {'http':'http://61.216.185.88:60808'},
    {'http':'http://58.20.184.187:9091'},
    {'http':'http://183.236.123.242:8060'},
    {'http':'http://116.9.163.205:58080'},
    {'http':'http://222.74.73.202:42055'},
    {'http':'http://183.247.202.208:30001'},
    {'http':'http://39.108.101.55:1080'},
    {'http':'http://47.105.91.226:8118'},
]
proxie=random.choice(proxies)
user_agent_list = ['Chrome/86.0.4240.198',
'Chrome/101.0.4951.64',
'Chrome/96.0.4664.45',
'Chrome/94.0.4606.41'
]
headers = {'User-Agent': random.choice(user_agent_list)}
list1 = [
"Chrome/68.0.3440.106",
"Chrome/67.0.3396.99",
"Chrome/64.0.3282.186",
"Chrome/62.0.3202.62",
"Chrome/45.0.2454.101"
]
header={'User-Agent': random.choice(list1),
'Referer':'http://81.68.202.74/',
'sec-ch-ua': 'Google Chrome',
'sec-ch-ua-platform':'Windows'
}
base_url = 'http://81.68.202.74/datu/page/'
# verify=False suppresses SSL warnings
def Base_page(Page_num):
url = base_url + str(Page_num)
try:
r = requests.get(url, headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print('ERROR!!!')
def Page_url(fir_html):
doc = pq(fir_html)
items = doc('.posts-wrapper .entry-media').items()
num = 1
for item in items:
page_url=item('a').attr('href')
fil_name=item('.lazyload').attr('alt').replace(":","")
# print(fil_name,page_url)
        print('------ Item', num, '------')
Parse_img(page_url, fil_name)
num+=1
def Parse_img(page_url,fil_name):
time.sleep(0.25)
i=1
r = requests.get(page_url, headers=headers,proxies=proxie)
r.raise_for_status()
r.encoding = r.apparent_encoding
sec_html=r.text
doc=pq(sec_html)
items=doc('.entry-content p img').items()
    for item in items:
        img_info = { # renamed from 'dict' to avoid shadowing the builtin
            'url':item.attr('src'),
            'name':item.attr('title')
        }
        print('Saving image', i)
        i+=1
        time.sleep(1.25)
        Save_img(img_info,fil_name)
def Save_img(dic,fil_name):
url=dic['url']
# time.sleep(0.25)
data = requests.get(url,headers=header,proxies=proxie).content
name=dic['name']
houzui=url.split('.')[-1]
addr='./images_2/'+fil_name
if not os.path.isdir(addr):
os.makedirs(addr)
with open('./images_2/{}/{}.{}'.format(fil_name,name,houzui),'wb') as f:
f.write(data)
def main():
    # range() is half-open, so this covers pages 1 through 21
    for Page_num in range(1,22):
        print('************* Page', Page_num, '*************')
fir_html = Base_page(Page_num)
Page_url(fir_html)
print("结束!")
main()