Using the pyquery library
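
pyquery wraps an HTML document in jQuery-style CSS-selector queries. A minimal sketch (with hypothetical markup) of the three calls the crawler below relies on:

from pyquery import PyQuery as pq

demo = pq('<ul><li class="a"><a href="/x">X</a></li><li class="a"><a href="/y">Y</a></li></ul>')
print(demo('.a a').text())          # 'X Y' -- text of all matches joined by spaces, hence the split(' ') below
for node in demo('.a a').items():   # iterate over matches as PyQuery objects
    print(node.attr('href'))        # '/x' then '/y'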

import csv
from pyquery import PyQuery as pq
import requests
import json

def Get_HTML(url):
    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print("Error!")
        return None

def parse_page(html):
    doc=pq(html)
    title=doc('.li03 a').text().split(' ')  # titles
    actor=doc('.li04 span').text().split(' ')  # cast
    index=doc('.li01 b').text().split(' ')     # rank numbers
    href=[]  # links
    for i in doc('.li03 a').items():  # iterate over the matched <a> nodes
        href.append(i.attr('href'))
    score=doc('.li05 span').text().split(' ')  # ratings
# Save as a CSV file
    # with open('1905电影排行榜.csv','a+') as csvfile:
    #     writer = csv.writer(csvfile)
    #     writer.writerow(['序号', '名称', '链接', '演员', '评分'])
    #     for i in range(len(title)):
    #         writer.writerow([index[i],title[i],href[i],actor[i],score[i]])
# Save as a dict
    Result={}
    for i in range(len(index)):   # one entry per listed movie
        result={
            '序号':index[i],
            '名称':title[i],
            '链接':href[i],
            '演员':actor[i],
            '评分':score[i]
        }
        Result[index[i]]=result
    return Result
def write_to_txtfile(item):
    with open('1905电影排行榜.txt', 'a+', encoding='UTF-8') as f:
        f.write(json.dumps(item, ensure_ascii=False) + '\n')
def main():
    url = 'https://www.1905.com/vod/top/lst/'
    html = Get_HTML(url)
    item=parse_page(html)
# Write the dict to a TXT file
    # for i in range(1,len(item)+1):
    #     write_to_txtfile(item[str(i)])
# Write the dict to a CSV file
    # with open('1905电影排行榜_111.csv','a+',encoding='utf-8') as csvfile:
    #     fieldnames = ['序号', '名称', '链接', '演员', '评分']
    #     writer=csv.DictWriter(csvfile,fieldnames=fieldnames)
    #     writer.writeheader()
    #     for i in range(len(item)):
    #         writer.writerow(item[str(i+1)])

main()

Crawling Ajax data
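
The endpoint below is an Ajax interface that returns JSON; urlencode() assembles the query string from a plain dict and percent-encodes non-ASCII values along the way. A tiny sketch:

from urllib.parse import urlencode

print(urlencode({'keyword': '街拍', 'page_num': 1}))
# keyword=%E8%A1%97%E6%8B%8D&page_num=1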

from urllib.parse import urlencode
import os
import requests

def Get_page(page_num):
    headers={
        'Host':'so.toutiao.com',
        'user-agent':'Mozilla/5.0',
        'cookie':'tt=acd600b5ac865033f0ee83a63ed44675; '
    }
    base_url='https://so.toutiao.com/search?'
    params = {
        'keyword': '街拍',
        'pd': 'atlas',
        'dvpf': 'pc',
        'aid': '4916',
        'page_num': page_num,
        # the raw JSON string; urlencode() percent-encodes it (pre-encoding it here would get double-encoded)
        'search_json': '{"from_search_id":"20220104115420010212192151532E8188","origin_keyword":"街拍","image_keyword":"街拍"}',
        'rawJSON': 1,
        'search_id': '2022062517173701021219402539E36546'
    }
    url=base_url+urlencode(params)
    try:
        r=requests.get(url,headers=headers)
        if r.status_code==200:
            return r.json()
    except requests.RequestException:
        print('Failed!')

def get_images(resp_json):
    images = resp_json['rawData']['data']
    for image in images:
        yield {
            'name': image['text'],
            'url':image['img_url']
        }

def save_image(item):
    data=requests.get(item['url']).content
# replace '"' and ':' with ',' -- they are illegal in Windows file paths
    name=item['name'].replace(':',',')
    name=name.replace('"',',')
    name=name.replace('?','')
    name=name.split('|')[-1]  # if the title contains '|', keep only the last part
    suffix=item['url'].split('.')[-1]   # file extension, e.g. jpeg or png
    os.makedirs('./images',exist_ok=True)   # make sure the target directory exists
    with open("./images/{}.{}".format(name,suffix),'wb') as f:
        f.write(data)
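
Replacing characters one at a time is easy to get wrong; a more thorough sketch strips every character Windows rejects in file names with a single regex:

import re

def safe_name(name):
    # \ / : * ? " < > | are all invalid in Windows file names
    return re.sub(r'[\\/:*?"<>|]', ',', name)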

def main():
    for page_num in range(1,20):
        resp=Get_page(page_num)
        if resp is None:   # skip pages whose request failed
            continue
        for item in get_images(resp):
            save_image(item)


main()

A selenium crawler example

import pymysql
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
import pymongo

'''Connect to MongoDB'''
client=pymongo.MongoClient("mongodb://*********:27017")
mongo_db=client.test   # a separate name, so the MySQL connection below does not overwrite it
col=mongo_db.taobao

'''Connect to MySQL'''
db=pymysql.connect(host='*********',user='root',password='*********',port=3306,db='xiaomishop')
cursor=db.cursor()
table='taobao'

'''Initial URL'''
kw='iPad'    # search keyword
url='https://s.taobao.com/search?q='+kw

driver=webdriver.Chrome()  # browser instance
wait=WebDriverWait(driver,50)  # maximum wait time in seconds

def Get_page(page_num):
    '''
    Fetch the HTML of page page_num and hand it to Get_product.
    :param page_num:
    :return: None
    '''
    print('='*25,'crawling page',page_num,'='*25)
    try:
        driver.get(url)
        input = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'J_Input')))  # wait until the page-number box is present
        submit=wait.until(EC.element_to_be_clickable((By.CLASS_NAME,'J_Submit')))    # wait until the confirm button is clickable
        input.clear()
        input.send_keys(page_num)
        submit.click()
        Get_product()
    except TimeoutException:   # WebDriverWait raises TimeoutException, not the builtin TimeoutError
        Get_page(page_num)

def Get_product():
    '''
    Collect each product's name, price, image, sales count, shop name and shop location on the current page.
    :return: None
    '''
    html=driver.page_source
    doc=pq(html)
    items=doc('.J_MouserOnverReq').items()
    for item in items:
        product={
            'name': item.find('.ctx-box .row-2 a').text(),
            'pic_img':item.find('.pic-box-inner img').attr('src'),   # generic selector; the original J_Itemlist_Pic_<id> class is unique to one listing
            'price':item.find('.ctx-box .row-1 strong').text(),
            'nums':item.find('.ctx-box .row-1 .deal-cnt').text(),
            'shop':item.find('.ctx-box .row-3 .shop span').text(),
            'location':item.find('.ctx-box .row-3 .location').text()
        }
        # write_to_mongodb(product)
        # write_to_mysql(product)
        print(product)

def write_to_mongodb(dic):
    '''
    Save a result dict to MongoDB.
    :param dic:
    :return: None
    '''
    col.insert_one(dic)

def write_to_mysql(dic):
    '''
    Write a result dict to MySQL.
    :param dic:
    :return:
    '''
    keys=','.join(dic.keys())
    values=','.join(['%s']*len(dic))
    sql='insert into {table}({keys}) values ({values})'.format(table=table,keys=keys,values=values)
    try:
        if cursor.execute(sql,tuple(dic.values())):
            print('successful')
            db.commit()
    except pymysql.MySQLError:
        print('failed')
        db.rollback()
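
For reference, the joins above build the following statement for a small hypothetical dict; the actual values are bound separately by cursor.execute, which also guards against SQL injection:

demo = {'name': 'iPad', 'price': '3999'}
demo_sql = 'insert into {table}({keys}) values ({values})'.format(
    table='taobao', keys=','.join(demo.keys()), values=','.join(['%s'] * len(demo)))
print(demo_sql)   # insert into taobao(name,price) values (%s,%s)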


if __name__ == '__main__':
    for page_num in range(1,101):
        Get_page(page_num)

Crawling dynamically rendered pages --- using selenium

import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver import ActionChains

'''Basic usage'''
# create a browser instance
browser=webdriver.Chrome()
try:
    '''Visit a page'''
    browser.get('https://www.baidu.com')
    input=browser.find_element(By.ID,'kw')
    input.send_keys('python')
    input.send_keys(Keys.ENTER)
    wait=WebDriverWait(browser,10)
    wait.until(EC.presence_of_element_located((By.ID,'content_left')))
    print('='*50)
    print(browser.current_url)
    print('=' * 50)
    print(browser.get_cookies())
    print('=' * 50)
    # print(browser.page_source)
    browser.quit()
except:
    browser.close()

'''Interacting with nodes'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.taobao.com')
    input=driver.find_element(By.ID,'q')
    input.send_keys('手机')
    # input.clear()   # clear the input box
    button=driver.find_element(By.CLASS_NAME,'btn-search')
    button.click()
    driver.close()
except:
    driver.close()


'''Action chains'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    driver.switch_to.frame('iframeResult')     # switch into the iframe
    source=driver.find_element(By.ID,'draggable')
    target=driver.find_element(By.ID,'droppable')
    actions=ActionChains(driver)
    actions.drag_and_drop(source,target)
    actions.perform()

except:
    driver.close()

'''Execute JavaScript, e.g. scroll to the bottom of the page'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.zhihu.com/explore')
    driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
    driver.execute_script('alert("To Bottom")')
except:
    print('error')
    driver.close()

'''Get attributes'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.zhihu.com/explore')
    logo=driver.find_element(By.ID,'special')
    print(logo)
    print(logo.get_attribute('class'))
    driver.close()
except:
    print('error')
    driver.close()

'''Get text'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.zhihu.com/explore')
    res=driver.find_element(By.CLASS_NAME,'ExploreSpecialCard-contentTitle')
    print(res.text)
    driver.close()
except:
    print('error')
    driver.close()

'''Get id, location, tag name and size'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.zhihu.com/explore')
    res=driver.find_element(By.CLASS_NAME,'ExploreSpecialCard-contentTitle')
    print('id',res.id)
    print('location',res.location)
    print('tag',res.tag_name)
    print('size',res.size)
    driver.close()
except:
    print('error')
    driver.close()

'''Explicit waits'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.taobao.com')
# create a WebDriverWait object with the maximum wait time
    wait=WebDriverWait(driver,10)
# until() takes a wait condition; EC.presence_of_element_located means the node has appeared
    input=wait.until(EC.presence_of_element_located((By.ID,'q')))
    button=wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'.btn-search')))
    print(input,button)
    driver.close()
except:
    print('error')
    driver.close()
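
Besides explicit waits, Selenium also supports implicit waits: a single global timeout under which every find_element call polls (a minimal sketch):

'''Implicit waits'''
driver=webdriver.Chrome()
try:
    driver.implicitly_wait(10)   # every find_element now retries for up to 10 seconds
    driver.get('https://www.taobao.com')
    input=driver.find_element(By.ID,'q')
    print(input)
    driver.close()
except Exception:
    print('error')
    driver.close()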

'''Back and forward'''
driver=webdriver.Chrome()
try:
    driver.get('https://www.taobao.com/')
    driver.get('https://www.baidu.com/')
    driver.get('https://www.python.org/')
    driver.back()   # go back
    time.sleep(1)
    driver.forward()  # go forward
    driver.close()
except:
    print('error')
    driver.close()

Saving crawled data to MySQL

import pymysql
from pyquery import PyQuery as pq
import requests

def Get_HTML(url):
    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        print("Error!")
        return None

def parse_page(html):
    doc=pq(html)
    title=doc('.li03 a').text().split(' ')  # titles
    actor=doc('.li04 span').text().split(' ')  # cast
    index=doc('.li01 b').text().split(' ')     # rank numbers
    href=[]  # links
    for i in doc('.li03 a').items():  # iterate over the matched <a> nodes
        href.append(i.attr('href'))
    score=doc('.li05 span').text().split(' ')  # ratings
# Save as a dict
    Result={}
    for i in range(len(index)):   # one entry per listed movie
        result={
            '序号':index[i],
            '名称':title[i],
            '链接':href[i],
            '演员':actor[i],
            '评分':score[i]
        }
        Result[index[i]]=result
    return Result

def write_to_mysql(dic):
    db=pymysql.connect(host='*******',user='root',password='******',port=3306,db='*******')
    cursor=db.cursor()
    table='movie'  # table name
    keys=','.join(dic.keys())
    values=','.join(['%s']*len(dic))
    sql='insert into {table}({keys}) values ({values})'.format(table=table,keys=keys,values=values)
    try:
        if cursor.execute(sql,tuple(dic.values())):
            print("successful")
            db.commit()
    except pymysql.MySQLError:
        print("failed")
        db.rollback()
    db.close()

# If the primary key already exists, update the row; otherwise insert it
def update_data(dic):
    db = pymysql.connect(host='*******',user='root',password='******',port=3306,db='*******')
    cursor=db.cursor()
    table='movie'    # table name
    keys=','.join(dic.keys())
    values=','.join(['%s']*len(dic))
    sql='insert into {table}({keys}) values ({values}) ON DUPLICATE KEY UPDATE'.format(table=table,keys=keys,values=values)
    update=','.join([' {key}=%s'.format(key=key) for key in dic])
    sql+=update
    try:
        if cursor.execute(sql,tuple(dic.values())*2):   # values bound twice: once for INSERT, once for UPDATE
            print("successful")
            db.commit()
    except pymysql.MySQLError:
        print("failed")
        db.rollback()
    db.close()
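
With a hypothetical two-key dict, the upsert statement assembled above reads as follows; tuple(dic.values())*2 supplies every value twice, once for the INSERT and once for the UPDATE:

demo = {'name': 'xxx', 'score': '9.0'}
demo_sql = 'insert into movie({keys}) values ({values}) ON DUPLICATE KEY UPDATE'.format(
    keys=','.join(demo.keys()), values=','.join(['%s'] * len(demo)))
demo_sql += ','.join([' {key}=%s'.format(key=key) for key in demo])
print(demo_sql)   # insert into movie(name,score) values (%s,%s) ON DUPLICATE KEY UPDATE name=%s, score=%s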

# Query
def search():
    db = pymysql.connect(host='*******',user='root',password='******',port=3306,db='*******')
    cursor = db.cursor()
    sql = 'select * from movie'
    try:
        cursor.execute(sql)
        print(cursor.rowcount)
        row = cursor.fetchone()
        while row:
            print('Row:', row)
            row = cursor.fetchone()
    except pymysql.MySQLError:
        print("Error")
    db.close()

def main():
    url = 'https://www.1905.com/vod/top/lst/'
    html = Get_HTML(url)
    item=parse_page(html)
    for i in range(len(item)):
        write_to_mysql(item[str(i+1)])
main()

Visualizing domestic COVID-19 data

import requests
import json
import re
import pandas as pd
import time
from pyecharts.components import Table
from pyecharts import options as opts
from pyecharts.charts import Bar, Page,Pie,Line,Timeline,Map
from pyecharts.commons.utils import JsCode
from pyecharts.options import DataZoomOpts,ComponentTitleOpts

def Get_HTML():
    headers = {'User-Agent': 'Mozilla/5.0'}
    url = 'https://m.look.360.cn/events/feiyan?sv=&version=&market=&device=2&net=4&stype=&scene=&sub_scene=&refer_scene=&refer_subscene=&f=jsonp&location=true&sort=2&_=1649252949072&callback=jsonp2'
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        text = r.text[7:-2]   # strip the JSONP wrapper: the leading 'jsonp2(' and the trailing ');'
        response = json.loads(text)
        return response
    except requests.RequestException:
        print("error!")
def China_data_Total():
    url='http://m.look.360.cn/subject/400?sign=360_6aa05217'
    headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0'}
    html = requests.get(url, headers=headers).text
    pattern=re.compile('"qcmsint1":"0"},"feiyanTrend":(.*),"local":',re.S)
    item=re.findall(pattern,html)
    response=json.loads(item[0])
    data = {
        "截止日期":response['total']['modifyTime'],
        "现存确诊":response['total']['currentConfirmed'],
        "境外输入":response['total']['overseasInput'],
        "现存无症状":response['total']['asymptom'],
        "累计确诊":response['total']['diagnosed'],
        "累计治愈":response['total']['cured'],
        "累计死亡":response['total']['died'],
        "现存_较昨日":response['newDiff']['currentConfirmed'],
        "境外输入_较昨日":response['newDiff']['overseasInput'],
        "无症状_较昨日":response['newDiff']['asymptom'],
        "累计_较昨日":response['newDiff']['diagnosed'],
        "治愈_较昨日":response['newDiff']['cured'],
        "死亡_较昨日":response['newDiff']['died']
    }
    return data
# Nationwide data
def China_data(response):
    P_name, P_Ljqz, P_Cured, P_Died, P_Xcqz, P_Xzqz = [], [], [], [], [], []    # province-level data
    for i in range(34):   # 34 provincial-level regions
        P_name.append(response['data'][i]['cityShortName'])  # region name
        P_Ljqz.append(response['data'][i]['diagnosed'])  # cumulative confirmed
        P_Cured.append(response['data'][i]['cured'])  # cured
        P_Died.append(response['data'][i]['died'])  # deaths
        P_Xcqz.append(response['data'][i]['currentConfirmed'])  # active confirmed
        P_Xzqz.append(response['data'][i]['diffDiagnosed'])  # newly confirmed
    data = {
        "地区": P_name,
        "新增确诊": P_Xzqz,
        "现存确诊": P_Xcqz,
        "累计确诊": P_Ljqz,
        "治愈人数": P_Cured,
        "死亡人数": P_Died,
    }
    return data

# Henan province data
def Henan_data():
    url = 'https://m.look.360.cn/events/feiyanCityInfo?sv=&version=&market=&device=2&net=4&stype=&scene=&sub_scene=&refer_scene=&refer_subscene=&f=jsonp&ename=henan&_=1650426134574&callback=jsonp4'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0'}
    html = requests.get(url, headers=headers).text[7:-2]   # strip the JSONP wrapper
    response=json.loads(html)
    # print(response['data']['cityInfo'])
    C_name, C_Ljqz, C_Cured, C_Died, C_Xzqz = [], [], [], [], []  # city-level data
    for i in range(len(response['data']['cityInfo'])):
        C_name.append(response['data']['cityInfo'][i]['cityName'])   # name
        C_Ljqz.append(response['data']['cityInfo'][i]['diagnosed'])  # cumulative confirmed
        C_Cured.append(response['data']['cityInfo'][i]['cured'])  # cured
        C_Died.append(response['data']['cityInfo'][i]['died'])  # deaths
        C_Xzqz.append(response['data']['cityInfo'][i]['diffDiagnosed'])  # newly confirmed
    data = {
        "地区": C_name,
        "新增确诊": C_Xzqz,
        "累计确诊": C_Ljqz,
        "治愈人数": C_Cured,
        "死亡人数": C_Died
    }
    return data

def Abroad_data(response):  # international data
    C_name, Ljqz, Cured, Died, Xzqz = [], [], [], [], []
    for i in range(196):
        c_name = response['country'][i]['provinceName']  # country/region name
        ljqz = response['country'][i]['diagnosed']  # cumulative confirmed
        cured = response['country'][i]['cured']  # cured
        died = response['country'][i]['died']  # deaths
        xzqz = response['country'][i]['diffDiagnosed']  # newly confirmed
        C_name.append(c_name)
        Ljqz.append(ljqz)
        Cured.append(cured)
        Died.append(died)
        Xzqz.append(xzqz)
    data = {
        "地区": C_name,
        "新增确诊": Xzqz,
        "累计确诊": Ljqz,
        "治愈人数": Cured,
        "死亡人数": Died
    }
    return data

def TimeLine():
    headers = {'User-Agent': 'Mozilla/5.0'}
    url = 'https://api.look.360.cn/events/feiyanHomeMulTrend?sv=&version=&market=&device=2&net=4&stype=&scene=&sub_scene=&refer_scene=&refer_subscene=&f=jsonp&_=1650032782170&callback=jsonp3'
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        text = r.text[7:-2]   # strip the JSONP wrapper
        response = json.loads(text)
    except requests.RequestException:
        print("error!")
        return None
    time,Ljqz, Cured, Died, Xcqz, Xzqz, Xz_Died, Xz_Cured= [], [], [], [], [], [], [], []
    for i in range(len(response['data']['timeline_n1'])):
        time.append(response['data']['timeline_n1'][i]['time'])         # date
        Xzqz.append(response['data']['timeline_n1'][i]['diagnosed'])    # newly confirmed
        Xcqz.append(response['data']['timeline_n2'][i]['total_currentConfirmed']) # active confirmed
        Ljqz.append(response['data']['timeline_n2'][i]['total_diagnosed'])   # cumulative confirmed
        Cured.append(response['data']['timeline_n3'][i]['total_cured'])      # cumulative cured
        Died.append(response['data']['timeline_n3'][i]['total_died'])      # cumulative deaths
        Xz_Died.append(response['data']['timeline_n4'][i]['died'])        # new deaths
        Xz_Cured.append(response['data']['timeline_n4'][i]['cured'])      # newly cured
    data={
        "时间":time,
        "新增确诊":Xzqz,
        "现存确诊":Xcqz,
        "累计确诊":Ljqz,
        "累计治愈":Cured,
        "累计死亡":Died,
        "新增死亡":Xz_Died,
        "新增治愈":Xz_Cured
    }
    return data

def View(data_1,data_2,data_3,data_4):
    # bar chart on a timeline
    c_1 = Timeline(init_opts=opts.InitOpts(chart_id='1'))
    c_1.add_schema(play_interval=1000,
                   label_opts=opts.series_options.LabelOpts(color='#cfe2f3'),is_auto_play = True)
    for i in range(3):
        # nationwide totals bar chart, 11 provinces per timeline page
        C_1 = (
            Bar()
                .add_xaxis(data_1['地区'][i*11:(i+1)*11])
                .add_yaxis("累计确诊", data_1['累计确诊'][i*11:(i+1)*11],stack='stack1',label_opts=opts.LabelOpts(is_show=False))
                .add_yaxis("新增确诊", data_1['新增确诊'][i*11:(i+1)*11],stack='stack1',label_opts=opts.LabelOpts(is_show=False))
                .add_yaxis("死亡人数", data_1['死亡人数'][i * 11:(i + 1) * 11],stack='stack1',label_opts=opts.LabelOpts(is_show=False))
                .add_yaxis("治愈人数", data_1['治愈人数'][i * 11:(i + 1) * 11],stack='stack1',label_opts=opts.LabelOpts(is_show=False))
                .add_yaxis("现存确诊", data_1['现存确诊'][i * 11:(i + 1) * 11],stack='stack1',label_opts=opts.LabelOpts(is_show=False))
                .reversal_axis()
                # .set_global_opts(legend_opts=opts.LegendOpts(selected_mode='single'))  # single-select legend
            .set_global_opts(xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white')),  # x-axis label color
                                yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white')),
                             legend_opts=opts.LegendOpts(selected_mode='single'),
                             title_opts=opts.TitleOpts("国内疫情详情",pos_right=0,pos_top=20,title_textstyle_opts=opts.TextStyleOpts(font_size=13,color="#cfe2f3")))
        )
        c_1.add(C_1, "第{}页".format(i+1))
    # nationwide map
    c_2 = (
            Map(init_opts=opts.InitOpts(chart_id='2'))
                # .add("死亡人数", [list(z) for z in zip(data_1['地区'], data_1['死亡人数'])], "china",is_selected=False)        #默认关闭该标签
                .add("累计确诊",[list(z) for z in zip(data_1['地区'], data_1['累计确诊'])], "china")
                .add("死亡人数", [list(z) for z in zip(data_1['地区'], data_1['死亡人数'])], "china")
                .add("治愈人数", [list(z) for z in zip(data_1['地区'], data_1['治愈人数'])], "china")
                .add("新增确诊", [list(z) for z in zip(data_1['地区'], data_1['新增确诊'])], "china")
                .add("现存确诊", [list(z) for z in zip(data_1['地区'], data_1['现存确诊'])], "china")
                .set_global_opts(
                title_opts=opts.TitleOpts(""),
                visualmap_opts=opts.VisualMapOpts(max_=10000),
                legend_opts=opts.LegendOpts(selected_mode='single')    # single-select legend
            )
        )
    # time-series line chart
    c_3 = (
            Line(init_opts=opts.InitOpts(chart_id='3'))
                .add_xaxis(data_3['时间'])
                .add_yaxis("新增确诊",data_3['新增确诊'],label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#DC143C'))
                .add_yaxis("现存确诊",data_3['现存确诊'],label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#a98175'))
                .add_yaxis("累计确诊",data_3['累计确诊'], label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#60281e'))
                .add_yaxis("累计治愈",data_3['累计治愈'], label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#2add9c'))
                .add_yaxis("累计死亡",data_3['累计死亡'], label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='black'))
                .add_yaxis("新增死亡",data_3['新增死亡'], label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#758a99'))
                .add_yaxis("新增治愈",data_3['新增治愈'], label_opts=opts.LabelOpts(is_show=False),linestyle_opts=opts.LineStyleOpts(color='#1bd1a5'))
                .set_global_opts(
                title_opts=opts.TitleOpts(title="疫情汇总时间线",pos_top=20,title_textstyle_opts=opts.TextStyleOpts(font_size=13,color="#cfe2f3")),
                tooltip_opts=opts.TooltipOpts(trigger="axis"),
                xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white')),  # x-axis label color
                yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white')),
                legend_opts=opts.LegendOpts(selected_mode='single')
            )
        )
    # Henan province map
    c_4 = (
            Map(init_opts=opts.InitOpts(chart_id='4'))
                # .add("死亡人数", [list(z) for z in zip(data_1['地区'], data_1['死亡人数'])], "china",is_selected=False)        #默认关闭该标签
                .add("累计确诊", [list(z) for z in zip(data_4['地区'], data_4['累计确诊'])], "河南")
                .add("死亡人数", [list(z) for z in zip(data_4['地区'], data_4['死亡人数'])], "河南")
                .add("治愈人数", [list(z) for z in zip(data_4['地区'], data_4['治愈人数'])], "河南")
                .add("新增确诊", [list(z) for z in zip(data_4['地区'], data_4['新增确诊'])], "河南")

                .set_global_opts(
                title_opts=opts.TitleOpts(title='省内详细信息',pos_top=30,title_textstyle_opts=opts.TextStyleOpts(font_size=13, color="#cfe2f3")),
                visualmap_opts=opts.VisualMapOpts(max_=500),
                legend_opts=opts.LegendOpts(selected_mode='single')  # single-select legend
            )
        )
    # pie chart of deaths by city
    c_5 = (
            Pie(init_opts=opts.InitOpts(chart_id='5'))
                .add("死亡人数", [list(z) for z in zip(data_4['地区'], data_4['死亡人数'])])
                .set_global_opts(title_opts=opts.TitleOpts('市死亡人数',title_textstyle_opts=opts.TextStyleOpts(font_size=13, color="gray"),),
                                 legend_opts=opts.LegendOpts(is_show=False))
                # .set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"),)
        )
    # summary figures compared with yesterday
    data = China_data_Total()
    c_7 = (
        Pie(init_opts=opts.InitOpts(chart_id='7'))
        .set_global_opts(
                    title_opts=opts.TitleOpts(
                        title="现存确诊" + " " * 17 + "境外输入" + " " * 17 + "现存无症状", title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="white"), ))
        )
    c_8 = (
        Pie(init_opts=opts.InitOpts(chart_id='8'))
        .set_global_opts(
                    title_opts=opts.TitleOpts(
                        title="累计确诊"+ " " * 17 + "累计治愈"+ " " * 17 + "累计死亡", title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="white"), ))
        )
    c_9 = (
        Pie(init_opts=opts.InitOpts(chart_id='9'))
        .set_global_opts(
                    title_opts=opts.TitleOpts(
                        title=str(data['现存_较昨日']) + " " * 25 + str(data['境外输入_较昨日']) + " " * 25  + str(data['无症状_较昨日']), title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="#e9e7ef"), ))
        )
    c_10 = (
        Pie(init_opts=opts.InitOpts(chart_id='10'))
        .set_global_opts(
                    title_opts=opts.TitleOpts(
                        title=str(data['累计_较昨日']) + " " * 25 + str(data['治愈_较昨日']) + " " * 25+ str(data['死亡_较昨日']), title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="#e9e7ef")))
        )
    # headline numbers
    c_11 = (
        Pie(init_opts=opts.InitOpts(chart_id='16'))
            .set_global_opts(
            title_opts=opts.TitleOpts(
                title=str(data['现存确诊']) + " " * 3 + str(data['境外输入']) + " " * 3 + str(data['现存无症状']),
                title_textstyle_opts=opts.TextStyleOpts(font_size=23, color="#CD3700")))
    )
    c_12 = (
        Pie(init_opts=opts.InitOpts(chart_id='17'))
            .set_global_opts(
            title_opts=opts.TitleOpts(
                title=str(data['累计确诊']) + " " * 3 + str(data['累计治愈']) + " " * 3 + str(data['累计死亡']),
                title_textstyle_opts=opts.TextStyleOpts(font_size=23, color="#7F7F7F")))
    )

    tu_1 = (
        Line(init_opts=opts.InitOpts(width="1500px",
                                     height="850px",
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"},
                                     chart_id='11'))
            .add_xaxis([None])
            .add_yaxis("", [None])
            .set_global_opts(
            title_opts=opts.TitleOpts(),
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )

    tu_1.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_body.jpg';

        """
    )
    tu_2 = (
        Line(init_opts=opts.InitOpts(
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"},
                                     chart_id='12'))
            .add_xaxis([None])
            .add_yaxis("", [None])
            .set_global_opts(
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_2.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_box5.png';

        """
    )
    tu_3 = (
        Line(init_opts=opts.InitOpts(chart_id='13',
            bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"},
        ))
            .add_xaxis([None])
            .add_yaxis("", [None])
            .set_global_opts(
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_3.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_box5.png';

        """
    )
    tu_4 = (
        Line(init_opts=opts.InitOpts(chart_id='14',
            bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"},
        ))
            .add_xaxis([None])
            .add_yaxis("", [None])
            .set_global_opts(
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_4.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_box4.png';

        """
    )
    tu_5 = (
        Line(init_opts=opts.InitOpts(width="1500px",
                                     height="200px",
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"},
                                     chart_id='15'))
            .add_xaxis([None])
            .add_yaxis("", [None])
            .set_global_opts(
            title_opts=opts.TitleOpts(title="全球疫情信息 截至" + data['截止日期'],
                                      pos_left='center',
                                      title_textstyle_opts=opts.TextStyleOpts(font_size=21, color='#51c2d5'),
                                      pos_top='23%'),
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_5.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_title.png';

        """
    )
    page = Page(layout=Page.DraggablePageLayout, page_title="可视化大屏")
    page.add(tu_1,tu_2,tu_3,tu_4,c_1,c_2,c_3,c_4,c_5,c_7,c_8,c_9,c_10,tu_5,c_11,c_12)
    page.render('国内疫情信息.html')

def Style():
    # Freeze the layout: after dragging the charts into place on the rendered draggable page
    # and saving the layout JSON, produce a fixed-position HTML file
    Page.save_resize_html(source="./国内疫情信息.html",  # the HTML file rendered above
                          cfg_file="chart_config_3.json",  # the saved JSON layout config
                          dest="new_国内疫情信息可视化大屏.html"  # output file name; defaults to resize_render.html if omitted
                          )
def main():
    last_date = 0
    while True:
        data_1 = China_data(Get_HTML())
        data_2 = Abroad_data(Get_HTML())
        data_3 = TimeLine()
        data_4 = Henan_data()
        date = China_data_Total()['截止日期']
        if date != last_date:   # re-render only when the feed reports a newer timestamp
            View(data_1, data_2, data_3, data_4)
            Style()
            last_date = date
        time.sleep(600)   # poll every 10 minutes instead of spinning at full speed

main()