import pandas as pd

# Load the domestic epidemic data snapshot produced by the crawler below.
df = pd.read_csv('国内疫情数据.csv')

# CSV -> Excel.
# FIX: dropped the `encoding` kwarg — xlsx is a zipped XML format with no text
# encoding; the argument was deprecated and later removed from to_excel.
df.to_excel('C:\\Users\\fch\\Desktop\\excel1.xlsx')

# CSV -> TXT (CSV-formatted text saved with a .txt extension).
df.to_csv('C:\\Users\\fch\\Desktop\\txt.txt', encoding='gbk')

# TXT -> Excel.
# BUG FIX: the original copied raw text lines into a file named excel2.xlsx,
# producing a corrupt workbook (xlsx is not plain text), and never closed the
# source file handle.  Parse the text back into a DataFrame and write a real
# workbook instead.
pd.read_csv('C:\\Users\\fch\\Desktop\\txt.txt', encoding='gbk').to_excel(
    'C:\\Users\\fch\\Desktop\\excel2.xlsx')
import math
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# 1.读取catering_sale.xls 餐饮数据,用describe对数据输出基本描述。
# data=pd.read_excel('D:\作业\韩毅数据分析\demo\data\catering_sale.xls')
# print(data)
# data=data.describe()
# print(data)
# 2.随机生成数据样本D,一行为3到10,一行为13~20,分别用Spearman、Pearson方法、计算输出其相关系数
# data=np.array([np.arange(3,11),np.arange(13,21)])
# print(data)
# data_1=stats.spearmanr(data[0],data[1])
# print("spearmanr方法:"+'\n',data_1)
# data_2=stats.pearsonr(data[0],data[1])
# print("Pearonr方法:"+'\n',data_2)
# 3.随机生成5*5的矩阵,计算输出该矩阵的均值、中位数、标准差、方差、协方差矩阵
# data=np.random.randint(0,200,size=(5,5),dtype=int)
# print(data)
# ave=np.mean(data)
# print("均值:",ave)
# Var=np.var(data)
# print("方差:",Var)
# Std=np.std(data)
# print("标准差:",Std)
# med=np.median(data)
# print("中位数:",med)
# cov=np.cov(data)
# print("协方差矩阵:",cov)
# 4.随机生成1000个正态分布的数据,画出该数据的箱式图
# data=np.random.rand(1000)
# plt.figure(figsize=(10,10))
# plt.boxplot(data)
# plt.show()
# -*- coding: utf-8 -*-
# 1、读入catering_sale.xls销量数据,找出异常值置为空值,采用拉格朗日插值法对空值进行插值,将结果输出至sales.xls
# import pandas as pd
# from scipy.interpolate import lagrange # 导入拉格朗日插值函数
# # 拉格朗日插值代码
# import pandas as pd #导入数据分析库Pandas
# import numpy as np
# import matplotlib.pyplot as plt
# from scipy.interpolate import lagrange #导入拉格朗日插值函数
# # inputfile = 'catering_sale.xls' #销量数据路径
# # outputfile = 'sales.xls' #输出数据路径
# data = pd.read_excel('catering_sale.xls') #读入数据
# temp = data[u'销量'][(data[u'销量'] < 400) | (data[u'销量'] > 5000)] #找到不符合要求得值 data[列][行]
# for i in range(temp.shape[0]):
# data.loc[temp.index[i],u'销量'] = np.nan #把不符合要求得值变为空值
# #自定义列向量插值函数
# #s为列向量,n为被插值的位置,k为取前后的数据个数,默认为5
# def ployinterp_column(s, n, k=5):
# y = s.iloc[list(range(n-k, n)) + list(range(n+1, n+1+k))] #取数 就是传入得data
# y = y[y.notnull()] #剔除空值
# f = lagrange(y.index, list(y))
# return f(n) #插值并返回插值结果
# #逐个元素判断是否需要插值
# for i in data.columns:
# for j in range(len(data)):
# if (data[i].isnull())[j]: #如果为空即插值。
# data.loc[j,i] = ployinterp_column(data[i], j)
# data.to_excel('sales.xls') #输出结果,写入文件
# print("success")
# 2、读入principal_component.xls数据,使用PCA降维,输出模型的各个特征向量及各个成分各自的方差百分比。
# -*- coding: utf-8 -*-
# 代码4-6 主成分分析降维
# import pandas as pd
# from sklearn.decomposition import PCA
# # 参数初始化
# data = pd.read_excel('principal_component.xls', header = None) # 读入数据
# pca = PCA()
# pca.fit(data)
# pca.components_ # 返回模型的各个特征向量
# pca.explained_variance_ratio_ # 返回各个成分各自的方差百分
# # 代码4-7 计算成分结果
# pca = PCA(3)
# pca.fit(data)
# low_d = pca.transform(data) # 用它来降低维度
# pd.DataFrame(low_d).to_excel('dimention_reducted.xls') # 保存结果
# pca.inverse_transform(low_d) # 必要时可以用inverse_transform()函数来复原数据
# low_d
# 对一个10×4维的随机生成矩阵进行主成分分析
# import pandas as pd
# import numpy as np
# D = pd.Series([1, 1, 2, 3, 5])
# D.unique()
# np.unique(D)
# from sklearn.decomposition import PCA
# D = np.random.rand(10,4)
# pca = PCA()
# pca.fit(D)
# pca.components_ # 返回模型的各个特征向量
# pca.explained_variance_ratio_ # 返回各个成分各自的方差百分比
#
# print(pca.explained_variance_ratio_ )
# 1.创建一个数值范围为0-1,间隔为0.1的数组。
# list=np.arange(0,1,0.1)
# print(list)
# 创建100个服从正态分布的随机数。
# data=np.random.rand(100)
# print(data)
# 3.对创建的两个四行三列的二维数组进行合并运算。
# data_1=np.random.randint(0,20,size=(4,3))
# data_2=np.random.randint(40,60,size=(4,3))
# # 垂直合并
# data1=np.vstack((data_1,data_2))
# #水平合并
# data2=np.hstack((data_1,data_2))
# print(data_1)
# print(data_2)
# print("垂直合并:"+'\n',data1)
# print("水平合并:"+'\n',data2)
# Interactive stock summary: read code/name and the day's high/low prices,
# then echo them together with the high-low spread.
num = input("请输入股票代码:")
name = input("请输入股票名称:")
M_max = input("请输入当天股票最高价:")
M_min = input("请输入当天股票最低价:")
print("股票代码:{},股票名称{}".format(num, name))
# BUG FIX: stock prices are usually decimals ("12.5"); int() would raise
# ValueError on such input, so parse the prices as float instead.
print("最高价:{},最低价:{},差值:{}".format(M_max, M_min, float(M_max) - float(M_min)))
# re=int(input("请输入实部:"))
# vi=int(input("请输入虚部:"))
# print("复数形式:{}+{}i,模:{}".format(re,vi,math.sqrt(re*re+vi*vi)))
# type=int(input("请输入客户类型:"))
# price=int(input("请输入价格:"))
# num=int(input("请输入订货量:"))
# if type<5:
# if num<800:
# money=num*price
# else:
# money=num*price*(1-0.02)
# else:
# if num<500:
# money = num * price * (1 - 0.03)
# elif num>=500 and num<1000:
# money = num * price * (1 - 0.05)
# elif num>=1000 and num<2000:
# money = num * price * (1 - 0.08)
# else:
# money = num * price * (1 - 0.1)
# print("money:",money)
# sum=0
# for i in range(100,201):
# if i%2!=0:
# sum+=i
# print(sum)
# x=int(input("请输入一个自然数:"))
# for i in range(x-1,1,-1):
# # print("sfds3")
# if x%i==0:
# print(i)
# break
# for i in range (2,101):
# for j in range(2,i):
# if i%j==0:
# break
# else:
# print(i,end=' ')
# -*- coding:utf-8 -*-
import requests
import json
import re
import pandas as pd
import time
from pyecharts import options as opts
from pyecharts.charts import Bar, Page,Pie,Line,Timeline,Grid,Map
from pyecharts.commons.utils import JsCode
def sleeptime(hour, min, sec):  # convert an h/m/s duration into seconds
    """Return the total number of seconds in `hour` hours, `min` minutes and `sec` seconds."""
    return sec + 60 * (min + 60 * hour)
def Get_HTML():
    """Fetch the 360 epidemic JSONP feed and return it as a parsed dict.

    The endpoint wraps its JSON payload in a ``jsonp2(...)`` callback, so the
    first 7 characters (``jsonp2(``) and the trailing ``);`` are stripped
    before parsing.  Prints "error!" and returns None on failure.
    """
    headers = {'User-Agent': 'Mozilla/5.0'}
    url = 'https://m.look.360.cn/events/feiyan?sv=&version=&market=&device=2&net=4&stype=&scene=&sub_scene=&refer_scene=&refer_subscene=&f=jsonp&location=true&sort=2&_=1649252949072&callback=jsonp2'
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        # Renamed from `re` so the imported `re` module is not shadowed.
        payload = r.text[7:-2]
        response = json.loads(payload)
        return response
    except (requests.RequestException, ValueError):
        # Narrowed from a bare `except:` so genuine bugs (NameError, KeyError,
        # KeyboardInterrupt, ...) are no longer silently swallowed.
        print("error!")
def China_data_Total():
    """Scrape China-wide COVID-19 totals from 360's mobile subject page.

    The figures are embedded in the page's inline JSON; a regex extracts the
    object between ``"feiyanTrend":`` and ``,"local":`` and json.loads parses
    it.  Returns a flat dict (Chinese keys, used verbatim as chart labels
    downstream) of current totals plus day-over-day differences.

    NOTE(review): no error handling — raises on network failure, a page-layout
    change (IndexError on item[0]) or malformed JSON.
    """
    url='http://m.look.360.cn/subject/400?sign=360_6aa05217'
    headers={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0'}
    html = requests.get(url, headers=headers).text
    # re.S lets `.` span newlines, since the JSON blob may wrap lines.
    pattern=re.compile('"qcmsint1":"0"},"feiyanTrend":(.*),"local":',re.S)
    item=re.findall(pattern,html)
    response=json.loads(item[0])
    data = {
        "截止日期":response['total']['modifyTime'],
        "现存确诊":response['total']['currentConfirmed'],
        "境外输入":response['total']['overseasInput'],
        "现存无症状":response['total']['asymptom'],
        "累计确诊":response['total']['diagnosed'],
        "累计治愈":response['total']['cured'],
        "累计死亡":response['total']['died'],
        "现存_较昨日":response['newDiff']['currentConfirmed'],
        "境外输入_较昨日":response['newDiff']['overseasInput'],
        "无症状_较昨日":response['newDiff']['asymptom'],
        "累计_较昨日":response['newDiff']['diagnosed'],
        "治愈_较昨日":response['newDiff']['cured'],
        "死亡_较昨日":response['newDiff']['died']
    }
    return data
def China_data(response):  # domestic (per-province) data
    """Extract per-province figures from the parsed 360 feed.

    Parameters
    ----------
    response : dict
        Parsed feed as returned by Get_HTML(); each entry of
        ``response['data']`` describes one provincial-level region.

    Returns
    -------
    dict
        Parallel lists keyed by the Chinese column labels
        (region / new / current / cumulative confirmed / cured / died),
        ready for pandas.DataFrame or the pyecharts views.
    """
    # Take at most the 34 provincial-level rows (as before), but — unlike the
    # original hard-coded range(34) — do not raise IndexError if the feed
    # happens to return fewer entries.
    rows = response['data'][:34]
    return {
        "地区": [row['city'] for row in rows],
        "新增确诊": [row['diffDiagnosed'] for row in rows],
        "现存确诊": [row['currentConfirmed'] for row in rows],
        "累计确诊": [row['diagnosed'] for row in rows],
        "治愈人数": [row['cured'] for row in rows],
        "死亡人数": [row['died'] for row in rows]
    }
def Abroad_data(response):  # international (per-country) data
    """Extract per-country figures from the parsed 360 feed.

    Parameters
    ----------
    response : dict
        Parsed feed as returned by Get_HTML(); each entry of
        ``response['country']`` describes one country.

    Returns
    -------
    dict
        Parallel lists keyed by the Chinese column labels (no
        现存确诊 column for countries, matching the original output).
    """
    # Take at most 196 countries (as before), but tolerate a shorter feed
    # instead of crashing with IndexError like the hard-coded range(196) did.
    # (The original also declared an Xcqz list that was never filled or
    # returned; it is simply gone here.)
    rows = response['country'][:196]
    return {
        "地区": [row['provinceName'] for row in rows],
        "新增确诊": [row['diffDiagnosed'] for row in rows],
        "累计确诊": [row['diagnosed'] for row in rows],
        "治愈人数": [row['cured'] for row in rows],
        "死亡人数": [row['died'] for row in rows]
    }
def View(data_1, data_2):  # visualization
    """Build the dashboard charts and resize a previously rendered page.

    Parameters:
        data_1: dict of parallel lists for domestic per-province data
                (keys 地区/新增确诊/现存确诊/累计确诊/治愈人数/死亡人数).
        data_2: dict of parallel lists for international per-country data
                (same keys minus 现存确诊).

    NOTE(review): the page.render(...) call that would produce
    全球疫情信息.html is commented out below, so the final
    Page.save_resize_html assumes that HTML file and chart_config.json
    already exist on disk — confirm before running.
    """
    # Timeline of per-province horizontal bar charts.
    # NOTE(review): area_color_js is defined but never used in this function.
    area_color_js = (
        "new echarts.graphic.LinearGradient(0, 0, 0, 1, "
        "[{offset: 0, color: '#7bbfea'}, {offset: 1, color: '#3fbbff0d'}], false)"
    )
    t_1 = Timeline()
    t_1.add_schema(play_interval=1000,
                   label_opts=opts.series_options.LabelOpts( color='#cfe2f3'))
    for i in range(len(data_1['地区'])):
        c_1 = (
            Bar(init_opts=opts.InitOpts(width="300px",height='300px'))
            .add_xaxis(['新增确诊','现存确诊','累计确诊','治愈人数','死亡人数'],)
            .add_yaxis("",[data_1['新增确诊'][i],data_1['现存确诊'][i],data_1['累计确诊'][i],data_1['治愈人数'][i],data_1['死亡人数'][i]],
                       color='#44cef6',)
            .reversal_axis()
            .set_global_opts(
                title_opts=opts.TitleOpts(" (地区: {} )".format(data_1['地区'][i]),title_textstyle_opts=opts.TextStyleOpts(color="#cfe2f3")),
                xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white')),  # x-axis label color
                yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white'))
            )
        )
        t_1.add(c_1, "{}".format(data_1['地区'][i]))
    # Timeline of per-country horizontal bar charts.
    t_2 = Timeline()
    t_2.add_schema(play_interval=1000,
                   label_opts=opts.series_options.LabelOpts(color='#cfe2f3'))
    for i in range(len(data_2['地区'])):
        c_2 = (
            Bar()
            .add_xaxis(['新增确诊', '累计确诊', '治愈人数', '死亡人数'], )
            .add_yaxis("", [data_2['新增确诊'][i], data_2['累计确诊'][i], data_2['治愈人数'][i],data_2['死亡人数'][i]],
                       label_opts=opts.LabelOpts(position="right"),
                       color='#44cef6', )
            .reversal_axis()
            .set_global_opts(
                title_opts=opts.TitleOpts(" (国家: {} )".format(data_2['地区'][i]),pos_right='0',title_textstyle_opts=opts.TextStyleOpts(color="#cfe2f3")),
                xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white',)),  # x-axis label color
                yaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white'),position='right')
            )
        )
        t_2.add(c_2, "{}".format(data_2['地区'][i]))
    # Line chart of the international series (one line per metric).
    # data_pair = [list(z) for z in zip(x_data, y_data)]  # would give pairs like ['美国', 1010774]
    c_3 = (
        Line()
        .add_xaxis(data_2['地区'])
        .add_yaxis(
            series_name="新增确诊",
            y_axis=data_2['新增确诊'],
            label_opts=opts.LabelOpts(is_show=False,),
        )
        .add_yaxis(
            series_name="累计确诊",
            y_axis=data_2['累计确诊'],
            label_opts=opts.LabelOpts(is_show=False,),
        )
        .add_yaxis(
            series_name="治愈人数",
            y_axis=data_2['治愈人数'],
            label_opts=opts.LabelOpts(is_show=False,),
        )
        .add_yaxis(
            series_name="死亡人数",
            y_axis=data_2['死亡人数'],
            label_opts=opts.LabelOpts(is_show=False,),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(title="国外疫情数据",pos_right='0',title_textstyle_opts=opts.TextStyleOpts(color="#cfe2f3")),
            datazoom_opts=opts.DataZoomOpts(),
            tooltip_opts=opts.TooltipOpts(trigger="axis"),
            xaxis_opts=opts.AxisOpts(axislabel_opts=opts.LabelOpts(color='white', )),  # x-axis label color
            yaxis_opts=opts.AxisOpts(
                position='right',
                axistick_opts=opts.AxisTickOpts(is_show=True,),
                splitline_opts=opts.SplitLineOpts(is_show=True),
                axislabel_opts=opts.LabelOpts(color='white')
            ),
        )
    )
    # China map of cumulative confirmed cases.
    # NOTE(review): the per-province figures here are hard-coded, not taken
    # from data_1 — they will drift from the live data.
    c_5 = (
        Map()
        .add("累计确诊", [['河南', 2840], ['香港', 308175], ['吉林', 37712] , ['台湾', 27410], ['上海', 11366], ['浙江', 2819], ['福建', 2905],['山东', 2646], ['黑龙江', 2505], ['广东', 6681], ['四川', 1948], ['辽宁', 1604], ['河北', 1981], ['江苏', 2083], ['广西', 1518], ['北京', 1821], ['海南', 243], ['山西', 350], ['陕西', 3247], ['天津', 1798], ['云南', 2079], ['安徽', 1054], ['江西', 1063], ['湖南', 1335], ['青海', 43], ['内蒙古', 1687],['重庆',2423], ['贵州', 178], ['湖北', 68392], ['甘肃', 681], ['新疆', 999], ['宁夏', 122], ['澳门', 82], ['西藏', 1]], "china")
        .set_global_opts(
            title_opts=opts.TitleOpts(title="国内现存确诊",title_textstyle_opts=opts.TextStyleOpts(color="#cfe2f3")),visualmap_opts=opts.VisualMapOpts(max_=10000))
    )
    # Nationwide totals for the headline panels below.
    data = China_data_Total()
    # c_6..c_9 are empty Pie charts used purely as text panels: the numbers
    # live in the chart titles.
    c_6 = (
        Pie(init_opts=opts.InitOpts())
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="现存确诊:" + str(data['现存确诊']) + " " * 5 + "境外输入:" + str(data['境外输入']) + " " * 5 + "现存无症状:" + str(
                    data['现存无症状']), title_textstyle_opts=opts.TextStyleOpts(font_size=15, color="#ffb3a7"), ))
    )
    c_7 = (
        Pie(init_opts=opts.InitOpts())
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="累计确诊:" + str(data['累计确诊']) + " " * 5 + "累计治愈:" + str(data['累计治愈']) + " " * 5 + "累计死亡:" + str(
                    data['累计死亡']), title_textstyle_opts=opts.TextStyleOpts(font_size=15, color="#ffb3a7"), ))
    )
    c_8 = (
        Pie(init_opts=opts.InitOpts())
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="较昨日:" + str(data['现存_较昨日']) + " " * 25 + "较昨日:" + str(data['境外输入_较昨日']) + " " * 25 + "较昨日:" + str(
                    data['无症状_较昨日']), title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="#e9e7ef"), ))
    )
    c_9 = (
        Pie(init_opts=opts.InitOpts())
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="较昨日:" + str(data['累计_较昨日']) + " " * 25 + "较昨日:" + str(data['治愈_较昨日']) + " " * 25 + "较昨日:" + str(
                    data['死亡_较昨日']), title_textstyle_opts=opts.TextStyleOpts(font_size=10, color="#e9e7ef"), ))
    )
    # China map of deaths (also hard-coded figures — see note on c_5).
    c_10 = (
        Map()
        .add("死亡人数",[['河南', 22], ['香港', 8827], ['吉林', 5], ['台湾', 854], ['上海', 7], ['浙江', 1], ['福建', 1], ['山东', 7], ['广东', 8], ['黑龙江', 13], ['四川', 3], ['江苏', 0], ['河北', 7], ['辽宁', 2], ['广西', 2], ['北京', 9], ['海南', 6], ['山西', 0], ['陕西', 3], ['云南', 2], ['安徽', 6], ['湖南', 4], ['江西', 1], ['天津', 3], ['青海', 0], ['内蒙古', 1], ['重庆', 6], ['贵州', 2], ['湖北', 4512], ['甘肃', 2], ['新疆', 3], ['宁夏', 0], ['澳门', 0], ['西藏', 0]], "china")
        .set_global_opts(
            title_opts=opts.TitleOpts(title="国内各地死亡人数", title_textstyle_opts=opts.TextStyleOpts(color="#cfe2f3")),
            visualmap_opts=opts.VisualMapOpts(max_=10000))
    )
    # tu_1..tu_3 are empty Line charts used only as image holders: their
    # bg_color pattern references a JS `img` variable injected via
    # add_js_funcs below.
    tu_1 = (
        Line(init_opts=opts.InitOpts(width="1500px",
                                     height="850px",
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"}))
        .add_xaxis([None])
        .add_yaxis("", [None])
        .set_global_opts(
            title_opts=opts.TitleOpts(),
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_1.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_body.jpg';
        """
    )
    tu_2 = (
        Line(init_opts=opts.InitOpts(width="600px",
                                     height="700px",
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"}))
        .add_xaxis([None])
        .add_yaxis("", [None])
        .set_global_opts(
            title_opts=opts.TitleOpts(
                pos_left='center',
                title_textstyle_opts=opts.TextStyleOpts(font_size=25, color='#51c2d5'),
                pos_top='5%'),
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_2.add_js_funcs(
        """
        var img = new Image(); img.src = './images/data_icon.png';
        """
    )
    tu_3 = (
        Line(init_opts=opts.InitOpts(width="1500px",
                                     height="200px",
                                     bg_color={"type": "pattern", "image": JsCode("img"), "repeat": "no-repeat"}))
        .add_xaxis([None])
        .add_yaxis("", [None])
        .set_global_opts(
            title_opts=opts.TitleOpts(title="全球疫情信息 截至"+data['截止日期'],
                                      pos_left='center',
                                      title_textstyle_opts=opts.TextStyleOpts(font_size=21, color='#51c2d5'),
                                      pos_top='5%'),
            yaxis_opts=opts.AxisOpts(is_show=False),
            xaxis_opts=opts.AxisOpts(is_show=False))
    )
    tu_3.add_js_funcs(
        """
        var img = new Image(); img.src = './images/bg_title.png';
        """
    )
    # Original draggable-layout rendering step (run once to produce the HTML
    # and the chart_config.json layout, then keep commented out):
    # page = Page(layout=Page.DraggablePageLayout, page_title="模拟")
    # page.add(tu_1,t_1, t_2, c_3,c_5,c_6,c_7,c_8,c_9,tu_2,tu_3,c_10)
    # page.render('全球疫情信息.html')
    # Apply the saved layout to the previously rendered page.
    Page.save_resize_html(source="全球疫情信息.html",  # HTML file rendered above
                          cfg_file="chart_config.json",  # layout saved from the draggable page
                          dest="new_全球疫情信息.html"  # output name; defaults to resize_render.html if omitted
                          )
def main():
    """Refresh loop: every 5 hours re-scrape, dump CSV snapshots, redraw charts."""
    second = sleeptime(5, 0, 0)  # refresh interval, in seconds
    while True:
        # BUG FIX: fetch *inside* the loop — the original fetched once before
        # the loop, so every iteration re-wrote the same stale snapshot.
        data_1 = China_data(Get_HTML())   # domestic data
        data_2 = Abroad_data(Get_HTML())  # international data
        pd.DataFrame(data_1).to_csv('./国内疫情数据.csv')
        pd.DataFrame(data_2).to_csv('./国外疫情数据.csv')
        View(data_1, data_2)
        time.sleep(second)
# main()  # uncomment to run the 5-hourly refresh loop instead of a one-shot
# One-shot run: fetch both feeds and build the dashboard once.
data_1 = China_data(Get_HTML())  # domestic data
data_2 = Abroad_data(Get_HTML())  # international data
data_11 = pd.DataFrame(data_1)
data_22 = pd.DataFrame(data_2)
# data_11
# data_22
View(data_1,data_2)
# print(data_1)
from pyecharts.commons.utils import JsCode
value=[7.0,11.0,50.0,112.0]
color_function = """
function (params) {
if (params.value == 7.0)
return '#66CCCC';
else if (params.value ==11.0)
return '#CCCCFF';
else if (params.value ==50.0)
return '#FF6666';
else return '#CCFFCC';
}
"""
c_3 = (
Bar(init_opts=opts.InitOpts(width="800px",height="400px",))
.add_xaxis(['睡觉','说话','离开','学习'],)
.add_yaxis("",value,itemstyle_opts=opts.ItemStyleOpts(color=JsCode(color_function)))
.set_global_opts(
title_opts=opts.TitleOpts(title="国内疫情数据"),
legend_opts=opts.LegendOpts(pos_left="20%"),
datazoom_opts=opts.DataZoomOpts(type_="inside"),
)
)
import requests
import json
from lxml import etree
import pandas as pd
import time
from pyecharts import options as opts
from pyecharts.charts import Bar
def sleeptime(hour, min, sec):  # hours/minutes/seconds -> total seconds
    """Return the duration expressed in whole seconds."""
    total = hour * 3600
    total += min * 60
    return total + sec
def Get_HTML(url):
    """GET `url` with a minimal User-Agent and return the body as text.

    Prints "error!" and returns None on failure.
    """
    headers = {'User-Agent': 'Mozilla/5.0'}
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` — only network/HTTP failures are
        # expected here; anything else should surface as a real traceback.
        print("error!")
def Parse_page():
    """Download and parse the 360 JSONP epidemic feed into per-province lists.

    Returns
    -------
    dict
        Parallel lists keyed by the Chinese column labels
        (地区/新增确诊/现存确诊/累计确诊/治愈人数/死亡人数).
    """
    url='https://m.look.360.cn/events/feiyan?sv=&version=&market=&device=2&net=4&stype=&scene=&sub_scene=&refer_scene=&refer_subscene=&f=jsonp&location=true&sort=2&_=1649252949072&callback=jsonp2'
    html = Get_HTML(url)
    # Renamed from `re` so the imported `re` module is not shadowed; strip the
    # jsonp2( ... ) wrapper (first 7 chars and trailing ");") before parsing.
    payload = html[7:-2]
    response = json.loads(payload)
    # At most the 34 provincial-level rows (as before), without the original
    # hard-coded range(34) that would IndexError on a shorter feed.
    rows = response['data'][:34]
    data = {
        "地区": [row['data']['provinceName'] for row in rows],
        "新增确诊": [row['diffDiagnosed'] for row in rows],
        "现存确诊": [row['currentConfirmed'] for row in rows],
        "累计确诊": [row['diagnosed'] for row in rows],
        "治愈人数": [row['cured'] for row in rows],
        "死亡人数": [row['died'] for row in rows]
    }
    # print(data)
    return data
def View():
    """Render a stacked bar chart of the per-province data to 国内疫情数据数据.html."""
    data = Parse_page()
    chart = (
        Bar()
        .add_xaxis(data['地区'])
        .add_yaxis("新增确诊",data['新增确诊'], stack="stack1")
        .add_yaxis("现存确诊",data['现存确诊'] , stack="stack1")
        .add_yaxis("累计确诊",data['累计确诊'] , stack="stack1")
        .add_yaxis("治愈人数",data['治愈人数'] , stack="stack1")
        .add_yaxis("死亡人数",data['死亡人数'] , stack="stack1")
        .set_global_opts(
            title_opts=opts.TitleOpts(title="国内疫情数据数据(全部)"),
            datazoom_opts=opts.DataZoomOpts(type_="inside"),
        )
    )
    # FIX: render as a plain statement — the original bound the chained
    # (...render()) result to a variable `c` that was never used.
    chart.render("国内疫情数据数据.html")
def main():
    """Daily refresh loop: re-scrape, dump a CSV snapshot, re-render the chart."""
    second = sleeptime(24,0,0)  # 24-hour refresh interval, in seconds
    while True:
        # BUG FIX: re-fetch each cycle — the original fetched once *before*
        # the loop and kept writing the same stale DataFrame forever.
        data = pd.DataFrame(Parse_page())
        # print(data)
        data.to_csv('./国内疫情数据.csv')
        View()
        time.sleep(second)
# main()  # uncomment to run the daily refresh loop instead of a one-shot
data=Parse_page()
data=pd.DataFrame(data)
# NOTE(review): a bare expression only displays in a notebook cell; when run
# as a script this line is a no-op.
data
import requests
def BaiDu_fanyi(url):
    """Prompt for a word, query Baidu's 'sug' suggestion API and print the translations.

    Parameters
    ----------
    url : str
        The https://fanyi.baidu.com/sug endpoint.

    The response JSON looks like
    {'errno': 0, 'data': [{'k': 'word', 'v': 'n. 单词; ...'}, ...]};
    each entry's 'v' field is printed on its own line.
    """
    kw = input('请输入待翻译的英文/中文单词:')
    data={'kw':kw}
    headers = {
        # BUG FIX: the header name is 'User-Agent'; the original 'User_Agent'
        # (underscore) is not a real HTTP header and was ignored by the server.
        'User-Agent':'Mozilla/5.0',
        # BUG FIX: the original set 'content-length' to str(len(data)) — the
        # dict's key count (always 1), not the body size.  requests computes
        # the correct Content-Length from the encoded form body automatically,
        # so it is no longer set by hand.
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'referer': 'https://fanyi.baidu.com/',
        'x-requested-with': 'XMLHttpRequest'
    }
    response=requests.post(url,headers=headers,data=data)
    # print(response.json())
    result=''
    for i in response.json()['data']:
        result+=i['v']+'\n'
    print("{}的翻译结果为:".format(kw))
    print(result)
# Baidu suggestion endpoint used for the quick word-translation lookup above.
url='https://fanyi.baidu.com/sug'
BaiDu_fanyi(url)