Sample code: scraping Tencent's real-time epidemic data with Python and storing it in a MySQL database

Approach:

Open Tencent's epidemic-data site and press F12 to inspect the network requests and work out the site's structure, then use Python to scrape both the current day's detailed data and the historical data, storing them in two MySQL tables, details and history.

① This function scrapes the detailed daily epidemic data

import requests
import json
import time

def get_details():
  url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=jQuery34102848205531413024_1584924641755&_=1584924641756'
  headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400'
  }
  res = requests.get(url, headers=headers)
  # Print the full raw response
  # print(res.text)
  # Strip the JSONP wrapper (the "jQuery...(" prefix and the trailing ")") before parsing
  response_data = json.loads(res.text.replace('jQuery34102848205531413024_1584924641755(', '')[:-1])
  # The top-level keys are dict_keys(['ret', 'data']): ret is the status code
  # (0 means the request succeeded) and data holds the payload we need
  # print(response_data.keys())
  """The outer JSON has already been parsed above; data itself is another JSON
  string, so it must be parsed a second time:
    print(json.loads(response_data['data']).keys())
  Result:
    dict_keys(['lastUpdateTime', 'chinaTotal', 'chinaAdd', 'isShowAdd', 'showAddSwitch',
    'areaTree', 'chinaDayList', 'chinaDayAddList', 'dailyNewAddHistory', 'dailyHistory',
    'wuhanDayList', 'articleList'])
  lastUpdateTime is the latest update time, chinaTotal the national totals,
  chinaAdd the national daily increases, isShowAdd whether increases are shown,
  showAddSwitch which figures are shown; areaTree holds the regional data.
  """
  data = json.loads(response_data['data'])
  areaTree_data = data['areaTree']
  # print(data.keys())
  # print(areaTree_data[0].keys())
  """
  areaTree_data[0] is the entry for China:
    print(areaTree_data[0].keys())
    dict_keys(['name', 'today', 'total', 'children'])
  name is the country name, today holds today's figures, total the cumulative
  totals, and children the province-level data, which is what we want:
    print(areaTree_data[0]['children'])
  Each element has name (region name), today, total, and children (city-level
  data). We iterate over the list, taking name at the province level; for each
  city we then take name, confirm/heal/dead under total (cumulative figures)
  and confirm under today (the daily increase). Those are all the fields we need.
  """
  # print(areaTree_data[0]['children'])
  # for province_data in areaTree_data[0]['children']:
  #   print(province_data)

  ds = data['lastUpdateTime']
  details = []
  for pro_infos in areaTree_data[0]['children']:
    province_name = pro_infos['name']  # province name
    for city_infos in pro_infos['children']:
      city_name = city_infos['name']  # city name
      confirm = city_infos['total']['confirm']  # cumulative confirmed
      confirm_add = city_infos['today']['confirm']  # confirmed added today
      heal = city_infos['total']['heal']  # cumulative healed
      dead = city_infos['total']['dead']  # cumulative deaths
      # print(ds, province_name, city_name, confirm, confirm_add, heal, dead)
      details.append([ds, province_name, city_name, confirm, confirm_add, heal, dead])
  return details

Test the function on its own:

# d = get_details()
# print(d)
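The hard-coded replace() above only works for this exact callback string. As a minimal hedged sketch (the helper name strip_jsonp is my own, not from the original article), the wrapper can be stripped generically with a regular expression:

import re
import json

def strip_jsonp(text):
  # Strip a JSONP wrapper of the form callbackName({...}) and parse the body.
  # The callback name is whatever precedes the first '(' in the response.
  match = re.match(r'^[^(]*\((.*)\)\s*;?\s*$', text, re.S)
  if not match:
    raise ValueError('response is not valid JSONP')
  return json.loads(match.group(1))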

② This function scrapes the detailed historical data

import requests
import json
import time

def get_history():
  url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_other&callback=jQuery341026745307075030955_1584946267054&_=1584946267055'
  headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400'
  }
  res = requests.get(url, headers=headers)
  # print(res.text)
  # Strip the JSONP wrapper before parsing
  response_data = json.loads(res.text.replace('jQuery341026745307075030955_1584946267054(', '')[:-1])
  # print(response_data)
  data = json.loads(response_data['data'])
  # print(data.keys())
  chinaDayList = data['chinaDayList']  # daily cumulative history
  chinaDayAddList = data['chinaDayAddList']  # daily increase history
  history = {}
  for i in chinaDayList:
    ds = '2021.' + i['date']  # the API returns dates like '01.13', without a year
    tup = time.strptime(ds, '%Y.%m.%d')
    ds = time.strftime('%Y-%m-%d', tup)  # reformat the date for the database
    confirm = i['confirm']
    suspect = i['suspect']
    heal = i['heal']
    dead = i['dead']
    history[ds] = {'confirm': confirm, 'suspect': suspect, 'heal': heal, 'dead': dead}
  for i in chinaDayAddList:
    ds = '2021.' + i['date']
    tup = time.strptime(ds, '%Y.%m.%d')
    ds = time.strftime('%Y-%m-%d', tup)  # reformat the date for the database
    confirm_add = i['confirm']
    suspect_add = i['suspect']
    heal_add = i['heal']
    dead_add = i['dead']
    history[ds].update({'confirm_add': confirm_add, 'suspect_add': suspect_add, 'heal_add': heal_add, 'dead_add': dead_add})
  return history

Test the function on its own:

# h = get_history()
# print(h)
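One thing to watch: the history[ds].update(...) call assumes every date in chinaDayAddList also appears in chinaDayList. That has held for this endpoint, but a defensive variant (my own sketch, not the article's code) avoids a potential KeyError:

# Replace the update line inside the second loop with:
history.setdefault(ds, {}).update({'confirm_add': confirm_add, 'suspect_add': suspect_add,
                                   'heal_add': heal_add, 'dead_add': dead_add})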

③ These functions open and close the database connection:

import time
import pymysql
import traceback

def get_conn():
  """
  :return: connection, cursor
  """
  # Create the connection
  conn = pymysql.connect(host="127.0.0.1",
                         user="root",
                         password="000429",
                         db="mydb",
                         charset="utf8")
  # Create the cursor; result sets are returned as tuples by default
  cursor = conn.cursor()
  return conn, cursor

def close_conn(conn, cursor):
  if cursor:
    cursor.close()
  if conn:
    conn.close()
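Every writer below repeats the same connect/try/finally dance. As an optional sketch (the db_cursor helper is my own addition, not part of the original article), contextlib can factor that pattern out:

from contextlib import contextmanager

@contextmanager
def db_cursor():
  # Yield a cursor, commit on success, roll back on error,
  # and always release the connection.
  conn, cursor = get_conn()
  try:
    yield cursor
    conn.commit()
  except Exception:
    conn.rollback()
    raise
  finally:
    close_conn(conn, cursor)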

④ This function inserts the latest daily details into the database table:

def update_details():
  """
  Update the details table
  :return:
  """
  cursor = None
  conn = None
  try:
    li = get_details()
    conn, cursor = get_conn()
    sql = "insert into details(update_time,province,city,confirm,confirm_add,heal,dead) values(%s,%s,%s,%s,%s,%s,%s)"
    sql_query = 'select %s=(select update_time from details order by id desc limit 1)'  # compare against the newest stored timestamp
    cursor.execute(sql_query, li[0][0])
    if not cursor.fetchone()[0]:
      print(f"{time.asctime()} updating with the latest data")
      for item in li:
        cursor.execute(sql, item)
      conn.commit()  # commit the transaction (needed for update/delete/insert)
      print(f"{time.asctime()} finished updating the latest data")
    else:
      print(f"{time.asctime()} data is already up to date!")
  except:
    traceback.print_exc()
  finally:
    close_conn(conn, cursor)

Test inserting data into the details table on its own:

update_details()
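The sql_query trick asks MySQL to evaluate whether the scraped lastUpdateTime equals the newest update_time already stored: it returns 1 when they match and 0 (or NULL on an empty table) otherwise, so the insert loop only runs when there is something new. The same check done client-side would look roughly like this (my own equivalent sketch, assuming update_time round-trips as 'YYYY-MM-DD HH:MM:SS'):

cursor.execute('select update_time from details order by id desc limit 1')
row = cursor.fetchone()
already_latest = row is not None and str(row[0]) == li[0][0]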

⑤ This function inserts the historical data into the history table

def insert_history():
  """
  Insert the historical data
  :return:
  """
  cursor = None
  conn = None
  try:
    dic = get_history()
    print(f"{time.asctime()} inserting historical data")
    conn, cursor = get_conn()
    sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    for k, v in dic.items():
      # each item looks like {'2021-01-13': {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1}}
      cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"), v.get("suspect"),
                           v.get("suspect_add"), v.get("heal"), v.get("heal_add"),
                           v.get("dead"), v.get("dead_add")])
    conn.commit()  # commit the transaction (needed for update/delete/insert)
    print(f"{time.asctime()} finished inserting historical data")
  except:
    traceback.print_exc()
  finally:
    close_conn(conn, cursor)

Test inserting data into the history table on its own:

# insert_history()
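Note that insert_history() inserts every row unconditionally, so running it a second time will fail on the ds primary key. Use it once to seed an empty table, and rely on update_history() below for incremental refreshes.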

⑥ This function updates the history table, inserting only the dates that are not already stored:

def update_history():
  """
  Update the historical data
  :return:
  """
  cursor = None
  conn = None
  try:
    dic = get_history()
    print(f"{time.asctime()} updating historical data")
    conn, cursor = get_conn()
    sql = "insert into history values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    sql_query = "select confirm from history where ds=%s"
    for k, v in dic.items():
      # each item looks like {'2021-01-13': {'confirm': 41, 'suspect': 0, 'heal': 0, 'dead': 1}}
      # execute() returns the number of matching rows, so 0 means the date is new
      if not cursor.execute(sql_query, k):
        cursor.execute(sql, [k, v.get("confirm"), v.get("confirm_add"), v.get("suspect"),
                             v.get("suspect_add"), v.get("heal"), v.get("heal_add"),
                             v.get("dead"), v.get("dead_add")])
    conn.commit()  # commit the transaction (needed for update/delete/insert)
    print(f"{time.asctime()} finished updating historical data")
  except:
    traceback.print_exc()
  finally:
    close_conn(conn, cursor)

Test the history-update function on its own:

# update_history()
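Putting it together, a possible entry point (my own sketch, not from the original article) seeds the history table once and then refreshes both tables on a timer:

if __name__ == '__main__':
  # insert_history()  # run once against an empty history table to seed it
  while True:
    update_details()  # insert the latest per-city snapshot if it is new
    update_history()  # append any daily rows not yet stored
    time.sleep(3600)  # re-check hourly; the interval is arbitrary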

Finally, here is the DDL for the two tables (you can also create them directly with a MySQL GUI tool):

create table history(
  ds datetime not null comment 'date',
  confirm int(11) default null comment 'cumulative confirmed',
  confirm_add int(11) default null comment 'newly confirmed that day',
  suspect int(11) default null comment 'remaining suspected',
  suspect_add int(11) default null comment 'newly suspected that day',
  heal int(11) default null comment 'cumulative healed',
  heal_add int(11) default null comment 'newly healed that day',
  dead int(11) default null comment 'cumulative deaths',
  dead_add int(11) default null comment 'new deaths that day',
  primary key(ds) using btree
)engine=InnoDB default charset=utf8mb4;

create table details(
  id int(11) not null auto_increment,
  update_time datetime default null comment 'time the data was last updated',
  province varchar(50) default null comment 'province',
  city varchar(50) default null comment 'city',
  confirm int(11) default null comment 'cumulative confirmed',
  confirm_add int(11) default null comment 'newly confirmed',
  heal int(11) default null comment 'cumulative healed',
  dead int(11) default null comment 'cumulative deaths',
  primary key(id)
)engine=InnoDB default charset=utf8mb4;
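One caveat: the inserts in ⑤ and ⑥ use a bare insert into history values(...) with no column list, so they depend on exactly this column order (ds, confirm, confirm_add, suspect, suspect_add, heal, heal_add, dead, dead_add), which matches the parameter lists built in insert_history() and update_history(). If you reorder the columns, list them explicitly in the insert statement.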

Tomorrow the birds will be singing.

That wraps up this article on scraping Tencent's real-time epidemic data with Python and storing it in a MySQL database. For more on scraping data into MySQL with Python, search WalkonNet's earlier articles or keep browsing the related articles below, and please keep supporting WalkonNet!
