python 统计MySQL大于100万的表
时间:2021-07-01 10:21:17
帮助过:7人阅读
#!/usr/bin/env python3
# coding: utf-8
"""Collect the row count of every table in every non-system MySQL database
and dump the result to tj.json as {db: {"name": db, "table_list": [...]}}."""
import json

import pymysql

conn = pymysql.connect(
    host="192.168.91.128",   # MySQL server address
    user="root",
    passwd="root",
    port=3306,               # must be an int, not a string
    connect_timeout=3,       # seconds before the connect attempt fails
)

# System databases that should never appear in the report.
EXCLUDE = ("sys", "information_schema", "mysql", "performance_schema")

dic = {}  # top-level result: db name -> {"name": ..., "table_list": [...]}
try:
    with conn.cursor() as cur:
        cur.execute('SHOW DATABASES')
        databases = cur.fetchall()

        for (db_name,) in databases:
            if db_name in dic or db_name in EXCLUDE:
                continue
            dic[db_name] = {'name': db_name, 'table_list': []}

            conn.select_db(db_name)  # switch to this database
            cur.execute('SHOW TABLES')
            tables = cur.fetchall()

            for (tname,) in tables:
                # Identifiers cannot be bound as query parameters, so the
                # table name is interpolated inside backticks. The names come
                # from the server itself (SHOW TABLES), not from user input.
                cur.execute('select count(1) from `%s`;' % tname)
                (rows,) = cur.fetchone()  # COUNT always yields exactly one row
                print({'tname': tname, 'rows': rows})
                dic[db_name]['table_list'].append({'tname': tname, 'rows': rows})
finally:
    conn.close()  # original leaked the connection

# ensure_ascii=False keeps any non-ASCII table names readable in the utf-8 file.
with open('tj.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(dic, ensure_ascii=False))
View Code
三、写入excel中
直接读取tj.json文件,进行写入,完整代码如下:
#!/usr/bin/env python3
# coding: utf-8
"""Read tj.json (produced by the collection script) and write every table
with more than ROW_LIMIT rows to an .xls spreadsheet (test1.xls)."""
import json
from collections import OrderedDict

import xlwt

ROW_LIMIT = 1000000  # report only tables strictly larger than this

workbook = xlwt.Workbook()
sheet1 = workbook.add_sheet('统计', cell_overwrite_ok=True)

# Header row (row 0): database name, table name, row count.
row0 = ["库名", "表名", "行数"]
for col, title in enumerate(row0):
    sheet1.write(0, col, title)

# The json was written as utf-8; say so explicitly instead of relying on the
# platform default encoding.
with open("tj.json", 'r', encoding='utf-8') as load_f:
    load_dict = json.load(load_f)

# Sort databases by name so the report order is stable.
order_dic = OrderedDict((key, load_dict[key]) for key in sorted(load_dict))

num = 0  # number of data rows written so far (header occupies row 0)
for db_name in order_dic:
    for tbl in order_dic[db_name]["table_list"]:
        if tbl['rows'] > ROW_LIMIT:
            sheet1.write(num + 1, 0, db_name)        # database name
            sheet1.write(num + 1, 1, tbl['tname'])   # table name
            sheet1.write(num + 1, 2, tbl['rows'])    # row count
            num += 1

workbook.save('test1.xls')
View Code
执行程序,打开excel文件,效果如下:
python 统计MySQL大于100万的表
标签:自增 dump mysql odi onclick stat 基础 ali img