Commit b2ffd1b2 Author: 薛凌堃

11/21

Parent f79efef3
import MySQLdb
#pip install mysqlclient -i https://mirrors.aliyun.com/pypi/simple/
import time
def check_mysql_conn(conn):
try:
conn.ping()
return True
except:
return False
if __name__ == '__main__':
conn = MySQLdb.connect(
host='localhost',
user='root',
port=3306,
password='123456',
charset="utf8",
database='test'
)
while True:
if not check_mysql_conn(conn):
conn = MySQLdb.connect(
host='localhost',
user='root',
port=3306,
password='123456',
charset="utf8",
database='test'
)
print('reconnect mysql')
time.sleep(60)
\ No newline at end of file
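# Side note (not part of the commit): a minimal keep-alive sketch, assuming mysqlclient's
# ping() accepts a reconnect flag; keep_alive and its interval are illustrative names only.
import time

import MySQLdb


def keep_alive(conn, interval=60):
    """Ping the server periodically and let the driver reconnect if the link dropped."""
    while True:
        try:
            conn.ping(True)  # True asks mysqlclient to reconnect on a broken link (assumption)
        except MySQLdb.OperationalError as e:
            print(f'ping/reconnect failed, retrying later: {e}')
        time.sleep(interval)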
......@@ -9,8 +9,11 @@ from kafka import KafkaProducer
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from base.BaseCore import BaseCore
baseCore = BaseCore()
import sys
# sys.path.append('D:\\KK\\zzsn_spider\\base')
sys.path.append('D:\\kkwork\\zzsn_spider\\base')
import BaseCore
baseCore = BaseCore.BaseCore()
cnx_ = baseCore.cnx
cursor_ = baseCore.cursor
log = baseCore.getLogger()
......@@ -75,6 +78,8 @@ def checklogin(key):
# url = f'https://www.qcc.com/web/search?key=91110108558521630L'
url = f'https://www.qcc.com/web/search?key={key}'
# ip = baseCore.get_proxy()
# req = requests.get(headers=headers, url=url, proxies=ip)
req = requests.get(headers=headers, url=url)
soup = BeautifulSoup(req.content, 'html.parser')
if soup.find('title').text == '会员登录 - 企查查':
......@@ -310,12 +315,20 @@ def redaytowork(com_name,social_code,securitiesCode, securitiesShortName, listin
if not soup:
log.info("登录失效===重新放入redis")
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode', company_field)
token.delete_token(cookie_)
token.delete_token(id_cookie)
log.info('=====已重新放入redis,失效cookies已删除======')
time.sleep(20)
return count
else:
searchinfo = soup.find_all('div', class_='npanel-heading')[1].find('span', class_='text-danger').text
try:
searchinfo = soup.find_all('div', class_='npanel-heading')[1].find('span', class_='text-danger').text
except:
log.info("登录失效===重新放入redis")
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode', company_field)
token.updateTokeen(id_cookie,2)
log.info('=====已重新放入redis,cookies已封号======')
time.sleep(20)
return count
if searchinfo == '0':
log.info('=====搜索不到该企业====')
data = [com_name, social_code]
......@@ -328,14 +341,15 @@ def redaytowork(com_name,social_code,securitiesCode, securitiesShortName, listin
if spiderwork(soup, com_name, securitiesCode, securitiesShortName, listingDate, category, exchange, ynDomestic, countryName, file_name):
count += 1
log.info(f'采集{com_name}成功=======耗时{baseCore.getTimeCost(start_time, time.time())}')
token.updateTokeen(id_cookie,3)
return count
else:
return count
except Exception as e:
log.info(f'====={social_code}=====获取基本信息失败,重新放入redis=====')
baseCore.r.lpush('BaseInfoEnterprise:gnqy_social_code', company_field)
token.delete_token(cookie_)
log.info('=====已重新放入redis,失效cookies已删除======')
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode', company_field)
token.updateTokeen(id_cookie,2)
log.info('=====已重新放入redis,cookies已封号======')
return count
......@@ -365,6 +379,8 @@ def spiderinfo(company_url, securitiesCode, securitiesShortName, listingDate, ca
updateSql = f"update EnterpriseInfo set QCCID = '{qccid}' where SocialCode = '{social_code}'"
cursor_.execute(updateSql)
cnx_.commit()
# ip = baseCore.get_proxy()
# req_ = requests.get(headers=headers, url=company_url, proxies=ip)
req_ = requests.get(headers=headers, url=company_url)
com_soup = BeautifulSoup(req_.content, 'html.parser')
try:
......@@ -378,11 +394,7 @@ def spiderinfo(company_url, securitiesCode, securitiesShortName, listingDate, ca
try:
name = businessinfo.find('div', class_='ntag text-gray original-tag').text
value = \
businessinfo.find('div', class_='original-name-list').text.replace('展开', '').replace(' ',
'').replace(
'…',
'').replace(
'\n', '').replace('复制', '').split('(')[0]
businessinfo.find('div', class_='original-name-list').text.replace('展开', '').replace(' ', '').replace('…', '').replace('\n', '').replace('复制', '').split('(')[0]
except:
name = '曾用名'
value = ''
......@@ -430,13 +442,16 @@ def spiderinfo(company_url, securitiesCode, securitiesShortName, listingDate, ca
data_businfo[list(data_businfo.keys())[-1]] = value
else:
data_businfo[list(data_businfo.keys())[-1]] = td.text.replace('复制', '').replace('\n','').replace(' ','')
data_businfo[list(data_businfo.keys())[-1]] = td.text.replace('复制', '').replace('\n',
'').replace(
' ',
'')
result_dict = getinfo(data_businfo, data_baseinfo)
# print(result_dict)
# 采集成功的企业
data = [com_name, social_code, result_dict['统一社会信用代码']]
data = [com_name, result_dict['企业名称'], social_code, result_dict['统一社会信用代码']]
file.appenddata(file_name, '获取基本信息成功企业', data)
# 将字段转化成英文驼峰
aa_dic = dic_handle(result_dict)
......@@ -449,13 +464,13 @@ def spiderinfo(company_url, securitiesCode, securitiesShortName, listingDate, ca
aa_dic['category'] = category
aa_dic['exchange'] = exchange
print(aa_dic)
# print(aa_dic)
sendkafka(aa_dic)
else:
data_baseinfo = baseinfo(com_soup)
# 采集成功的企业
data = [com_name, social_code, data_baseinfo['统一社会信用代码']]
data = [com_name, data_baseinfo['企业名称'], social_code, data_baseinfo['统一社会信用代码']]
file.appenddata(file_name, '获取基本信息成功企业', data)
# 将字段转化成英文驼峰
aa_dic = dic_handle(data_baseinfo)
......@@ -478,9 +493,9 @@ def spiderwork(soup, receptname, securitiesCode, securitiesShortName, listingDat
tr_list = company_list.find_all('tr', class_='tsd0')
except:
log.info(f'====={social_code}=====获取基本信息失败,重新放入redis=====')
baseCore.r.lpush('BaseInfoEnterprise:gnqy_social_code', company_field)
token.delete_token(cookie_)
log.info('=====已重新放入redis,失效cookie已删除======')
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode', company_field)
token.updateTokeen(id_cookie,2)
log.info('=====已重新放入redis,cookies已封号======')
return False
# receptname = '小米通讯技术有限公司'
......@@ -527,17 +542,19 @@ if __name__ == '__main__':
nowtime = baseCore.getNowTime(1).replace('-', '')[:8]
file_name = f'./data/国内企业基本信息采集情况_{nowtime}.xlsx'
file.createFile(file_name)
# TODO:需要隔两个小时左右抓包修改,token从数据库中获得
cookies = token.getToken()
print(type(cookies))
if cookies:
cookieinfo = token.getToken()
id_cookie = cookieinfo[0]
cookie_ = json.loads(cookieinfo[1])
# print(type(cookies))
if cookie_:
pass
else:
log.info('==========已无cookies==========')
time.sleep(30)
continue
cookie_ = json.loads(cookies[0])
print(type(cookie_))
# cookie_ = json.loads(cookies)
# print(type(cookie_))
log.info(f"获取cookie到----{cookie_}")
headers = {
......@@ -561,8 +578,8 @@ if __name__ == '__main__':
}
start_time = time.time()
# 获取企业信息
# company_field = baseCore.redicPullData('BaseInfoEnterprise:gnqy_socialCode')
company_field = '91220101606092819L||'
company_field = baseCore.redicPullData('BaseInfoEnterprise:gnqy_socialCode')
# company_field = '91220101606092819L||'
if company_field == 'end':
# 本轮处理完毕,需要发送邮件,并且进入下一轮
baseCore.sendEmail(file_name)
......@@ -575,16 +592,18 @@ if __name__ == '__main__':
file.deleteFile(file_name)
flag = True
while flag:
time.sleep(5400)
log.info('--------已没有数据---------')
time.sleep(30)
company_field = baseCore.redicPullData('BaseInfoEnterprise:gnqy_socialCode')
if company_field:
flag = False
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode', company_field)
log.info("-----已添加数据------")
baseCore.r.lpush('BaseInfoEnterprise:gnqy_socialCode',company_field)
continue
continue
social_code = company_field.split('|')[0]
com_name = company_field.split('|')[2].replace(" ", "")
com_name = company_field.split('|')[2].replace(' ', '')
ynDomestic = company_field.split('|')[15]
countryName = company_field.split('|')[16]
......@@ -601,10 +620,10 @@ if __name__ == '__main__':
# category = ''
# exchange = ''
count = redaytowork(com_name, social_code, securitiesCode, securitiesShortName, listingDate, category, exchange,ynDomestic, countryName, file_name)
# baseCore.r.close()
time.sleep(40)
# break
# baseCore.r.close()
# baseCore.sendEmail(file_name)
# 信息采集完成后将该企业的采集次数更新
# runType = 'BaseInfoRunCount'
......
import os.path
import random
from openpyxl import Workbook, load_workbook
from base.BaseCore import BaseCore
......@@ -45,15 +47,31 @@ class File():
wb.close()
class Token():
#获取token
# 获取token
def getToken(self):
cursor.execute(f"select cookies from QCC_token order by update_time asc limit 1")
cursor.execute(f"select id,cookies from QCC_token where fenghao_time < DATE_SUB(NOW(), INTERVAL 2 HOUR) order by update_time asc limit 1")
# cursor.execute(f" select id, cookies from QCC_token")
# rows = cursor.fetchall()
# cnx.commit()
# if rows:
# try:
# token_info = rows[random.randint(0, len(rows) - 1)]
# token = token_info[1]
# id = token_info[0]
# return token, id
# except:
# return False, False
# else:
# # 没有查到token
# log.info("没有拿到token")
# return False, False
row = cursor.fetchall()
cnx.commit()
if row:
pass
else:
#没有查到token
# 没有查到token
log.info("没有拿到token")
return False
......@@ -65,6 +83,19 @@ class Token():
cursor.execute(deletesql)
cnx.commit()
# token的处理
def updateTokeen(self,id_token, type):
if type == 2:
# session失效,删除token
cursor.execute(f"delete from QCC_token where id={id_token}")
if type == 1:
# 封号了 修改封号时间
cursor.execute(f"update QCC_token set fenghao_time=now() where id={id_token}")
if type == 3:
# 修改使用时间
cursor.execute(f"update QCC_token set update_time=now() where id={id_token}")
cnx.commit()
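    # Sketch (not part of this commit): the same token-state updates written with bound
    # parameters instead of f-string SQL; updateTokenSafe is a hypothetical name and
    # cursor/cnx are the module-level MySQL handles already used above.
    def updateTokenSafe(self, id_token, type):
        if type == 2:
            # session invalid, drop the token
            cursor.execute("delete from QCC_token where id=%s", (id_token,))
        elif type == 1:
            # account banned, record the ban time
            cursor.execute("update QCC_token set fenghao_time=now() where id=%s", (id_token,))
        elif type == 3:
            # record the last-used time
            cursor.execute("update QCC_token set update_time=now() where id=%s", (id_token,))
        cnx.commit()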
class Tag():
# 删除特定属性标签
def deletep(self, soup, tag_, attribute_to_delete, value_to_delete):
......
import json
from operator import itemgetter
from itertools import groupby
files=r'D:\worknote\新文件 3.json'
# 读取JSON文件
with open(files, 'r') as file:
data = json.load(file)
data=data['facts']['us-gaap']
zb=[]
# 遍历字典的key值
for key in data.keys():
    units = data[key]['units']
    for kk in units.keys():
        accounts = units[kk]
for account in accounts:
end=str(account['end'])
if end.endswith('03-31') or end.endswith('06-30') or end.endswith('09-30') or end.endswith('12-31'):
pass
else:
continue
val=str(account['val'])
fp=str(account['fp'])
form=str(account['form'])
zhibiao={
'zbname':key,
'riqi':end,
'jine':val,
'fp':fp,
'form':form,
}
zb.append(zhibiao)
# Keep only the 10-K filings
# Group the list by the 'form' field
zb.sort(key=itemgetter('form'))  # groupby needs the list pre-sorted by 'form'
forms = groupby(zb, key=itemgetter('form'))  # group by 'form'
form_25_data = []
for form, zz in forms:
if '10-K' in form :
form_25_data.extend(list(zz))
# Group the 10-K records by reporting date
form_25_data.sort(key=itemgetter('riqi'))  # groupby needs the list pre-sorted by 'riqi'
groups = groupby(form_25_data, key=itemgetter('riqi'))  # group by 'riqi'
# 遍历每个分组,并打印分类结果
for riqi, group in groups:
print(f"riqi: {riqi}")
# for item in group:
# print(item)
listbydate = [item for item in group]
print(listbydate)
print()
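# Side note (not part of the original script): itertools.groupby only merges adjacent
# items, which is why each groupby call above is preceded by a sort on the same key.
# Tiny self-contained illustration with made-up rows:
from itertools import groupby
from operator import itemgetter

rows = [{'form': '10-K', 'riqi': '2022-12-31'},
        {'form': '10-Q', 'riqi': '2023-03-31'},
        {'form': '10-K', 'riqi': '2021-12-31'}]
rows.sort(key=itemgetter('form'))  # without this sort, the two 10-K rows end up in separate groups
for form, grp in groupby(rows, key=itemgetter('form')):
    print(form, [r['riqi'] for r in grp])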
# Backfill the remaining core-personnel information
# First collect the Tianyancha id, then use that id to collect the core personnel
import datetime
import json
import requests,time,random
import pandas as pd
from bs4 import BeautifulSoup
import urllib3
from base.BaseCore import BaseCore
from getTycId import getTycIdByXYDM
baseCore = BaseCore()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
log = baseCore.getLogger()
headers = {
'Cookie':'HWWAFSESID=b6312a4594bea18413c; HWWAFSESTIME=1686818921445; csrfToken=e7sNDKWelJwlcjnm6Rlny887; TYCID=6ff6bc600b5911ee89d35bf79a73a3b1; bannerFlag=true; ssuid=1534238432; refresh_page=0; _ga=GA1.2.1790752229.1688467828; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22307016917%22%2C%22first_id%22%3A%22188be3e337e4bf-0d85716d366e44-26031d51-1049088-188be3e337f19e%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTg4YmUzZTMzN2U0YmYtMGQ4NTcxNmQzNjZlNDQtMjYwMzFkNTEtMTA0OTA4OC0xODhiZTNlMzM3ZjE5ZSIsIiRpZGVudGl0eV9sb2dpbl9pZCI6IjMwNzAxNjkxNyJ9%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%22307016917%22%7D%2C%22%24device_id%22%3A%22188be3e337e4bf-0d85716d366e44-26031d51-1049088-188be3e337f19e%22%7D; jsid=SEO-BAIDU-ALL-SY-000001; bdHomeCount=7; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1693986307; tyc-user-info=%7B%22state%22%3A%220%22%2C%22vipManager%22%3A%220%22%2C%22mobile%22%3A%2213592481839%22%7D; tyc-user-info-save-time=1693986377592; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxMzU5MjQ4MTgzOSIsImlhdCI6MTY5Mzk4NjM3NywiZXhwIjoxNjk2NTc4Mzc3fQ.xeK54nMtB5wt7ipdOjhrzdplT1azvezrTuoD1b8i3OguqMB97ZOR1pFbRsP7vsKRdZ3Fsf5Y5ZqlmRKAVHGraA; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1693986412',
# 'Cookie': 'TYCID=82cbe530204b11ed9f23298cecec1c60; ssuid=3927938144; _ga=GA1.2.1842488970.1670638075; jsid=SEO-BAIDU-ALL-SY-000001; tyc-user-info={%22state%22:%220%22%2C%22vipManager%22:%220%22%2C%22mobile%22:%2215565837784%22}; tyc-user-info-save-time=1678953978429; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxNTU2NTgzNzc4NCIsImlhdCI6MTY3ODk1Mzk3OCwiZXhwIjoxNjgxNTQ1OTc4fQ.wsNxLWMkZVrtOEvo_CCDPD38R7F23c5yk7dFAdHkwFPkZhEEvmiv0nlt7UD0ZWfo3t8aYxc4qvu4ueEgMubJ5g; tyc-user-phone=%255B%252215565837784%2522%255D; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22284710084%22%2C%22first_id%22%3A%22182b9ca585ead-089598c1d7f7928-26021d51-1327104-182b9ca585f7f1%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E8%87%AA%E7%84%B6%E6%90%9C%E7%B4%A2%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC%22%2C%22%24latest_referrer%22%3A%22https%3A%2F%2Fwww.baidu.com%2Flink%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfbG9naW5faWQiOiIyODQ3MTAwODQiLCIkaWRlbnRpdHlfY29va2llX2lkIjoiMTgyYjljYTU4NWVhZC0wODk1OThjMWQ3Zjc5MjgtMjYwMjFkNTEtMTMyNzEwNC0xODJiOWNhNTg1ZjdmMSJ9%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%22284710084%22%7D%2C%22%24device_id%22%3A%22182b9ca585ead-089598c1d7f7928-26021d51-1327104-182b9ca585f7f1%22%7D; HWWAFSESID=fa776898fa88a6520ea; HWWAFSESTIME=1679899464128; csrfToken=m3cB6mHsznwIuppkT-S8oYc6; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1679016180,1679471093,1679732923,1679899468; bdHomeCount=28; bannerFlag=true; show_activity_id_92=92; searchSessionId=1679899783.48494979; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1679899783',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36',
}
cnx_ = baseCore.cnx
cursor_ = baseCore.cursor
list_all_1 = []
list_all_2 = []
taskType = '天眼查/核心人员'
requests.adapters.DEFAULT_RETRIES = 5
def doJob():
while True:
# 根据从Redis中拿到的社会信用代码,在数据库中获取对应基本信息
social_code = baseCore.redicPullData('CorPersonEnterprise:gnqy_socialCode')
# 判断 如果Redis中已经没有数据,则等待
# social_code = '92640302MA76KNNT0D'
        if social_code is None:
            time.sleep(20)
            continue
        if 'ZZSN' in social_code:
            continue
start = time.time()
try:
# data = baseCore.getInfomation(social_code)
# if len(data) != 0:
# pass
# else:
# #数据重新塞入redis
# baseCore.rePutIntoR('CorPersonEnterprise:gnqy_socialCode',social_code)
# continue
# id = data[0]
# xydm = data[2]
tycid = ''
if tycid == None or tycid == '':
try:
retData = getTycIdByXYDM(social_code)
if retData['state']:
tycid = retData['tycData']['id']
# todo:写入数据库
# updateSql = f"update EnterpriseInfo set TYCID = '{tycid}' where SocialCode = '{xydm}'"
# cursor_.execute(updateSql)
# cnx_.commit()
else:
state = 0
takeTime = baseCore.getTimeCost(start, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, '', '获取天眼查id失败')
log.info(f'======={social_code}====重新放入redis====')
baseCore.rePutIntoR('CorPersonEnterprise:gnqy_socialCode', social_code)
continue
except Exception as e:
state = 0
takeTime = baseCore.getTimeCost(start, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, '', '获取天眼查id失败')
baseCore.rePutIntoR('CorPersonEnterprise:gnqy_socialCode', social_code)
continue
# count = data[17]
log.info(f"---{social_code}----{tycid}----开始采集核心人员")
list_one_info = []
num = 1
for page in range(1,2):
t = int(time.time()*1000)
#https://capi.tianyancha.com/cloud-listed-company/listed/getHkNoRepeatSeniorExecutive?_=1692929256462&gid=209370942&pageSize=20&pageNum=1
url = f'https://capi.tianyancha.com/cloud-listed-company/listed/noRepeatSeniorExecutive?_={t}&gid={tycid}&pageSize=20&pageNum={page}'
ip = baseCore.get_proxy()
res = requests.get(url,headers=headers,proxies=ip,verify=False)
time.sleep(1)
list_all = res.json()['data']['dataList']
if list_all:
for one_info in list_all:
name = one_info['name']
sex = one_info['sex']
education = one_info['education']
position = one_info['position']
Salary = one_info['salary']
#todo:获取当前年份
now = datetime.datetime.now()
year = now.year
try:
birthYear = year - int(one_info['age'])
except:
birthYear = ''
StockKeepings = one_info['numberOfShares']
currentTerm = one_info['term']
personInfo = one_info['resume']
try:
person_img = one_info['logo']
except:
person_img = '--'
dic_json = {
"socialCreditCode":social_code,
"name":name,
"sex":sex,
"education":education,
"position":position,
"salary":Salary,
"birthYear":birthYear,
"shareNum":StockKeepings,
"shareRatio":'',
"benefitShare":'',
"currentTerm":currentTerm,
"personInfo":personInfo,
"sort":str(num)
}
dic_json_img = {
"socialCreditCode":social_code,
"name":name,
"sex":sex,
"education":education,
"position":position,
"salary":Salary,
"birthYear":birthYear,
"shareNum":StockKeepings,
"shareRatio":'',
"benefitShare":'',
"currentTerm":currentTerm,
"personInfo":personInfo,
"头像":person_img,
"sort":str(num)
}
num = num+1
list_one_info.append(dic_json)
# list_all_2.append(dic_json_img)
else:
t = int(time.time() * 1000)
url = f'https://capi.tianyancha.com/cloud-listed-company/listed/getHkNoRepeatSeniorExecutive?_={t}&gid={tycid}&pageSize=20&pageNum={page}'
ip = baseCore.get_proxy()
res = requests.get(url, headers=headers, proxies=ip, verify=False)
time.sleep(1)
list_all = res.json()['data']['dataList']
if list_all:
for one_info in list_all:
name = one_info['personal_name']
try:
sex = one_info['gender2']
except:
sex = ''
education = ''
position = one_info['position_name']
Salary = ''
birthYear = ''
personInfo = one_info['resume_cn']
dic_json = {
"socialCreditCode": social_code,
"name": name,
"sex": sex,
"education": education,
"position": position,
"salary": Salary,
"birthYear": birthYear,
"shareNum": '',
"shareRatio": '',
"benefitShare": '',
"currentTerm": '',
"personInfo": personInfo,
"sort": str(num)
}
num = num + 1
list_one_info.append(dic_json)
else:
t = int(time.time() * 1000)
url = f'https://capi.tianyancha.com/cloud-company-background/company/dim/staff?_={t}&gid={tycid}&pageSize=20&pageNum={page}'
ip = baseCore.get_proxy()
res = requests.get(url, headers=headers, proxies=ip, verify=False)
time.sleep(1)
try:
list_all = res.json()['data']['result']
except Exception as e:
log.info(res.json())
continue
# todo:增加一种情况
if list_all:
for one_info in list_all:
name = one_info['name']
try:
sex = one_info['sex']
except:
sex = ''
try:
education = one_info['education']
except:
education = ''
try:
position = one_info['typeSore']
except:
position = ''
try:
Salary = one_info['salary']
except:
Salary = ''
birthYear = ''
try:
shareRatio = one_info['percent']
except:
shareRatio = ''
try:
benefitShare = one_info['finalBenefitShares']
except:
benefitShare = ''
try:
currentTerm = one_info['term']
except:
currentTerm = ''
person_id = one_info['id']
person_url = f'https://www.tianyancha.com/human/{person_id}-c{tycid}'
person_res = requests.get(person_url, headers=headers, proxies=ip)
person_soup = BeautifulSoup(person_res.content, 'html.parser')
try:
personInfo = person_soup.find('span', {'class': '_56d0a'}).text.strip()
except:
personInfo = ''
try:
person_img = one_info['logo']
except:
person_img = '--'
dic_json = {
"socialCreditCode": social_code,
"name": name,
"sex": sex,
"education": education,
"position": position,
"salary": Salary,
"birthYear": birthYear,
"shareNum": '',
"shareRatio": shareRatio,
"benefitShare": benefitShare,
"currentTerm": currentTerm,
"personInfo": personInfo,
"sort": str(num)
}
dic_json_img = {
"socialCreditCode": social_code,
"name": name,
"sex": sex,
"education": education,
"position": position,
"salary": Salary,
"birthYear": birthYear,
"shareNum": '',
"shareRatio": shareRatio,
"benefitShare": benefitShare,
"currentTerm": '',
"personInfo": personInfo,
"头像": person_img,
"sort": str(num)
}
num = num + 1
list_one_info.append(dic_json)
json_updata = json.dumps(list_one_info)
if json_updata == '[]':
                log.info(f'---{social_code}---无高管信息---')
continue
else:
pass
response = requests.post('http://114.115.236.206:8088/sync/executive',data=json_updata,timeout=300, verify=False)
print(response.text)
log.info('=========成功======')
except Exception as e:
log.info(f'==={social_code}=====企业核心人员采集失败===重新放入redis====')
# 重新塞入redis
baseCore.rePutIntoR('CorPersonEnterprise:gnqy_socialCode', social_code)
state = 0
takeTime = baseCore.getTimeCost(start, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, '', f'获取企业信息失败--{e}')
time.sleep(5)
# break
# df_img = pd.DataFrame(list_all_2)
# df_img.to_excel('企业主要人员-头像.xlsx',index=False)
if __name__ == "__main__":
doJob()
\ No newline at end of file
......@@ -27,7 +27,7 @@ headers = {
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
'X-AUTH-TOKEN': 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODcwMzc1MjYwMCIsImlhdCI6MTY5OTkyNTk5NywiZXhwIjoxNzAyNTE3OTk3fQ.9iXmxFEiBdu2WYa7RwdU0xKKx7v_wBe9-QipH0TNKp9Dzk_2cZK1ESsmO1o8ICrddb5sx2cl5pjOBoaaf_9Qsg',
'X-TYCID': '6f6298905d3011ee96146793e725899d',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
......@@ -74,8 +74,8 @@ def getTycIdByXYDM(xydm):
log.error(f"{xydm}------{retJsonData}")
response.close()
return retData
except:
log.error(f"---{xydm}--天眼查token失效---")
except Exception as e:
log.error(f"---{xydm}--{e}---")
return retData
......
......@@ -2,6 +2,8 @@ import json
import threading
import time
import uuid
import redis
import requests
from retry import retry
from elasticsearch import Elasticsearch
......@@ -22,7 +24,9 @@ cursor_ = cnx_.cursor()
lock = threading.Lock()
pathType = 'QYNotice/'
taskType = '企业公告/证监会'
taskType = '企业研报/东方财富网'
pool = redis.ConnectionPool(host="114.115.236.206", port=6379, password='clbzzsn', db=6)
class EsMethod(object):
......@@ -82,40 +86,73 @@ class EsMethod(object):
,body=body)
log.info('更新结果:%s' % result)
# Look up each ES attachment id in the attachment table; for research reports not found there, look up the attachment id by title and update ES
# 1. Look up each ES attachment id in the attachment table; push the ones not found into redis (can use multiple threads)
# 2. Look up the attachment id by title and update ES
def secrchATT(type_id,id,title,attid):
def secrchATT(type_id,attid):
sel_sql = '''select id from clb_sys_attachment where type_id=%s and id = %s '''
lock.acquire()
cursor_.execute(sel_sql, (type_id, title,attid))
cursor_.execute(sel_sql, (type_id, attid))
selects = cursor_.fetchone()
lock.release()
return selects
if __name__ == "__main__":
esMethod = EsMethod()
p = 200
def main(page, p, esMethod):
redis_conn = redis.Redis(connection_pool=pool)
result = esMethod.queryatt(index_name=esMethod.index_name, pnum=p)
total = result['hits']['total']['value']
page = 0
if total == 0:
log.info('++++已没有数据+++++')
return
msglist = result['hits']['hits']
print(msglist)
log.info(f'---第{page}页{len(msglist)}条数据----共{total}条数据----')
num = 0
for mms in msglist:
start_time = time.time()
id = mms['_id']
title = mms['_source']['title']
sourceAddress = mms['_source']['sourceAddress']
attid = mms['_source']['attachmentIds'][0]
createDate = mms['_source']['createDate']
log.info(f'{id}-{attid}--{title}--{sourceAddress}---')
selects = secrchATT('4',id,title,attid)
selects = secrchATT('4', attid)
if selects:
pass
else:
baseCore.r.lpush('YanBao:id', id)
\ No newline at end of file
item = id + "|" + title
if redis_conn.lrem('YanBao:id', 0, item) == 0:
redis_conn.lpush('YanBao:id', item)
else:
continue
def run_threads(num_threads,esMethod,j):
threads = []
for i in range(num_threads):
page = j + i + 1
p = j + i * 200
thread = threading.Thread(target=main, args=(page, p, esMethod))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
if __name__ == "__main__":
j = 0
for i in range(165):
esMethod = EsMethod()
# result = esMethod.queryatt(index_name=esMethod.index_name, pnum=p)
# total = result['hits']['total']['value']
# if total == 0:
# log.info('++++已没有数据+++++')
# break
start = time.time()
num_threads = 5
run_threads(num_threads, esMethod, j)
j += 1000
log.info(f'5线程 每个处理200条数据 总耗时{time.time() - start}秒')
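# Sketch (assumption, not part of this commit): a Redis set gives duplicate-free queueing
# without the lrem()-then-lpush() dance above, which removes an already-queued id before
# deciding not to re-add it. The 'YanBao:id_set' key name is hypothetical.
import redis

def queue_once(redis_conn: redis.Redis, item: str) -> bool:
    """Queue the item once; return True if it was newly queued, False if already pending."""
    if redis_conn.sadd('YanBao:id_set', item) == 1:
        redis_conn.lpush('YanBao:id', item)
        return True
    return False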
import json
import threading
import time
import redis
import requests
from retry import retry
from elasticsearch import Elasticsearch
from base import BaseCore
baseCore = BaseCore.BaseCore()
log = baseCore.getLogger()
baseCore = BaseCore.BaseCore()
cnx_ = baseCore.cnx_
cursor_ = cnx_.cursor()
lock = threading.Lock()
pathType = 'QYNotice/'
taskType = '企业研报/东方财富网'
es = Elasticsearch(['http://114.116.19.92:9700'], http_auth=('elastic', 'zzsn9988'), timeout=300)
index_name = 'researchreportdata'
pool = redis.ConnectionPool(host="114.115.236.206", port=6379, password='clbzzsn', db=6)
def searchATT(title):
sql = "select id from clb_sys_attachment where type_id=4 and name=%s "
# lock.acquire()
    cursor_.execute(sql, (title + '.pdf',))
selects = cursor_.fetchone()
# lock.release()
return selects
def updateaunn(index_name,id,u_attid):
body = {
'doc': {
'attachmentIds': [str(u_attid)]
}
}
result = es.update(index=index_name
,id=id
,body=body)
log.info('更新结果:%s' % result)
if __name__ == "__main__":
while True:
        # Read each item from redis, look it up in the attachment table by title, and update the attachment id when found
item = baseCore.redicPullData('YanBao:id')
log.info(item)
title = item.split('|')[1]
id = item.split('|')[0]
selects = searchATT(title)
if selects:
u_attid = selects[0]
# 更新es库
updateaunn(index_name,id,u_attid)
else:
# 查询es库,通过链接上传文件到obs
baseCore.r.lpush('YanBao:up',id)
# __init__.py
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: client.py
'''
Client module for Fastdfs 3.08
author: scott yuan scottzer8@gmail.com
date: 2012-06-21
'''
import os
import sys
from fdfs_client.utils import *
from fdfs_client.tracker_client import *
from fdfs_client.storage_client import *
from fdfs_client.exceptions import *
def get_tracker_conf(conf_path='client.conf'):
cf = Fdfs_ConfigParser()
tracker = {}
try:
cf.read(conf_path)
timeout = cf.getint('__config__', 'connect_timeout')
tracker_list = cf.get('__config__', 'tracker_server')
if isinstance(tracker_list, str):
tracker_list = [tracker_list]
tracker_ip_list = []
for tr in tracker_list:
tracker_ip, tracker_port = tr.split(':')
tracker_ip_list.append(tracker_ip)
tracker['host_tuple'] = tuple(tracker_ip_list)
tracker['port'] = int(tracker_port)
tracker['timeout'] = timeout
tracker['name'] = 'Tracker Pool'
except:
raise
return tracker
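# Configuration sketch (not part of the library source): get_tracker_conf() expects the
# standard section-less FastDFS client.conf, which Fdfs_ConfigParser is assumed to expose
# under the '__config__' section read above, e.g.:
#
#     connect_timeout=30
#     tracker_server=192.168.0.1:22122
#
# Multiple tracker_server lines become host_tuple; the port of the last entry is used.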
class Fdfs_client(object):
'''
    Class Fdfs_client implements the Fastdfs client protocol, version 3.08.
    It is useful for uploading, downloading and deleting files to or from an fdfs
    server, etc. It uses a connection pool to manage connections to the server.
'''
def __init__(self, trackers, poolclass=ConnectionPool):
self.trackers = trackers
self.tracker_pool = poolclass(**self.trackers)
self.timeout = self.trackers['timeout']
return None
def __del__(self):
try:
self.pool.destroy()
self.pool = None
except:
pass
def upload_by_filename(self, filename, meta_dict=None):
'''
Upload a file to Storage server.
arguments:
@filename: string, name of file that will be uploaded
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} meta_dict can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : local_file_name,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_filename(tc, store_serv, filename, meta_dict)
def upload_by_file(self, filename, meta_dict=None):
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_file(tc, store_serv, filename, meta_dict)
def upload_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string, buffer
@file_ext_name: string, file extend name
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_buffer(tc, store_serv, filebuffer, file_ext_name, meta_dict)
def upload_slave_by_filename(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_filename(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_file(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_file(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_buffer(self, filebuffer, remote_file_id, meta_dict=None, file_ext_name=None):
'''
Upload slave file by buffer
arguments:
@filebuffer: string
@remote_file_id: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_slave_by_buffer(tc, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name)
def upload_appender_by_filename(self, local_filename, meta_dict=None):
'''
Upload an appender file by filename.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_filename(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_file(self, local_filename, meta_dict=None):
'''
Upload an appender file by file.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_file(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string
@file_ext_name: string, can be null
@meta_dict: dictionary, can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_buffer(tc, store_serv, filebuffer, meta_dict, file_ext_name)
def delete_file(self, remote_file_id):
'''
Delete a file from Storage server.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@return tuple ('Delete file successed.', remote_file_id, storage_ip)
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in delete file)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_delete_file(tc, store_serv, remote_filename)
def download_to_file(self, local_filename, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server.
arguments:
@local_filename: string, local name of file
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@downbytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : local_filename,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
        file_offset = int(offset)
        download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_download_to_file(tc, store_serv, local_filename, file_offset, download_bytes,
remote_filename)
def download_to_buffer(self, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server and store in buffer.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@down_bytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : file_buffer,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
        file_offset = int(offset)
        download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
file_buffer = None
return store.storage_download_to_buffer(tc, store_serv, file_buffer, file_offset, download_bytes,
remote_filename)
def list_one_group(self, group_name):
'''
List one group information.
arguments:
@group_name: string, group name will be list
@return Group_info, instance
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_one_group(group_name)
def list_servers(self, group_name, storage_ip=None):
'''
List all storage servers information in a group
arguments:
@group_name: string
@return dictionary {
'Group name' : group_name,
'Servers' : server list,
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_servers(group_name, storage_ip)
def list_all_groups(self):
'''
List all group information.
@return dictionary {
'Groups count' : group_count,
'Groups' : list of groups
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_all_groups()
def get_meta_data(self, remote_file_id):
'''
Get meta data of remote file.
arguments:
@remote_fileid: string, remote file id
@return dictionary, meta data
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in get meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_get_metadata(tc, store_serv, remote_filename)
def set_meta_data(self, remote_file_id, meta_dict, op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
'''
Set meta data of remote file.
arguments:
@remote_file_id: string
@meta_dict: dictionary
@op_flag: char, 'O' for overwrite, 'M' for merge
@return dictionary {
'Status' : status,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in set meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
try:
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
status = store.storage_set_metadata(tc, store_serv, remote_filename, meta_dict)
except (ConnectionError, ResponseError, DataError):
raise
# if status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' % remote_file_id)
if status != 0:
            raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
ret_dict = {}
ret_dict['Status'] = 'Set meta data success.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def append_by_filename(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_filename(tc, store_serv, local_filename, appended_filename)
def append_by_file(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_file(tc, store_serv, local_filename, appended_filename)
def append_by_buffer(self, file_buffer, remote_fileid):
if not file_buffer:
raise DataError('[-] Error: file_buffer can not be null.')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_buffer(tc, store_serv, file_buffer, appended_filename)
def truncate_file(self, truncated_filesize, appender_fileid):
'''
Truncate file in Storage server.
arguments:
@truncated_filesize: long
@appender_fileid: remote_fileid
@return: dictionary {
'Status' : 'Truncate successed.',
'Storage IP' : storage_ip
}
'''
trunc_filesize = int(truncated_filesize)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: appender_fileid is invalid.(truncate)')
group_name, appender_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_truncate_file(tc, store_serv, trunc_filesize, appender_filename)
def modify_by_filename(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_filename(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_file(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_file(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_buffer(self, filebuffer, appender_fileid, offset=0):
'''
Modify a file in Storage server by buffer.
arguments:
@filebuffer: string, file buffer
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: filebuffer can not be null.(modify)')
filesize = len(filebuffer)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_buffer(tc, store_serv, filebuffer, file_offset, filesize, appender_filename)
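# Usage sketch (not part of the library source): upload a local file through the client;
# the conf path and file path below are placeholders.
if __name__ == '__main__':
    tracker = get_tracker_conf('client.conf')
    client = Fdfs_client(tracker)
    ret = client.upload_by_filename('/tmp/example.pdf')
    # ret is expected to carry 'Group name', 'Remote file_id', 'Status', etc. on success
    print(ret)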
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: connection.py
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
    '''Manage TCP communication to and from the Fastdfs server.'''
def __init__(self, **conn_kwargs):
self.pid = os.getpid()
self.host_tuple = conn_kwargs['host_tuple']
self.remote_port = conn_kwargs['port']
self.remote_addr = None
self.timeout = conn_kwargs['timeout']
self._sock = None
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
'''Connect to fdfs server.'''
if self._sock:
return
try:
sock = self._connect()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = sock
# print '[+] Create a connection success.'
# print '\tLocal address is %s:%s.' % self._sock.getsockname()
# print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
def _connect(self):
'''Create TCP socket. The host is random one of host_tuple.'''
self.remote_addr = random.choice(self.host_tuple)
# print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.settimeout(self.timeout)
sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
return sock
def disconnect(self):
'''Disconnect from fdfs server.'''
if self._sock is None:
return
try:
self._sock.close()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = None
def get_sock(self):
return self._sock
def _errormessage(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
if len(exception.args) == 1:
return "[-] Error: connect to %s:%s. %s." % (self.remote_addr, self.remote_port, exception.args[0])
else:
return "[-] Error: %s connect to %s:%s. %s." % \
(exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# end class Connection
# start ConnectionPool
class ConnectionPool(object):
'''Generic Connection Pool'''
def __init__(self, name='', conn_class=Connection,
max_conn=None, **conn_kwargs):
self.pool_name = name
self.pid = os.getpid()
self.conn_class = conn_class
self.max_conn = max_conn or 2 ** 31
self.conn_kwargs = conn_kwargs
self._conns_created = 0
self._conns_available = []
self._conns_inuse = set()
# print '[+] Create a connection pool success, name: %s.' % self.pool_name
def _check_pid(self):
if self.pid != os.getpid():
self.destroy()
            self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)
def make_conn(self):
'''Create a new connection.'''
if self._conns_created >= self.max_conn:
raise ConnectionError('[-] Error: Too many connections.')
num_try = 10
while True:
try:
if num_try <= 0:
sys.exit()
conn_instance = self.conn_class(**self.conn_kwargs)
conn_instance.connect()
self._conns_created += 1
break
except ConnectionError as e:
print(e)
num_try -= 1
conn_instance = None
return conn_instance
def get_connection(self):
'''Get a connection from pool.'''
self._check_pid()
try:
conn = self._conns_available.pop()
# print '[+] Get a connection from pool %s.' % self.pool_name
# print '\tLocal address is %s:%s.' % conn._sock.getsockname()
# print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
except IndexError:
conn = self.make_conn()
self._conns_inuse.add(conn)
return conn
def remove(self, conn):
'''Remove connection from pool.'''
if conn in self._conns_inuse:
self._conns_inuse.remove(conn)
self._conns_created -= 1
if conn in self._conns_available:
self._conns_available.remove(conn)
self._conns_created -= 1
def destroy(self):
'''Disconnect all connections in the pool.'''
all_conns = chain(self._conns_inuse, self._conns_available)
for conn in all_conns:
conn.disconnect()
# print '[-] Destroy connection pool %s.' % self.pool_name
def release(self, conn):
'''Release the connection back to the pool.'''
self._check_pid()
if conn.pid == self.pid:
self._conns_inuse.remove(conn)
self._conns_available.append(conn)
# print '[-] Release connection back to pool %s.' % self.pool_name
# end ConnectionPool class
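# Construction sketch (assumption, not part of the library source): the keyword arguments
# are forwarded to Connection, which expects host_tuple/port/timeout, mirroring the dict
# built by get_tracker_conf() in client.py.
#
#   pool = ConnectionPool(name='Tracker Pool',
#                         host_tuple=('192.168.0.1',),  # placeholder tracker address
#                         port=22122,
#                         timeout=30)
#   conn = pool.get_connection()
#   try:
#       sock = conn.get_sock()  # talk to the tracker over this socket
#   finally:
#       pool.release(conn)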
def tcp_recv_response(conn, bytes_size, buffer_size=4096):
'''Receive response from server.
    It does not include the tracker header.
arguments:
@conn: connection
@bytes_size: int, will be received byte_stream size
@buffer_size: int, receive buffer size
@Return: tuple,(response, received_size)
'''
recv_buff = []
total_size = 0
try:
while bytes_size > 0:
resp = conn._sock.recv(buffer_size)
recv_buff.append(resp)
total_size += len(resp)
bytes_size -= len(resp)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: (%s)' % e.args)
return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
'''Send buffer to server.
    It does not include the tracker header.
    arguments:
    @conn: connection
    @bytes_stream: transmit buffer
@Return bool
'''
try:
conn._sock.sendall(bytes_stream)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while writting to socket: (%s)' % e.args)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: exceptions.py
'''Core exceptions raised by fdfs client'''
class FDFSError(Exception):
pass
class ConnectionError(FDFSError):
pass
class ResponseError(FDFSError):
pass
class InvaildResponse(FDFSError):
pass
class DataError(FDFSError):
pass
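# Handling sketch (not part of the library source): every client error derives from
# FDFSError, so callers can catch the base class, or ConnectionError alone to retry, e.g.:
#
#   try:
#       client.delete_file(remote_file_id)
#   except ConnectionError:
#       pass  # retry against another tracker
#   except FDFSError as e:
#       print('fdfs operation failed:', e)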
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_protol.py
import struct
import socket
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# define FDFS protol constans
TRACKER_PROTO_CMD_STORAGE_JOIN = 81
FDFS_PROTO_CMD_QUIT = 82
TRACKER_PROTO_CMD_STORAGE_BEAT = 83 # storage heart beat
TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84 # report disk usage
TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85 # repl new storage servers
TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86 # src storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87 # dest storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88 # sync done notify
TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89 # report src last synced time as dest server
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79 # dest storage query sync src storage server
TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78 # storage server report it's ip changed
TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77 # storage server request storage server's changelog
TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76 # report specified storage server status
TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75 # storage server request parameters
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74 # storage report trunk free space
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73 # storage report current trunk file id
TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72 # storage get current trunk file id
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61 # start of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62 # end of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63 # tracker get a system data file
TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64 # tracker get status of other tracker
TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65 # tracker ping leader
TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66 # notify next leader to other trackers
TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67 # commit next leader to other trackers
TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
TRACKER_PROTO_CMD_RESP = 100
FDFS_PROTO_CMD_ACTIVE_TEST = 111 # active test, tracker and storage both support since V1.28
STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9 # ip as tracker client
STORAGE_PROTO_CMD_UPLOAD_FILE = 11
STORAGE_PROTO_CMD_DELETE_FILE = 12
STORAGE_PROTO_CMD_SET_METADATA = 13
STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
STORAGE_PROTO_CMD_GET_METADATA = 15
STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
STORAGE_PROTO_CMD_CREATE_LINK = 20
STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23 # create appender file
STORAGE_PROTO_CMD_APPEND_FILE = 24 # append file
STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26 # fetch binlog of one store path
STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27 # since V3.00
STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28 # since V3.00
STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29 # since V3.00
STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30 # since V3.00
STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31 # since V3.07
STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32 # since V3.07
STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33 # since V3.07
STORAGE_PROTO_CMD_MODIFY_FILE = 34 # since V3.08
STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35 # since V3.08
STORAGE_PROTO_CMD_TRUNCATE_FILE = 36 # since V3.08
STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37 # since V3.08
# for overwrite all old metadata
STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
# for replace, insert when the meta item not exist, otherwise update it
STORAGE_SET_METADATA_FLAG_MERGE = 'M'
STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
FDFS_RECORD_SEPERATOR = '\x01'
FDFS_FIELD_SEPERATOR = '\x02'
# common constants
FDFS_GROUP_NAME_MAX_LEN = 16
IP_ADDRESS_SIZE = 16
FDFS_PROTO_PKG_LEN_SIZE = 8
FDFS_PROTO_CMD_SIZE = 1
FDFS_PROTO_STATUS_SIZE = 1
FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
FDFS_MAX_SERVERS_EACH_GROUP = 32
FDFS_MAX_GROUPS = 512
FDFS_MAX_TRACKERS = 16
FDFS_DOMAIN_NAME_MAX_LEN = 128
FDFS_MAX_META_NAME_LEN = 64
FDFS_MAX_META_VALUE_LEN = 256
FDFS_FILE_PREFIX_MAX_LEN = 16
FDFS_LOGIC_FILE_PATH_LEN = 10
FDFS_TRUE_FILE_PATH_LEN = 6
FDFS_FILENAME_BASE64_LENGTH = 27
FDFS_TRUNK_FILE_INFO_LEN = 16
FDFS_FILE_EXT_NAME_MAX_LEN = 6
FDFS_SPACE_SIZE_BASE_INDEX = 2 # storage space size based (MB)
FDFS_UPLOAD_BY_BUFFER = 1
FDFS_UPLOAD_BY_FILENAME = 2
FDFS_UPLOAD_BY_FILE = 3
FDFS_DOWNLOAD_TO_BUFFER = 1
FDFS_DOWNLOAD_TO_FILE = 2
FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (
FDFS_LOGIC_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
FDFS_TRUNK_FILENAME_LENGTH = (
FDFS_TRUE_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_TRUNK_FILE_INFO_LEN + 1 + FDFS_FILE_EXT_NAME_MAX_LEN)
FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + (FDFS_LOGIC_FILE_PATH_LEN - FDFS_TRUE_FILE_PATH_LEN))
FDFS_VERSION_SIZE = 6
TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
# status code, order is important!
FDFS_STORAGE_STATUS_INIT = 0
FDFS_STORAGE_STATUS_WAIT_SYNC = 1
FDFS_STORAGE_STATUS_SYNCING = 2
FDFS_STORAGE_STATUS_IP_CHANGED = 3
FDFS_STORAGE_STATUS_DELETED = 4
FDFS_STORAGE_STATUS_OFFLINE = 5
FDFS_STORAGE_STATUS_ONLINE = 6
FDFS_STORAGE_STATUS_ACTIVE = 7
FDFS_STORAGE_STATUS_RECOVERY = 9
FDFS_STORAGE_STATUS_NONE = 99
class Storage_server(object):
    '''Storage server descriptor used for uploads.'''
def __init__(self):
self.ip_addr = None
self.port = None
self.group_name = ''
self.store_path_index = 0
# Class tracker_header
class Tracker_header(object):
'''
    Class for packing and unpacking the tracker protocol header
struct tracker_header{
char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
char cmd,
char status,
}
'''
def __init__(self):
self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
self.st = struct.Struct(self.fmt)
self.pkg_len = 0
self.cmd = 0
self.status = 0
def _pack(self, pkg_len=0, cmd=0, status=0):
return self.st.pack(pkg_len, cmd, status)
def _unpack(self, bytes_stream):
self.pkg_len, self.cmd, self.status = self.st.unpack(bytes_stream)
return True
def header_len(self):
return self.st.size
def send_header(self, conn):
'''Send Tracker header to server.'''
header = self._pack(self.pkg_len, self.cmd, self.status)
try:
conn._sock.sendall(header)
except (socket.error, socket.timeout) as e:
            raise ConnectionError('[-] Error: while writing to socket: %s' % (e.args,))
def recv_header(self, conn):
        '''Receive a response header from the server.
        On success, the members (pkg_len, cmd, status) hold the response header fields.
'''
try:
header = conn._sock.recv(self.header_len())
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: %s' % (e.args,))
self._unpack(header)
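# Illustrative sketch (not part of the upstream module): the tracker header is an
# 8-byte package length, a 1-byte command and a 1-byte status, so Tracker_header
# packs and unpacks exactly 10 bytes:
#   >>> th = Tracker_header()
#   >>> th.header_len()
#   10
#   >>> th._pack(1024, TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE, 0)
#   b'\x00\x00\x00\x00\x00\x00\x04\x00e\x00'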
def fdfs_pack_metadata(meta_dict):
ret = ''
for key in meta_dict:
ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
return ret[0:-1]
def fdfs_unpack_metadata(bytes_stream):
li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
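# Minimal usage sketch (illustrative, not part of the upstream module): the two
# helpers above round-trip a metadata dict through the FastDFS wire format, joining
# items with FDFS_RECORD_SEPERATOR and key/value pairs with FDFS_FIELD_SEPERATOR.
if __name__ == '__main__':
    _meta = {'width': '160px', 'height': '80px'}
    _packed = fdfs_pack_metadata(_meta)   # 'width\x02160px\x01height\x0280px'
    assert fdfs_unpack_metadata(_packed) == _meta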
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_test.py
import os
import sys
import time
try:
from fdfs_client.client import *
from fdfs_client.exceptions import *
except ImportError:
import_path = os.path.abspath('../')
sys.path.append(import_path)
from fdfs_client.client import *
from fdfs_client.exceptions import *
def usage():
s = 'Usage: python fdfs_test.py {options} [{local_filename} [{remote_file_id}]]\n'
    s += 'options: upfile, upfileex, upbuffer, downfile, downbuffer, delete, listgroup, listall, listsrv\n'
    s += '         upslavefile, upslavebuffer, upappendfile, upappendbuffer, appendfile, appendbuffer\n'
    s += '         getmeta, setmeta, truncate, modifyfile, modifybuffer\n'
s += '\tupfile {local_filename}\n'
s += '\tupbuffer {local_filename}\n'
s += '\tdownfile {local_filename} {remote_file_id}\n'
s += '\tdownbuffer {remote_file_id}\n'
s += '\tdelete {remote_file_id}\n'
s += '\tlistgroup {group_name}\n'
s += '\tlistall \n'
s += '\tlistsrv {group_name} [storage_ip]\n'
s += '\tsetmeta {remote_file_id}\n'
s += '\tgetmeta {remote_file_id}\n'
s += '\tupslavefile {local_filename} {remote_fileid} {prefix_name}\n'
s += '\tupappendfile {local_filename}\n'
s += '\ttruncate {truncate_filesize} {remote_fileid}\n'
s += '\tmodifyfile {local_filename} {remote_fileid} {file_offset}\n'
s += '\tmodifybuffer {local_filename} {remote_fileid} {file_offset}\n'
s += 'e.g.: python fdfs_test.py upfile test'
print(s)
sys.exit(0)
if len(sys.argv) < 2:
usage()
client = Fdfs_client('client.conf')
def upfile_func():
# Upload by filename
# usage: python fdfs_test.py upfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
file_size = os.stat(local_filename).st_size
        # meta_dict is optional and may be None.
meta_dict = {
'ext_name': 'py',
'file_size': str(file_size) + 'B'
}
t1 = time.time()
ret_dict = client.upload_by_filename(local_filename, meta_dict)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upfileex_func():
# Upload by file
# usage: python fdfs_test.py upfileex {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
t1 = time.time()
ret_dict = client.upload_by_file(local_filename)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavefile_func():
# upload slave file
# usage: python fdfs_test.py upslavefile {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
ret_dict = client.upload_slave_by_file(local_filename, remote_fileid, \
prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavebuffer_func():
# upload slave by buffer
# usage: python fdfs_test.py upslavebuffer {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
with open(local_filename, 'rb') as f:
filebuffer = f.read()
        # pass the buffer that was just read, not the local file name
        ret_dict = client.upload_slave_by_buffer(filebuffer, remote_fileid, prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def del_func():
# delete file
# usage: python fdfs_test.py delete {remote_fileid}
if len(sys.argv) < 3:
usage()
return None
try:
remote_file_id = sys.argv[2]
ret_tuple = client.delete_file(remote_file_id)
print('[+] %s' % ret_tuple[0])
print('[+] remote_fileid: %s' % ret_tuple[1])
print('[+] Storage IP: %s' % ret_tuple[2])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downfile_func():
# Download to file
# usage: python fdfs_test.py downfile {local_filename} {remote_fileid}
    if len(sys.argv) < 4:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
ret_dict = client.download_to_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_group_func():
# List one group info
# usage: python fdfs_test.py listgroup {group_name}
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
ret = client.list_one_group(group_name)
print(ret)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def listall_func():
# List all group info
# usage: python fdfs_test.py listall
if len(sys.argv) < 2:
usage()
return None
try:
ret_dict = client.list_all_groups()
print('=' * 80)
print('Groups count:', ret_dict['Groups count'])
for li in ret_dict['Groups']:
print('-' * 80)
print(li)
print('-' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_server_func():
# List all servers info of group
# usage: python fdfs_test.py listsrv {group_name} [storage_ip]
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
if len(sys.argv) > 3:
storage_ip = sys.argv[3]
else:
storage_ip = None
ret_dict = client.list_servers(group_name, storage_ip)
print('=' * 80)
print('Group name: %s' % ret_dict['Group name'])
print('=' * 80)
i = 1
for serv in ret_dict['Servers']:
print('Storage server %d:' % i)
print('=' * 80)
print(serv)
i += 1
print('=' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upbuffer_func():
# Upload by buffer
# usage: python fdfs_test.py upbuffer {local_filename} [remote_file_ext_name]
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
if len(sys.argv) > 3:
ext_name = sys.argv[3]
else:
ext_name = None
# meta_buffer can be null.
meta_buffer = {
'ext_name': 'gif',
'width': '150px',
'height': '80px'
}
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_by_buffer(file_buffer, ext_name, meta_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downbuffer_func():
# Download to buffer
# usage: python fdfs_test.py downbuffer {remote_file_id}
# e.g.: 'group1/M00/00/00/wKjzhU_rLNmjo2-1AAAamGDONEA5818.py'
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.download_to_buffer(remote_fileid)
print('Downloaded content:')
print(ret_dict['Content'])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def get_meta_data_func():
# Get meta data of remote file
# usage python fdfs_test.py getmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.get_meta_data(remote_fileid)
print(ret_dict)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def set_meta_data_func():
# Set meta data of remote file
# usage python fdfs_test.py setmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
meta_dict = {
        'ext_name': 'jpg',
'width': '160px',
        'height': '80px',
}
try:
ret_dict = client.set_meta_data(remote_fileid, meta_dict)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendfile_func():
# Upload an appender file by filename
# usage: python fdfs_test.py upappendfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
ret_dict = client.upload_appender_by_file(local_filename)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendbuffer_func():
# Upload an appender file by buffer
# usage: python fdfs_test.py upappendbuffer {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_appender_by_buffer(file_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendfile_func():
# Append a remote file
# usage: python fdfs_test.py appendfile {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
ret_dict = client.append_by_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendbuffer_func():
# Append a remote file by buffer
# usage: python fdfs_test.py appendbuffer {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.append_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def truncate_func():
# Truncate file
# usage: python fdfs_test.py truncate {truncate_filesize} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
truncate_filesize = int(sys.argv[2])
remote_fileid = sys.argv[3]
try:
ret_dict = client.truncate_file(truncate_filesize, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifyfile_func():
# Modify file by filename
# usage: python fdfs_test.py modifyfile {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
ret_dict = client.modify_by_filename(local_filename, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifybuffer_func():
# Modify file by buffer
# usage: python fdfs_test.py modifybuffer {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.modify_by_buffer(filebuffer, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
result = {
'upfile': lambda: upfile_func(),
'upfileex': lambda: upfileex_func(),
'upbuffer': lambda: upbuffer_func(),
'delete': lambda: del_func(),
'downfile': lambda: downfile_func(),
'downbuffer': lambda: downbuffer_func(),
'listgroup': lambda: list_group_func(),
'listall': lambda: listall_func(),
'listsrv': lambda: list_server_func(),
'getmeta': lambda: get_meta_data_func(),
'setmeta': lambda: set_meta_data_func(),
    'upslavefile': lambda: upslavefile_func(),
    'upslavebuffer': lambda: upslavebuffer_func(),
'upappendfile': lambda: upappendfile_func(),
'upappendbuffer': lambda: upappendbuffer_func(),
'appendfile': lambda: appendfile_func(),
'appendbuffer': lambda: appendbuffer_func(),
'truncate': lambda: truncate_func(),
'modifyfile': lambda: modifyfile_func(),
'modifybuffer': lambda: modifybuffer_func(),
'-h': lambda: usage(),
}[sys.argv[1].lower()]()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: storage_client.py
import os
import stat
import errno
import struct
import socket
import datetime
import platform
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
# from test_fdfs.sendfile import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def tcp_send_file(conn, filename, buffer_size=1024):
'''
    Send a file to the server, splitting it into chunks while sending.
arguments:
@conn: connection
@filename: string
@buffer_size: int ,send buffer size
@Return int: file size if success else raise ConnectionError.
'''
file_size = 0
with open(filename, 'rb') as f:
while 1:
try:
send_buffer = f.read(buffer_size)
send_size = len(send_buffer)
if send_size == 0:
break
tcp_send_data(conn, send_buffer)
file_size += send_size
except ConnectionError as e:
raise ConnectionError('[-] Error while uploading file(%s).' % e.args)
except IOError as e:
raise DataError('[-] Error while reading local file(%s).' % e.args)
return file_size
def tcp_send_file_ex(conn, filename, buffer_size=4096):
'''
Send file to server. Using linux system call 'sendfile'.
arguments:
@conn: connection
@filename: string
    @return int, sent size
'''
if 'linux' not in sys.platform.lower():
raise DataError('[-] Error: \'sendfile\' system call only available on linux.')
nbytes = 0
offset = 0
sock_fd = conn.get_sock().fileno()
with open(filename, 'rb') as f:
in_fd = f.fileno()
        while 1:
            try:
                # The original code relied on an external ``sendfile`` module
                # (see the commented-out import above); os.sendfile is used here
                # instead so the loop can actually make progress and terminate.
                sent = os.sendfile(sock_fd, in_fd, offset, buffer_size)
                if 0 == sent:
                    break
                nbytes += sent
                offset += sent
            except OSError as e:
                if e.errno == errno.EAGAIN:
                    continue
                raise
return nbytes
def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
'''
    Receive a file from the server, writing it to disk in chunks as it arrives.
arguments:
@conn: connection
@local_filename: string
@file_size: int, remote file size
@buffer_size: int, receive buffer size
@Return int: file size if success else raise ConnectionError.
'''
total_file_size = 0
flush_size = 0
remain_bytes = file_size
with open(local_filename, 'wb+') as f:
while remain_bytes > 0:
try:
if remain_bytes >= buffer_size:
file_buffer, recv_size = tcp_recv_response(conn, buffer_size, buffer_size)
else:
file_buffer, recv_size = tcp_recv_response(conn, remain_bytes, buffer_size)
f.write(file_buffer)
                remain_bytes -= recv_size
total_file_size += recv_size
flush_size += recv_size
if flush_size >= 4096:
f.flush()
flush_size = 0
except ConnectionError as e:
raise ConnectionError('[-] Error: while downloading file(%s).' % e.args)
except IOError as e:
                raise DataError('[-] Error: while writing local file(%s).' % e.args)
return total_file_size
class Storage_client(object):
'''
    Storage client for a storage server.
    Note: host_tuple holds the storage server's ip address and must contain a single element.
'''
    def __init__(self, *args):
        conn_kwargs = {
            'name': 'Storage Pool',
            'host_tuple': (args[0],),
            'port': args[1],
            'timeout': args[2]
}
self.pool = ConnectionPool(**conn_kwargs)
return None
def __del__(self):
try:
self.pool.destroy()
self.pool = None
except:
pass
def update_pool(self, old_store_serv, new_store_serv, timeout=30):
'''
        Update the connection pool of the storage client.
        The pool must be rebuilt when the storage server changes;
        if the server is unchanged, nothing is done.
'''
if old_store_serv.ip_addr == new_store_serv.ip_addr:
return None
self.pool.destroy()
conn_kwargs = {
'name': 'Storage_pool',
'host_tuple': (new_store_serv.ip_addr,),
'port': new_store_serv.port,
'timeout': timeout
}
self.pool = ConnectionPool(**conn_kwargs)
return True
def _storage_do_upload_file(self, tracker_client, store_serv, file_buffer, file_size=None, upload_type=None,
meta_dict=None, cmd=None, master_filename=None, prefix_name=None, file_ext_name=None):
'''
        Core of file upload.
        arguments:
        @tracker_client: Tracker_client, used to talk to the tracker server
        @store_serv: Storage_server, as returned by a tracker query
        @file_buffer: string, file name or file buffer to send
        @file_size: int
        @upload_type: int, one of FDFS_UPLOAD_BY_FILE, FDFS_UPLOAD_BY_FILENAME,
        FDFS_UPLOAD_BY_BUFFER
        @meta_dict: dictionary, metadata to store with the file
        @cmd: int, see the fdfs protocol command constants
        @master_filename: string, used when uploading a slave file
        @prefix_name: string
        @file_ext_name: string
@Return dictionary
{
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : status,
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
}
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
master_filename_len = len(master_filename) if master_filename else 0
prefix_name_len = len(prefix_name) if prefix_name else 0
upload_slave = len(store_serv.group_name) and master_filename_len
file_ext_name = str(file_ext_name) if file_ext_name else ''
# non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
# slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
# -master_name(master_filename_len)-|
slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, FDFS_FILE_EXT_NAME_MAX_LEN, master_filename_len)
th.pkg_len = struct.calcsize(slave_fmt) if upload_slave else struct.calcsize(non_slave_fmt)
th.pkg_len += file_size
th.cmd = cmd
th.send_header(store_conn)
if upload_slave:
send_buffer = struct.pack(
slave_fmt, master_filename_len, file_size, prefix_name, file_ext_name, master_filename)
else:
send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, file_size, file_ext_name.encode())
try:
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
send_file_size = tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
send_file_size = tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
if recv_size <= FDFS_GROUP_NAME_MAX_LEN:
                errmsg = '[-] Error: Storage response length does not match, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
# recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
(group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
remote_filename = remote_name.strip(b'\x00')
if meta_dict and len(meta_dict) > 0:
status = self.storage_set_metadata(tracker_client, store_serv, remote_filename, meta_dict)
if status != 0:
# rollback
self.storage_delete_file(tracker_client, store_serv, remote_filename)
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Group name': group_name.strip(b'\x00'),
'Remote file_id': group_name.strip(b'\x00') + __os_sep__.encode() + remote_filename,
            'Status': 'Upload succeeded.',
'Local file name': file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else '',
'Uploaded size': appromix(send_file_size) if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else appromix(len(file_buffer)),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_upload_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_buffer(self, tracker_client, store_serv, file_buffer, file_ext_name=None, meta_dict=None):
buffer_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, buffer_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_slave_by_filename(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_file(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_buffer(self, tracker_client, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name):
file_size = len(filebuffer)
return self._storage_do_upload_file(tracker_client, store_serv, filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, None, remote_filename,
file_ext_name)
def storage_upload_appender_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_buffer(self, tracker_client, store_serv, file_buffer, meta_dict=None,
file_ext_name=None):
file_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_delete_file(self, tracker_client, store_serv, remote_filename):
'''
Delete file from storage server.
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_DELETE_FILE
file_name_len = len(remote_filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
try:
th.send_header(store_conn)
# del_fmt: |-group_name(16)-filename(len)-|
del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(del_fmt, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.'
# % (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
# recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
remote_filename = store_serv.group_name + __os_sep__.encode() + remote_filename
        return ('Delete file succeeded.', remote_filename, store_serv.ip_addr)
def _storage_do_download_file(self, tracker_client, store_serv, file_buffer, offset, download_size,
download_type, remote_filename):
'''
        Core of file download from the storage server.
        The download type can be FDFS_DOWNLOAD_TO_FILE or FDFS_DOWNLOAD_TO_BUFFER,
        and a byte offset into the file can be specified.
@Return dictionary
'Remote file name' : remote_filename,
'Content' : local_filename or buffer,
'Download size' : download_size,
'Storage IP' : storage_ip
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
try:
th.send_header(store_conn)
# down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(down_fmt, offset, download_size, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' %
# (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d %s' % (th.status, os.strerror(th.status)))
if download_type == FDFS_DOWNLOAD_TO_FILE:
total_recv_size = tcp_recv_file(store_conn, file_buffer, th.pkg_len)
elif download_type == FDFS_DOWNLOAD_TO_BUFFER:
recv_buffer, total_recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Remote file_id': store_serv.group_name + __os_sep__.encode() + remote_filename,
'Content': file_buffer if download_type == FDFS_DOWNLOAD_TO_FILE else recv_buffer,
'Download size': appromix(total_recv_size),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_download_to_file(self, tracker_client, store_serv, local_filename, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, local_filename, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_FILE, remote_filename)
def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, file_buffer, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_BUFFER, remote_filename)
def storage_set_metadata(self, tracker_client, store_serv, remote_filename, meta_dict,
op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
ret = 0
conn = self.pool.get_connection()
remote_filename_len = len(remote_filename)
meta_buffer = fdfs_pack_metadata(meta_dict)
meta_len = len(meta_buffer)
th = Tracker_header()
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + 1 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
th.cmd = STORAGE_PROTO_CMD_SET_METADATA
try:
th.send_header(conn)
# meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
# -filename(remote_filename_len)-meta(meta_len)|
meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len, meta_len)
send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, op_flag, store_serv.group_name,
remote_filename, meta_buffer)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
ret = th.status
except:
raise
finally:
self.pool.release(conn)
return ret
def storage_get_metadata(self, tracker_client, store_serv, remote_file_name):
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_file_name)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_GET_METADATA
try:
th.send_header(store_conn)
# meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(meta_fmt, store_serv.group_name, remote_file_name.encode())
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: Remote file %s has no meta data.'
# % (store_serv.group_name + __os_sep__.encode() + remote_file_name))
if th.status != 0:
raise DataError('[-] Error:%d, %s' % (th.status, os.strerror(th.status)))
            if th.pkg_len == 0:
                # no metadata stored for this remote file
                return {}
            meta_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = fdfs_unpack_metadata(meta_buffer)
return ret_dict
def _storage_do_append_file(self, tracker_client, store_serv, file_buffer, file_size, upload_type,
appended_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
appended_filename_len = len(appended_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appended_filename_len + file_size
th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
try:
th.send_header(store_conn)
# append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
# -filecontent(filesize)-|
append_fmt = '!Q Q %ds' % appended_filename_len
send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Append file succeeded.'
ret_dict['Appender file name'] = store_serv.group_name + __os_sep__.encode() + appended_filename
ret_dict['Appended size'] = appromix(file_size)
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_append_by_filename(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size,
FDFS_UPLOAD_BY_FILENAME, appended_filename)
def storage_append_by_file(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size, FDFS_UPLOAD_BY_FILE,
appended_filename)
def storage_append_by_buffer(self, tracker_client, store_serv, file_buffer, appended_filename):
file_size = len(file_buffer)
return self._storage_do_append_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
appended_filename)
def _storage_do_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
try:
th.send_header(store_conn)
# truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
# -appender_filename(len)-|
truncate_fmt = '!Q Q %ds' % appender_filename_len
send_buffer = struct.pack(truncate_fmt, appender_filename_len, truncated_filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Truncate succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
return self._storage_do_truncate_file(tracker_client, store_serv, truncated_filesize, appender_filename)
def _storage_do_modify_file(self, tracker_client, store_serv, upload_type, filebuffer, offset, filesize,
appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
try:
th.send_header(store_conn)
# modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
modify_fmt = '!Q Q Q %ds' % appender_filename_len
send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
upload_size = tcp_send_file(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
upload_size = tcp_send_file_ex(store_conn, filebuffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Modify succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_modify_by_filename(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILENAME, filename, offset,
filesize, appender_filename)
def storage_modify_by_file(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILE, filename, offset, filesize,
appender_filename)
def storage_modify_by_buffer(self, tracker_client, store_serv, filebuffer, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_BUFFER, filebuffer, offset,
filesize, appender_filename)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tracker_client.py
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
try:
ret = {
FDFS_STORAGE_STATUS_INIT: lambda: 'INIT',
FDFS_STORAGE_STATUS_WAIT_SYNC: lambda: 'WAIT_SYNC',
FDFS_STORAGE_STATUS_SYNCING: lambda: 'SYNCING',
FDFS_STORAGE_STATUS_IP_CHANGED: lambda: 'IP_CHANGED',
FDFS_STORAGE_STATUS_DELETED: lambda: 'DELETED',
FDFS_STORAGE_STATUS_OFFLINE: lambda: 'OFFLINE',
FDFS_STORAGE_STATUS_ONLINE: lambda: 'ONLINE',
FDFS_STORAGE_STATUS_ACTIVE: lambda: 'ACTIVE',
FDFS_STORAGE_STATUS_RECOVERY: lambda: 'RECOVERY'
}[status_code]()
except KeyError:
        ret = 'UNKNOWN'
return ret
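# Illustrative mapping check (not part of the upstream module):
#   >>> parse_storage_status(FDFS_STORAGE_STATUS_ACTIVE)
#   'ACTIVE'
#   >>> parse_storage_status(42)
#   'UNKNOWN'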
class Storage_info(object):
def __init__(self):
self.status = 0
self.id = ''
self.ip_addr = ''
self.domain_name = ''
self.src_id = ''
self.version = ''
self.join_time = datetime.fromtimestamp(0).isoformat()
self.up_time = datetime.fromtimestamp(0).isoformat()
self.totalMB = ''
self.freeMB = ''
self.upload_prio = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_write_path = 0
self.storage_port = 23000
self.storage_http_port = 80
self.alloc_count = 0
self.current_count = 0
self.max_count = 0
self.total_upload_count = 0
self.success_upload_count = 0
self.total_append_count = 0
self.success_append_count = 0
self.total_modify_count = 0
self.success_modify_count = 0
self.total_truncate_count = 0
self.success_truncate_count = 0
self.total_setmeta_count = 0
self.success_setmeta_count = 0
self.total_del_count = 0
self.success_del_count = 0
self.total_download_count = 0
self.success_download_count = 0
self.total_getmeta_count = 0
self.success_getmeta_count = 0
self.total_create_link_count = 0
self.success_create_link_count = 0
self.total_del_link_count = 0
self.success_del_link_count = 0
self.total_upload_bytes = 0
self.success_upload_bytes = 0
self.total_append_bytes = 0
self.success_append_bytes = 0
self.total_modify_bytes = 0
self.success_modify_bytes = 0
self.total_download_bytes = 0
self.success_download_bytes = 0
self.total_sync_in_bytes = 0
self.success_sync_in_bytes = 0
self.total_sync_out_bytes = 0
self.success_sync_out_bytes = 0
self.total_file_open_count = 0
self.success_file_open_count = 0
self.total_file_read_count = 0
self.success_file_read_count = 0
self.total_file_write_count = 0
self.success_file_write_count = 0
self.last_source_sync = datetime.fromtimestamp(0).isoformat()
self.last_sync_update = datetime.fromtimestamp(0).isoformat()
self.last_synced_time = datetime.fromtimestamp(0).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
self.if_trunk_server = ''
        # fmt = |-status(1)-id(16)-ip_addr(16)-domain(128)-src_id(16)-version(6)-10*8-3*4-42*8-if_trunk_server(1)-|
self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'
def set_info(self, bytes_stream):
(self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
        self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
try:
self.ip_addr = ip_addr.strip(b'\x00')
self.domain_name = domain_name.strip(b'\x00')
self.version = version.strip(b'\x00')
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError as e:
            raise ResponseError('[-] Error: disk space value too large to represent.')
self.join_time = datetime.fromtimestamp(join_time).isoformat()
self.up_time = datetime.fromtimestamp(up_time).isoformat()
self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
return True
def __str__(self):
'''Transform to readable string.'''
s = 'Storage information:\n'
s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
s += '\thttp domain = %s\n' % self.domain_name
s += '\tversion = %s\n' % self.version
s += '\tjoin time = %s\n' % self.join_time
s += '\tup time = %s\n' % self.up_time
s += '\ttotal storage = %s\n' % self.totalMB
s += '\tfree storage = %s\n' % self.freeMB
s += '\tupload priority = %d\n' % self.upload_prio
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.storage_http_port
s += '\tcurrent write path = %d\n' % self.curr_write_path
s += '\tsource ip_addr = %s\n' % self.ip_addr
s += '\tif_trunk_server = %d\n' % self.if_trunk_server
s += '\ttotal upload count = %ld\n' % self.total_upload_count
s += '\tsuccess upload count = %ld\n' % self.success_upload_count
s += '\ttotal download count = %ld\n' % self.total_download_count
s += '\tsuccess download count = %ld\n' % self.success_download_count
s += '\ttotal append count = %ld\n' % self.total_append_count
s += '\tsuccess append count = %ld\n' % self.success_append_count
s += '\ttotal modify count = %ld\n' % self.total_modify_count
s += '\tsuccess modify count = %ld\n' % self.success_modify_count
s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
s += '\ttotal delete count = %ld\n' % self.total_del_count
s += '\tsuccess delete count = %ld\n' % self.success_del_count
s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
s += '\ttotal create link count = %ld\n' % self.total_create_link_count
s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
s += '\ttotal file open count = %ld\n' % self.total_file_open_count
s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
s += '\ttotal file read count = %ld\n' % self.total_file_read_count
s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
s += '\ttotal file write count = %ld\n' % self.total_file_write_count
        s += '\tsuccess file write count = %ld\n' % self.success_file_write_count
s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
s += '\tlast source update = %s\n' % self.last_source_sync
s += '\tlast sync update = %s\n' % self.last_sync_update
s += '\tlast synced time = %s\n' % self.last_synced_time
return s
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Group_info(object):
def __init__(self):
self.group_name = ''
self.totalMB = ''
self.freeMB = ''
self.trunk_freeMB = ''
self.count = 0
self.storage_port = 0
self.store_http_port = 0
self.active_count = 0
self.curr_write_server = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_trunk_file_id = 0
self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
return None
def __str__(self):
s = 'Group information:\n'
s += '\tgroup name = %s\n' % self.group_name
s += '\ttotal disk space = %s\n' % self.totalMB
s += '\tdisk free space = %s\n' % self.freeMB
s += '\ttrunk free space = %s\n' % self.trunk_freeMB
s += '\tstorage server count = %d\n' % self.count
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.store_http_port
s += '\tactive server count = %d\n' % self.active_count
s += '\tcurrent write server index = %d\n' % self.curr_write_server
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
return s
def set_info(self, bytes_stream):
(group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
try:
self.group_name = group_name.strip(b'\x00')
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError:
            raise DataError('[-] Error: disk space value too large to represent.')
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Tracker_client(object):
'''Class Tracker client.'''
def __init__(self, pool):
self.pool = pool
def tracker_list_servers(self, group_name, storage_ip=None):
'''
List servers in a storage group
'''
conn = self.pool.get_connection()
th = Tracker_header()
ip_len = len(storage_ip) if storage_ip else 0
if ip_len >= IP_ADDRESS_SIZE:
ip_len = IP_ADDRESS_SIZE - 1
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
store_ip_addr = storage_ip or ''
storage_ip_fmt = '!%ds' % ip_len
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
si = Storage_info()
si_fmt_size = si.get_fmt_size()
recv_size = len(recv_buffer)
if recv_size % si_fmt_size != 0:
                errinfo = '[-] Error: response size does not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errinfo)
except ConnectionError:
raise
finally:
self.pool.release(conn)
        num_storage = recv_size // si_fmt_size
si_list = []
i = 0
while num_storage:
si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
si_list.append(si)
si = Storage_info()
num_storage -= 1
i += 1
ret_dict = {}
ret_dict['Group name'] = group_name
ret_dict['Servers'] = si_list
return ret_dict
def tracker_list_one_group(self, group_name):
conn = self.pool.get_connection()
th = Tracker_header()
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
# group_fmt: |-group_name(16)-|
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
group_info = Group_info()
group_info.set_info(recv_buffer)
except ConnectionError:
raise
finally:
self.pool.release(conn)
return group_info
def tracker_list_all_groups(self):
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
except:
raise
finally:
self.pool.release(conn)
gi = Group_info()
gi_fmt_size = gi.get_fmt_size()
if recv_size % gi_fmt_size != 0:
            errmsg = '[-] Error: Response size mismatch, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
        num_groups = recv_size // gi_fmt_size
ret_dict = {}
ret_dict['Groups count'] = num_groups
gi_list = []
i = 0
while num_groups:
gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
gi_list.append(gi)
gi = Group_info()
i += 1
num_groups -= 1
ret_dict['Groups'] = gi_list
return ret_dict
def tracker_query_storage_stor_without_group(self):
'''Query storage server for upload, without group name.
Return: Storage_server object'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def tracker_query_storage_stor_with_group(self, group_name):
        '''Query storage server for upload, based on group name.
arguments:
@group_name: string
@Return Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.send_header(conn)
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
send_buffer = struct.pack(group_fmt, group_name)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def _tracker_do_query_storage(self, group_name, filename, cmd):
'''
        Core of storage query, based on group name and filename.
        Used for download, delete and set_meta operations.
arguments:
@group_name: string
@filename: string. remote file_id
@Return: Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
file_name_len = len(filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
th.cmd = cmd
th.send_header(conn)
# query_fmt: |-group_name(16)-filename(file_name_len)-|
query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(query_fmt, group_name, filename)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
        # recv_fmt: |-group_name(16)-ip_addr(16-1)-port(8)-|
recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ipaddr.strip(b'\x00')
return store_serv
def tracker_query_storage_update(self, group_name, filename):
'''
Query storage server to update(delete and set_meta).
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
def tracker_query_storage_fetch(self, group_name, filename):
'''
Query storage server to download.
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: utils.py
import io
import os
import sys
import stat
import platform
import configparser
SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def appromix(size, base=0):
    '''Convert a byte-count to a human-readable size string.
Keyword arguments:
size: int, bytes stream size
base: int, suffix index
Return: string
'''
multiples = 1024
if size < 0:
raise ValueError('[-] Error: number must be non-negative.')
if size < multiples:
return '{0:d}{1}'.format(size, SUFFIX[base])
for suffix in SUFFIX[base:]:
if size < multiples:
return '{0:.2f}{1}'.format(size, suffix)
size = size / float(multiples)
raise ValueError('[-] Error: number too big.')
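# Illustrative values (not part of the upstream module):
#   appromix(0)      -> '0B'
#   appromix(1536)   -> '1.50KB'
#   appromix(5, 2)   -> '5MB'    (base index 2 starts the suffix list at MB)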
def get_file_ext_name(filename, double_ext=True):
li = filename.split(os.extsep)
if len(li) <= 1:
return ''
else:
if li[-1].find(__os_sep__) != -1:
return ''
if double_ext:
if len(li) > 2:
if li[-2].find(__os_sep__) == -1:
return '%s.%s' % (li[-2], li[-1])
return li[-1]
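# Illustrative values (not part of the upstream module):
#   get_file_ext_name('photo.jpg')     -> 'jpg'
#   get_file_ext_name('backup.tar.gz') -> 'tar.gz'   (double_ext keeps both parts)
#   get_file_ext_name('README')        -> ''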
class Fdfs_ConfigParser(configparser.RawConfigParser):
"""
Extends ConfigParser to allow files without sections.
This is done by wrapping read files and prepending them with a placeholder
section, which defaults to '__config__'
"""
def __init__(self, default_section=None, *args, **kwargs):
configparser.RawConfigParser.__init__(self, *args, **kwargs)
self._default_section = None
self.set_default_section(default_section or '__config__')
def get_default_section(self):
return self._default_section
def set_default_section(self, section):
self.add_section(section)
# move all values from the previous default section to the new one
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
except configparser.NoSectionError:
pass
else:
for (key, value) in default_section_items:
self.set(section, key, value)
self._default_section = section
def read(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename) as fp:
self.readfp(fp)
except IOError:
continue
else:
read_ok.append(filename)
return read_ok
def readfp(self, fp, *args, **kwargs):
stream = io.StringIO()
try:
stream.name = fp.name
except AttributeError:
pass
stream.write('[' + self._default_section + ']\n')
stream.write(fp.read())
stream.seek(0, 0)
return self._read(stream, stream.name)
def write(self, fp):
# Write the items from the default section manually and then remove them
# from the data. They'll be re-added later.
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
for (key, value) in default_section_items:
fp.write("{0} = {1}\n".format(key, value))
fp.write("\n")
except configparser.NoSectionError:
pass
configparser.RawConfigParser.write(self, fp)
self.add_section(self._default_section)
for (key, value) in default_section_items:
self.set(self._default_section, key, value)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
                    elif sectname == configparser.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
                    raise configparser.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
if optname in cursect:
if not isinstance(cursect[optname], list):
cursect[optname] = [cursect[optname]]
cursect[optname].append(optval)
else:
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
                        e = configparser.ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
def split_remote_fileid(remote_file_id):
'''
    Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
'''
index = remote_file_id.find(b'/')
if -1 == index:
return None
return (remote_file_id[0:index], remote_file_id[(index + 1):])
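# Illustrative behaviour (sketch) - the argument is expected to be bytes, because
# the separator is searched with b'/':
#   split_remote_fileid(b'group1/M00/00/00/abc.py') -> (b'group1', b'M00/00/00/abc.py')
#   split_remote_fileid(b'no-separator')            -> None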
def fdfs_check_file(filename):
ret = True
errmsg = ''
if not os.path.isfile(filename):
ret = False
errmsg = '[-] Error: %s is not a file.' % filename
elif not stat.S_ISREG(os.stat(filename).st_mode):
ret = False
errmsg = '[-] Error: %s is not a regular file.' % filename
return (ret, errmsg)
if __name__ == '__main__':
print(get_file_ext_name('/bc.tar.gz'))
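    # Extra ad-hoc checks (added for illustration; not in the original script):
    print(appromix(123456789))                               # e.g. '117.74MB'
    print(split_remote_fileid(b'group1/M00/00/00/abc.py'))   # (b'group1', b'M00/00/00/abc.py')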
# connect timeout in seconds
# default value is 30s
connect_timeout=300
# network timeout in seconds
# default value is 30s
network_timeout=600
# the base path to store log files
#base_path=/home/tarena/django-project/cc_shop1/cc_shop1/logs
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=114.115.215.96:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf
\ No newline at end of file
# __init__.py
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: client.py
'''
Client module for Fastdfs 3.08
author: scott yuan scottzer8@gmail.com
date: 2012-06-21
'''
import os
import sys
from fdfs_client.utils import *
from fdfs_client.tracker_client import *
from fdfs_client.storage_client import *
from fdfs_client.exceptions import *
def get_tracker_conf(conf_path='client.conf'):
cf = Fdfs_ConfigParser()
tracker = {}
try:
cf.read(conf_path)
timeout = cf.getint('__config__', 'connect_timeout')
tracker_list = cf.get('__config__', 'tracker_server')
if isinstance(tracker_list, str):
tracker_list = [tracker_list]
tracker_ip_list = []
for tr in tracker_list:
tracker_ip, tracker_port = tr.split(':')
tracker_ip_list.append(tracker_ip)
tracker['host_tuple'] = tuple(tracker_ip_list)
tracker['port'] = int(tracker_port)
tracker['timeout'] = timeout
tracker['name'] = 'Tracker Pool'
except:
raise
return tracker
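# Sketch of what get_tracker_conf() returns for the client.conf shown above
# (values derived from that file, shown here for illustration):
#   {
#       'host_tuple': ('114.115.215.96',),
#       'port': 22122,
#       'timeout': 300,            # connect_timeout from the conf file
#       'name': 'Tracker Pool'
#   }
# Note: when several tracker_server lines are present, every host goes into
# host_tuple but only the port of the last line is kept.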
class Fdfs_client(object):
'''
    Class Fdfs_client implements the FastDFS client protocol (ver 3.08).
    It can upload, download and delete files to or from an fdfs server, and it
    uses a connection pool to manage connections to the server.
'''
def __init__(self, trackers, poolclass=ConnectionPool):
self.trackers = trackers
self.tracker_pool = poolclass(**self.trackers)
self.timeout = self.trackers['timeout']
return None
    def __del__(self):
        try:
            # __init__ stores the pool as self.tracker_pool; there is no self.pool
            self.tracker_pool.destroy()
            self.tracker_pool = None
        except:
            pass
def upload_by_filename(self, filename, meta_dict=None):
'''
Upload a file to Storage server.
arguments:
@filename: string, name of file that will be uploaded
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} meta_dict can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : local_file_name,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_filename(tc, store_serv, filename, meta_dict)
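    # Minimal usage sketch (assumes a reachable tracker described by client.conf;
    # '/tmp/demo.txt' is a placeholder path, not part of this repository):
    #
    #   trackers = get_tracker_conf('client.conf')
    #   client = Fdfs_client(trackers)
    #   ret = client.upload_by_filename('/tmp/demo.txt')
    #   print(ret['Remote file_id'])   # e.g. b'group1/M00/...'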
def upload_by_file(self, filename, meta_dict=None):
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_file(tc, store_serv, filename, meta_dict)
def upload_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string, buffer
@file_ext_name: string, file extend name
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_buffer(tc, store_serv, filebuffer, file_ext_name, meta_dict)
def upload_slave_by_filename(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
            ret_dict = store.storage_upload_slave_by_filename(tc, store_serv, filename, prefix_name, remote_filename,
                                                              meta_dict=meta_dict)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_file(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
            ret_dict = store.storage_upload_slave_by_file(tc, store_serv, filename, prefix_name, remote_filename,
                                                          meta_dict=meta_dict)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_buffer(self, filebuffer, remote_file_id, meta_dict=None, file_ext_name=None):
'''
Upload slave file by buffer
arguments:
@filebuffer: string
@remote_file_id: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_slave_by_buffer(tc, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name)
def upload_appender_by_filename(self, local_filename, meta_dict=None):
'''
Upload an appender file by filename.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_filename(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_file(self, local_filename, meta_dict=None):
'''
Upload an appender file by file.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_file(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string
@file_ext_name: string, can be null
@meta_dict: dictionary, can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_buffer(tc, store_serv, filebuffer, meta_dict, file_ext_name)
def delete_file(self, remote_file_id):
'''
Delete a file from Storage server.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@return tuple ('Delete file successed.', remote_file_id, storage_ip)
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in delete file)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_delete_file(tc, store_serv, remote_filename)
def download_to_file(self, local_filename, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server.
arguments:
@local_filename: string, local name of file
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@downbytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : local_filename,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
        # normalize offset/down_bytes unconditionally; the original only assigned
        # them when they were zero, which raised NameError for non-zero values
        file_offset = int(offset)
        download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_download_to_file(tc, store_serv, local_filename, file_offset, download_bytes,
remote_filename)
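    # Usage sketch (illustrative; the remote file id must come from a previous upload):
    #
    #   ret = client.download_to_file('/tmp/local_copy.bin',
    #                                 b'group1/M00/00/00/example.bin')
    #   print(ret['Download size'])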
def download_to_buffer(self, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server and store in buffer.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@down_bytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : file_buffer,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
        # normalize offset/down_bytes unconditionally (same fix as download_to_file)
        file_offset = int(offset)
        download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
file_buffer = None
return store.storage_download_to_buffer(tc, store_serv, file_buffer, file_offset, download_bytes,
remote_filename)
def list_one_group(self, group_name):
'''
List one group information.
arguments:
@group_name: string, group name will be list
@return Group_info, instance
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_one_group(group_name)
def list_servers(self, group_name, storage_ip=None):
'''
List all storage servers information in a group
arguments:
@group_name: string
@return dictionary {
'Group name' : group_name,
'Servers' : server list,
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_servers(group_name, storage_ip)
def list_all_groups(self):
'''
List all group information.
@return dictionary {
'Groups count' : group_count,
'Groups' : list of groups
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_all_groups()
def get_meta_data(self, remote_file_id):
'''
Get meta data of remote file.
arguments:
@remote_fileid: string, remote file id
@return dictionary, meta data
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in get meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_get_metadata(tc, store_serv, remote_filename)
def set_meta_data(self, remote_file_id, meta_dict, op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
'''
Set meta data of remote file.
arguments:
@remote_file_id: string
@meta_dict: dictionary
@op_flag: char, 'O' for overwrite, 'M' for merge
@return dictionary {
'Status' : status,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in set meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
try:
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
status = store.storage_set_metadata(tc, store_serv, remote_filename, meta_dict)
except (ConnectionError, ResponseError, DataError):
raise
# if status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' % remote_file_id)
if status != 0:
            raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
ret_dict = {}
ret_dict['Status'] = 'Set meta data success.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def append_by_filename(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_filename(tc, store_serv, local_filename, appended_filename)
def append_by_file(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_file(tc, store_serv, local_filename, appended_filename)
def append_by_buffer(self, file_buffer, remote_fileid):
if not file_buffer:
raise DataError('[-] Error: file_buffer can not be null.')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_buffer(tc, store_serv, file_buffer, appended_filename)
def truncate_file(self, truncated_filesize, appender_fileid):
'''
Truncate file in Storage server.
arguments:
@truncated_filesize: long
@appender_fileid: remote_fileid
@return: dictionary {
'Status' : 'Truncate successed.',
'Storage IP' : storage_ip
}
'''
trunc_filesize = int(truncated_filesize)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: appender_fileid is invalid.(truncate)')
group_name, appender_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_truncate_file(tc, store_serv, trunc_filesize, appender_filename)
def modify_by_filename(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        # honour the caller-supplied offset; the original branch always ended up with 0
        file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_filename(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_file(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        # honour the caller-supplied offset; the original branch always ended up with 0
        file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_file(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_buffer(self, filebuffer, appender_fileid, offset=0):
'''
Modify a file in Storage server by buffer.
arguments:
@filebuffer: string, file buffer
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: filebuffer can not be null.(modify)')
filesize = len(filebuffer)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
        # honour the caller-supplied offset; the original branch always ended up with 0
        file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_buffer(tc, store_serv, filebuffer, file_offset, filesize, appender_filename)
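if __name__ == '__main__':
    # Smoke-test sketch (added for illustration): parse the bundled client.conf and
    # print the tracker settings. The upload call is left commented out because it
    # needs a reachable tracker/storage server.
    conf = get_tracker_conf('client.conf')
    print('tracker settings:', conf)
    # client = Fdfs_client(conf)
    # print(client.upload_by_filename('some_local_file.txt'))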
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: connection.py
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
    '''Manage TCP communication to and from the FastDFS server.'''
def __init__(self, **conn_kwargs):
self.pid = os.getpid()
self.host_tuple = conn_kwargs['host_tuple']
self.remote_port = conn_kwargs['port']
self.remote_addr = None
self.timeout = conn_kwargs['timeout']
self._sock = None
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
'''Connect to fdfs server.'''
if self._sock:
return
try:
sock = self._connect()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = sock
# print '[+] Create a connection success.'
# print '\tLocal address is %s:%s.' % self._sock.getsockname()
# print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
def _connect(self):
'''Create TCP socket. The host is random one of host_tuple.'''
self.remote_addr = random.choice(self.host_tuple)
# print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.settimeout(self.timeout)
sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
return sock
def disconnect(self):
'''Disconnect from fdfs server.'''
if self._sock is None:
return
try:
self._sock.close()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = None
def get_sock(self):
return self._sock
def _errormessage(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
if len(exception.args) == 1:
return "[-] Error: connect to %s:%s. %s." % (self.remote_addr, self.remote_port, exception.args[0])
else:
return "[-] Error: %s connect to %s:%s. %s." % \
(exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# end class Connection
# start ConnectionPool
class ConnectionPool(object):
'''Generic Connection Pool'''
def __init__(self, name='', conn_class=Connection,
max_conn=None, **conn_kwargs):
self.pool_name = name
self.pid = os.getpid()
self.conn_class = conn_class
self.max_conn = max_conn or 2 ** 31
self.conn_kwargs = conn_kwargs
self._conns_created = 0
self._conns_available = []
self._conns_inuse = set()
# print '[+] Create a connection pool success, name: %s.' % self.pool_name
    def _check_pid(self):
        if self.pid != os.getpid():
            self.destroy()
            # rebuild the pool in the new process; __init__ expects
            # (name, conn_class, max_conn, **conn_kwargs) in that order
            self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)
def make_conn(self):
'''Create a new connection.'''
if self._conns_created >= self.max_conn:
raise ConnectionError('[-] Error: Too many connections.')
num_try = 10
while True:
try:
if num_try <= 0:
sys.exit()
conn_instance = self.conn_class(**self.conn_kwargs)
conn_instance.connect()
self._conns_created += 1
break
except ConnectionError as e:
print(e)
num_try -= 1
conn_instance = None
return conn_instance
def get_connection(self):
'''Get a connection from pool.'''
self._check_pid()
try:
conn = self._conns_available.pop()
# print '[+] Get a connection from pool %s.' % self.pool_name
# print '\tLocal address is %s:%s.' % conn._sock.getsockname()
# print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
except IndexError:
conn = self.make_conn()
self._conns_inuse.add(conn)
return conn
def remove(self, conn):
'''Remove connection from pool.'''
if conn in self._conns_inuse:
self._conns_inuse.remove(conn)
self._conns_created -= 1
if conn in self._conns_available:
self._conns_available.remove(conn)
self._conns_created -= 1
def destroy(self):
'''Disconnect all connections in the pool.'''
all_conns = chain(self._conns_inuse, self._conns_available)
for conn in all_conns:
conn.disconnect()
# print '[-] Destroy connection pool %s.' % self.pool_name
def release(self, conn):
'''Release the connection back to the pool.'''
self._check_pid()
if conn.pid == self.pid:
self._conns_inuse.remove(conn)
self._conns_available.append(conn)
# print '[-] Release connection back to pool %s.' % self.pool_name
# end ConnectionPool class
def tcp_recv_response(conn, bytes_size, buffer_size=4096):
'''Receive response from server.
    It does not include the tracker header.
arguments:
@conn: connection
@bytes_size: int, will be received byte_stream size
@buffer_size: int, receive buffer size
@Return: tuple,(response, received_size)
'''
recv_buff = []
total_size = 0
try:
        while bytes_size > 0:
            resp = conn._sock.recv(buffer_size)
            if not resp:
                # peer closed the connection before all expected bytes arrived
                break
            recv_buff.append(resp)
            total_size += len(resp)
            bytes_size -= len(resp)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: (%s)' % e.args)
return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
'''Send buffer to server.
    It does not include the tracker header.
arguments:
@conn: connection
    @bytes_stream: transmit buffer
@Return bool
'''
try:
conn._sock.sendall(bytes_stream)
except (socket.error, socket.timeout) as e:
        raise ConnectionError('[-] Error: while writing to socket: (%s)' % e.args)
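# Minimal sketch (illustration only) of driving ConnectionPool by hand; the
# host/port values are placeholders, not taken from this repository's config:
#
#   pool = ConnectionPool(name='demo', host_tuple=('192.168.0.1',), port=22122, timeout=30)
#   conn = pool.get_connection()
#   try:
#       tcp_send_data(conn, b'ping')              # raw bytes, no tracker header added here
#       body, size = tcp_recv_response(conn, 4)   # read exactly 4 bytes back
#   finally:
#       pool.release(conn)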
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: exceptions.py
'''Core exceptions raised by fdfs client'''
class FDFSError(Exception):
pass
class ConnectionError(FDFSError):
pass
class ResponseError(FDFSError):
pass
class InvaildResponse(FDFSError):
pass
class DataError(FDFSError):
pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_protol.py
import struct
import socket
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# define FDFS protocol constants
TRACKER_PROTO_CMD_STORAGE_JOIN = 81
FDFS_PROTO_CMD_QUIT = 82
TRACKER_PROTO_CMD_STORAGE_BEAT = 83 # storage heart beat
TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84 # report disk usage
TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85 # repl new storage servers
TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86 # src storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87 # dest storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88 # sync done notify
TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89 # report src last synced time as dest server
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79 # dest storage query sync src storage server
TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78 # storage server report it's ip changed
TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77 # storage server request storage server's changelog
TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76 # report specified storage server status
TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75 # storage server request parameters
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74 # storage report trunk free space
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73 # storage report current trunk file id
TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72 # storage get current trunk file id
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61 # start of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62 # end of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63 # tracker get a system data file
TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64 # tracker get status of other tracker
TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65 # tracker ping leader
TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66 # notify next leader to other trackers
TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67 # commit next leader to other trackers
TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
TRACKER_PROTO_CMD_RESP = 100
FDFS_PROTO_CMD_ACTIVE_TEST = 111 # active test, tracker and storage both support since V1.28
STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9 # ip as tracker client
STORAGE_PROTO_CMD_UPLOAD_FILE = 11
STORAGE_PROTO_CMD_DELETE_FILE = 12
STORAGE_PROTO_CMD_SET_METADATA = 13
STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
STORAGE_PROTO_CMD_GET_METADATA = 15
STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
STORAGE_PROTO_CMD_CREATE_LINK = 20
STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23 # create appender file
STORAGE_PROTO_CMD_APPEND_FILE = 24 # append file
STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26 # fetch binlog of one store path
STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27 # since V3.00
STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28 # since V3.00
STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29 # since V3.00
STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30 # since V3.00
STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31 # since V3.07
STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32 # since V3.07
STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33 # since V3.07
STORAGE_PROTO_CMD_MODIFY_FILE = 34 # since V3.08
STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35 # since V3.08
STORAGE_PROTO_CMD_TRUNCATE_FILE = 36 # since V3.08
STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37 # since V3.08
# for overwrite all old metadata
STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
# for replace, insert when the meta item not exist, otherwise update it
STORAGE_SET_METADATA_FLAG_MERGE = 'M'
STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
FDFS_RECORD_SEPERATOR = '\x01'
FDFS_FIELD_SEPERATOR = '\x02'
# common constants
FDFS_GROUP_NAME_MAX_LEN = 16
IP_ADDRESS_SIZE = 16
FDFS_PROTO_PKG_LEN_SIZE = 8
FDFS_PROTO_CMD_SIZE = 1
FDFS_PROTO_STATUS_SIZE = 1
FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
FDFS_MAX_SERVERS_EACH_GROUP = 32
FDFS_MAX_GROUPS = 512
FDFS_MAX_TRACKERS = 16
FDFS_DOMAIN_NAME_MAX_LEN = 128
FDFS_MAX_META_NAME_LEN = 64
FDFS_MAX_META_VALUE_LEN = 256
FDFS_FILE_PREFIX_MAX_LEN = 16
FDFS_LOGIC_FILE_PATH_LEN = 10
FDFS_TRUE_FILE_PATH_LEN = 6
FDFS_FILENAME_BASE64_LENGTH = 27
FDFS_TRUNK_FILE_INFO_LEN = 16
FDFS_FILE_EXT_NAME_MAX_LEN = 6
FDFS_SPACE_SIZE_BASE_INDEX = 2 # storage space size based (MB)
FDFS_UPLOAD_BY_BUFFER = 1
FDFS_UPLOAD_BY_FILENAME = 2
FDFS_UPLOAD_BY_FILE = 3
FDFS_DOWNLOAD_TO_BUFFER = 1
FDFS_DOWNLOAD_TO_FILE = 2
FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (
FDFS_LOGIC_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
FDFS_TRUNK_FILENAME_LENGTH = (
FDFS_TRUE_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_TRUNK_FILE_INFO_LEN + 1 + FDFS_FILE_EXT_NAME_MAX_LEN)
FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + (FDFS_LOGIC_FILE_PATH_LEN - FDFS_TRUE_FILE_PATH_LEN))
FDFS_VERSION_SIZE = 6
TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
# status code, order is important!
FDFS_STORAGE_STATUS_INIT = 0
FDFS_STORAGE_STATUS_WAIT_SYNC = 1
FDFS_STORAGE_STATUS_SYNCING = 2
FDFS_STORAGE_STATUS_IP_CHANGED = 3
FDFS_STORAGE_STATUS_DELETED = 4
FDFS_STORAGE_STATUS_OFFLINE = 5
FDFS_STORAGE_STATUS_ONLINE = 6
FDFS_STORAGE_STATUS_ACTIVE = 7
FDFS_STORAGE_STATUS_RECOVERY = 9
FDFS_STORAGE_STATUS_NONE = 99
class Storage_server(object):
'''Class storage server for upload.'''
def __init__(self):
self.ip_addr = None
self.port = None
self.group_name = ''
self.store_path_index = 0
# Class tracker_header
class Tracker_header(object):
'''
Class for Pack or Unpack tracker header
struct tracker_header{
char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
char cmd,
char status,
}
'''
def __init__(self):
self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
self.st = struct.Struct(self.fmt)
self.pkg_len = 0
self.cmd = 0
self.status = 0
def _pack(self, pkg_len=0, cmd=0, status=0):
return self.st.pack(pkg_len, cmd, status)
def _unpack(self, bytes_stream):
self.pkg_len, self.cmd, self.status = self.st.unpack(bytes_stream)
return True
def header_len(self):
return self.st.size
def send_header(self, conn):
'''Send Tracker header to server.'''
header = self._pack(self.pkg_len, self.cmd, self.status)
try:
conn._sock.sendall(header)
except (socket.error, socket.timeout) as e:
            raise ConnectionError('[-] Error: while writing to socket: %s' % (e.args,))
def recv_header(self, conn):
'''Receive response from server.
        If successful, the class members (pkg_len, cmd, status) hold the response.
'''
try:
header = conn._sock.recv(self.header_len())
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: %s' % (e.args,))
self._unpack(header)
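    # Layout sketch of the 10-byte header this class packs/unpacks ('!QBB'):
    #   bytes 0-7 : pkg_len, unsigned 64-bit big-endian (length of the body that follows)
    #   byte  8   : cmd,     one of the *_PROTO_CMD_* constants defined above
    #   byte  9   : status,  0 on success, errno-style code otherwise
    # e.g. Tracker_header()._pack(16, TRACKER_PROTO_CMD_RESP, 0)
    #      -> b'\x00\x00\x00\x00\x00\x00\x00\x10d\x00'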
def fdfs_pack_metadata(meta_dict):
ret = ''
for key in meta_dict:
ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
return ret[0:-1]
def fdfs_unpack_metadata(bytes_stream):
li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
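# Round-trip sketch for the metadata helpers (illustrative values):
#   meta = {'width': '160px', 'ext_name': 'jpg'}
#   packed = fdfs_pack_metadata(meta)
#   # packed == 'width\x02160px\x01ext_name\x02jpg'   (order follows dict insertion order)
#   fdfs_unpack_metadata(packed) == meta               # True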
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_test.py
import os
import sys
import time
try:
from fdfs_client.client import *
from fdfs_client.exceptions import *
except ImportError:
import_path = os.path.abspath('../')
sys.path.append(import_path)
from fdfs_client.client import *
from fdfs_client.exceptions import *
def usage():
s = 'Usage: python fdfs_test.py {options} [{local_filename} [{remote_file_id}]]\n'
s += 'options: upfile, upbuffer, downfile, downbuffer, delete, listgroup, listserv\n'
s += ' upslavefile, upslavebuffer, upappendfile, upappendbuffer\n'
s += '\tupfile {local_filename}\n'
s += '\tupbuffer {local_filename}\n'
s += '\tdownfile {local_filename} {remote_file_id}\n'
s += '\tdownbuffer {remote_file_id}\n'
s += '\tdelete {remote_file_id}\n'
s += '\tlistgroup {group_name}\n'
s += '\tlistall \n'
s += '\tlistsrv {group_name} [storage_ip]\n'
s += '\tsetmeta {remote_file_id}\n'
s += '\tgetmeta {remote_file_id}\n'
s += '\tupslavefile {local_filename} {remote_fileid} {prefix_name}\n'
s += '\tupappendfile {local_filename}\n'
s += '\ttruncate {truncate_filesize} {remote_fileid}\n'
s += '\tmodifyfile {local_filename} {remote_fileid} {file_offset}\n'
s += '\tmodifybuffer {local_filename} {remote_fileid} {file_offset}\n'
s += 'e.g.: python fdfs_test.py upfile test'
print(s)
sys.exit(0)
if len(sys.argv) < 2:
usage()
client = Fdfs_client(get_tracker_conf('client.conf'))
def upfile_func():
# Upload by filename
# usage: python fdfs_test.py upfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
file_size = os.stat(local_filename).st_size
# meta_buffer can be null.
meta_dict = {
'ext_name': 'py',
'file_size': str(file_size) + 'B'
}
t1 = time.time()
ret_dict = client.upload_by_filename(local_filename, meta_dict)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upfileex_func():
# Upload by file
# usage: python fdfs_test.py upfileex {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
t1 = time.time()
ret_dict = client.upload_by_file(local_filename)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavefile_func():
# upload slave file
# usage: python fdfs_test.py upslavefile {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
ret_dict = client.upload_slave_by_file(local_filename, remote_fileid, \
prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavebuffer_func():
# upload slave by buffer
# usage: python fdfs_test.py upslavebuffer {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
with open(local_filename, 'rb') as f:
filebuffer = f.read()
        # upload_slave_by_buffer() takes the buffer and the master file id;
        # prefix_name is not part of its signature in this client
        ret_dict = client.upload_slave_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def del_func():
# delete file
# usage: python fdfs_test.py delete {remote_fileid}
if len(sys.argv) < 3:
usage()
return None
try:
remote_file_id = sys.argv[2]
ret_tuple = client.delete_file(remote_file_id)
print('[+] %s' % ret_tuple[0])
print('[+] remote_fileid: %s' % ret_tuple[1])
print('[+] Storage IP: %s' % ret_tuple[2])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downfile_func():
# Download to file
# usage: python fdfs_test.py downfile {local_filename} {remote_fileid}
    if len(sys.argv) < 4:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
ret_dict = client.download_to_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_group_func():
# List one group info
# usage: python fdfs_test.py listgroup {group_name}
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
ret = client.list_one_group(group_name)
print(ret)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def listall_func():
# List all group info
# usage: python fdfs_test.py listall
if len(sys.argv) < 2:
usage()
return None
try:
ret_dict = client.list_all_groups()
print('=' * 80)
print('Groups count:', ret_dict['Groups count'])
for li in ret_dict['Groups']:
print('-' * 80)
print(li)
print('-' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_server_func():
# List all servers info of group
# usage: python fdfs_test.py listsrv {group_name} [storage_ip]
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
if len(sys.argv) > 3:
storage_ip = sys.argv[3]
else:
storage_ip = None
ret_dict = client.list_servers(group_name, storage_ip)
print('=' * 80)
print('Group name: %s' % ret_dict['Group name'])
print('=' * 80)
i = 1
for serv in ret_dict['Servers']:
print('Storage server %d:' % i)
print('=' * 80)
print(serv)
i += 1
print('=' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upbuffer_func():
# Upload by buffer
# usage: python fdfs_test.py upbuffer {local_filename} [remote_file_ext_name]
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
if len(sys.argv) > 3:
ext_name = sys.argv[3]
else:
ext_name = None
# meta_buffer can be null.
meta_buffer = {
'ext_name': 'gif',
'width': '150px',
'height': '80px'
}
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_by_buffer(file_buffer, ext_name, meta_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downbuffer_func():
# Download to buffer
# usage: python fdfs_test.py downbuffer {remote_file_id}
# e.g.: 'group1/M00/00/00/wKjzhU_rLNmjo2-1AAAamGDONEA5818.py'
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.download_to_buffer(remote_fileid)
print('Downloaded content:')
print(ret_dict['Content'])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def get_meta_data_func():
# Get meta data of remote file
# usage python fdfs_test.py getmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.get_meta_data(remote_fileid)
print(ret_dict)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def set_meta_data_func():
# Set meta data of remote file
# usage python fdfs_test.py setmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
meta_dict = {
'ext_name': 'jgp',
'width': '160px',
'hight': '80px',
}
try:
ret_dict = client.set_meta_data(remote_fileid, meta_dict)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendfile_func():
# Upload an appender file by filename
# usage: python fdfs_test.py upappendfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
ret_dict = client.upload_appender_by_file(local_filename)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendbuffer_func():
# Upload an appender file by buffer
# usage: python fdfs_test.py upappendbuffer {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_appender_by_buffer(file_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendfile_func():
# Append a remote file
# usage: python fdfs_test.py appendfile {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
ret_dict = client.append_by_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendbuffer_func():
# Append a remote file by buffer
# usage: python fdfs_test.py appendbuffer {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.append_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def truncate_func():
# Truncate file
# usage: python fdfs_test.py truncate {truncate_filesize} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
truncate_filesize = int(sys.argv[2])
remote_fileid = sys.argv[3]
try:
ret_dict = client.truncate_file(truncate_filesize, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifyfile_func():
# Modify file by filename
# usage: python fdfs_test.py modifyfile {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
ret_dict = client.modify_by_filename(local_filename, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifybuffer_func():
# Modify file by buffer
# usage: python fdfs_test.py modifybuffer {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.modify_by_buffer(filebuffer, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
result = {
'upfile': lambda: upfile_func(),
'upfileex': lambda: upfileex_func(),
'upbuffer': lambda: upbuffer_func(),
'delete': lambda: del_func(),
'downfile': lambda: downfile_func(),
'downbuffer': lambda: downbuffer_func(),
'listgroup': lambda: list_group_func(),
'listall': lambda: listall_func(),
'listsrv': lambda: list_server_func(),
'getmeta': lambda: get_meta_data_func(),
'setmeta': lambda: set_meta_data_func(),
    'upslavefile': lambda: upslavefile_func(),
    'upslavebuffer': lambda: upslavebuffer_func(),
'upappendfile': lambda: upappendfile_func(),
'upappendbuffer': lambda: upappendbuffer_func(),
'appendfile': lambda: appendfile_func(),
'appendbuffer': lambda: appendbuffer_func(),
'truncate': lambda: truncate_func(),
'modifyfile': lambda: modifyfile_func(),
'modifybuffer': lambda: modifybuffer_func(),
'-h': lambda: usage(),
}[sys.argv[1].lower()]()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: storage_client.py
import os
import sys
import stat
import errno
import struct
import socket
import datetime
import platform
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
# from test_fdfs.sendfile import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def tcp_send_file(conn, filename, buffer_size=1024):
'''
Send file to server, and split into multiple pkgs while sending.
arguments:
@conn: connection
@filename: string
@buffer_size: int ,send buffer size
@Return int: file size if success else raise ConnectionError.
'''
file_size = 0
with open(filename, 'rb') as f:
while 1:
try:
send_buffer = f.read(buffer_size)
send_size = len(send_buffer)
if send_size == 0:
break
tcp_send_data(conn, send_buffer)
file_size += send_size
except ConnectionError as e:
raise ConnectionError('[-] Error while uploading file(%s).' % e.args)
except IOError as e:
raise DataError('[-] Error while reading local file(%s).' % e.args)
return file_size
def tcp_send_file_ex(conn, filename, buffer_size=4096):
'''
Send file to server. Using linux system call 'sendfile'.
arguments:
@conn: connection
@filename: string
@return long, sended size
'''
if 'linux' not in sys.platform.lower():
raise DataError('[-] Error: \'sendfile\' system call only available on linux.')
nbytes = 0
offset = 0
sock_fd = conn.get_sock().fileno()
with open(filename, 'rb') as f:
in_fd = f.fileno()
        while 1:
            try:
                # use os.sendfile (Python 3.3+) in place of the external
                # 'sendfile' package the original code left commented out
                sent = os.sendfile(sock_fd, in_fd, offset, buffer_size)
                if 0 == sent:
                    break
                nbytes += sent
                offset += sent
except OSError as e:
if e.errno == errno.EAGAIN:
continue
raise
return nbytes
def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
'''
    Receive a file from the server, fragmenting it while receiving and writing it to disk.
arguments:
@conn: connection
@local_filename: string
@file_size: int, remote file size
@buffer_size: int, receive buffer size
@Return int: file size if success else raise ConnectionError.
'''
total_file_size = 0
flush_size = 0
remain_bytes = file_size
with open(local_filename, 'wb+') as f:
while remain_bytes > 0:
try:
if remain_bytes >= buffer_size:
file_buffer, recv_size = tcp_recv_response(conn, buffer_size, buffer_size)
else:
file_buffer, recv_size = tcp_recv_response(conn, remain_bytes, buffer_size)
f.write(file_buffer)
remain_bytes -= buffer_size
total_file_size += recv_size
flush_size += recv_size
if flush_size >= 4096:
f.flush()
flush_size = 0
except ConnectionError as e:
raise ConnectionError('[-] Error: while downloading file(%s).' % e.args)
except IOError as e:
                raise DataError('[-] Error: while writing local file(%s).' % e.args)
return total_file_size
class Storage_client(object):
'''
The Class Storage_client for storage server.
    Note: the host_tuple argument holds the storage server ip address and should contain a single element.
'''
    def __init__(self, *args):
        conn_kwargs = {
            'name': 'Storage Pool',
            'host_tuple': (args[0],),
            'port': args[1],
            'timeout': args[2]
        }
self.pool = ConnectionPool(**conn_kwargs)
return None
def __del__(self):
try:
self.pool.destroy()
self.pool = None
except:
pass
def update_pool(self, old_store_serv, new_store_serv, timeout=30):
'''
        Update the connection pool of the storage client.
        The pool has to be rebuilt when the storage server changes; if the server
        is unchanged, nothing is done.
'''
if old_store_serv.ip_addr == new_store_serv.ip_addr:
return None
self.pool.destroy()
conn_kwargs = {
'name': 'Storage_pool',
'host_tuple': (new_store_serv.ip_addr,),
'port': new_store_serv.port,
'timeout': timeout
}
self.pool = ConnectionPool(**conn_kwargs)
return True
def _storage_do_upload_file(self, tracker_client, store_serv, file_buffer, file_size=None, upload_type=None,
meta_dict=None, cmd=None, master_filename=None, prefix_name=None, file_ext_name=None):
'''
core of upload file.
arguments:
        @tracker_client: Tracker_client, used to talk to the tracker server
        @store_serv: Storage_server, returned from querying the tracker server
@file_buffer: string, file name or file buffer for send
@file_size: int
@upload_type: int, optional: FDFS_UPLOAD_BY_FILE, FDFS_UPLOAD_BY_FILENAME,
FDFS_UPLOAD_BY_BUFFER
        @meta_dict: dictionary, metadata to store with the file
        @cmd: int, one of the fdfs protocol command constants
        @master_filename: string, used when uploading a slave file
@prefix_name: string
@file_ext_name: string
@Return dictionary
{
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : status,
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
}
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
master_filename_len = len(master_filename) if master_filename else 0
prefix_name_len = len(prefix_name) if prefix_name else 0
upload_slave = len(store_serv.group_name) and master_filename_len
file_ext_name = str(file_ext_name) if file_ext_name else ''
# non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
# slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
# -master_name(master_filename_len)-|
slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, FDFS_FILE_EXT_NAME_MAX_LEN, master_filename_len)
th.pkg_len = struct.calcsize(slave_fmt) if upload_slave else struct.calcsize(non_slave_fmt)
th.pkg_len += file_size
th.cmd = cmd
th.send_header(store_conn)
if upload_slave:
send_buffer = struct.pack(
slave_fmt, master_filename_len, file_size, prefix_name, file_ext_name, master_filename)
else:
send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, file_size, file_ext_name.encode())
try:
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
send_file_size = tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
send_file_size = tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
if recv_size <= FDFS_GROUP_NAME_MAX_LEN:
errmsg = '[-] Error: Storage response length is not match, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
# recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
(group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
remote_filename = remote_name.strip(b'\x00')
if meta_dict and len(meta_dict) > 0:
status = self.storage_set_metadata(tracker_client, store_serv, remote_filename, meta_dict)
if status != 0:
# rollback
self.storage_delete_file(tracker_client, store_serv, remote_filename)
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Group name': group_name.strip(b'\x00'),
'Remote file_id': group_name.strip(b'\x00') + __os_sep__.encode() + remote_filename,
'Status': 'Upload successed.',
'Local file name': file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else '',
'Uploaded size': appromix(send_file_size) if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else appromix(len(file_buffer)),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_upload_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_buffer(self, tracker_client, store_serv, file_buffer, file_ext_name=None, meta_dict=None):
buffer_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, buffer_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_slave_by_filename(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_file(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_buffer(self, tracker_client, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name):
file_size = len(filebuffer)
return self._storage_do_upload_file(tracker_client, store_serv, filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, None, remote_filename,
file_ext_name)
def storage_upload_appender_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_buffer(self, tracker_client, store_serv, file_buffer, meta_dict=None,
file_ext_name=None):
file_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
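# Illustrative sketch (not part of the original source): a typical upload flow built from the
# methods above. Names are assumptions: `tracker_pool`/`storage_pool` are ConnectionPool objects
# created elsewhere, and Storage_client is assumed to be constructed from a pool the same way
# Tracker_client is.
#
#   tc = Tracker_client(tracker_pool)
#   store_serv = tc.tracker_query_storage_stor_without_group()
#   sc = Storage_client(storage_pool)
#   ret = sc.storage_upload_by_filename(tc, store_serv, '/tmp/demo.jpg', meta_dict={'width': '800'})
#   print(ret['Remote file_id'])   # bytes like b'group1/M00/00/00/xxxx.jpg'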
def storage_delete_file(self, tracker_client, store_serv, remote_filename):
'''
Delete file from storage server.
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_DELETE_FILE
file_name_len = len(remote_filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
try:
th.send_header(store_conn)
# del_fmt: |-group_name(16)-filename(len)-|
del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(del_fmt, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.'
# % (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
# recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
remote_filename = store_serv.group_name + __os_sep__.encode() + remote_filename
return ('Delete file succeeded.', remote_filename, store_serv.ip_addr)
def _storage_do_download_file(self, tracker_client, store_serv, file_buffer, offset, download_size,
download_type, remote_filename):
'''
Core of download file from storage server.
You can choose the download type: FDFS_DOWNLOAD_TO_FILE or
FDFS_DOWNLOAD_TO_BUFFER. A file offset may also be given.
@Return dictionary
'Remote file name' : remote_filename,
'Content' : local_filename or buffer,
'Download size' : download_size,
'Storage IP' : storage_ip
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
try:
th.send_header(store_conn)
# down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(down_fmt, offset, download_size, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' %
# (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d %s' % (th.status, os.strerror(th.status)))
if download_type == FDFS_DOWNLOAD_TO_FILE:
total_recv_size = tcp_recv_file(store_conn, file_buffer, th.pkg_len)
elif download_type == FDFS_DOWNLOAD_TO_BUFFER:
recv_buffer, total_recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Remote file_id': store_serv.group_name + __os_sep__.encode() + remote_filename,
'Content': file_buffer if download_type == FDFS_DOWNLOAD_TO_FILE else recv_buffer,
'Download size': appromix(total_recv_size),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_download_to_file(self, tracker_client, store_serv, local_filename, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, local_filename, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_FILE, remote_filename)
def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, file_buffer, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_BUFFER, remote_filename)
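# Illustrative sketch (hypothetical names, continuing the upload sketch above): downloading an
# uploaded file. The remote file_id is 'group_name/remote_filename'; split it, ask the tracker
# which storage server holds it, then fetch to a local path. An offset of 0 with a download size
# of 0 is typically interpreted by the storage server as "the whole file".
#
#   group_name, remote_filename = split_remote_fileid(ret['Remote file_id'])
#   fetch_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
#   sc.storage_download_to_file(tc, fetch_serv, 'local_copy.jpg', 0, 0, remote_filename)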
def storage_set_metadata(self, tracker_client, store_serv, remote_filename, meta_dict,
op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
ret = 0
conn = self.pool.get_connection()
remote_filename_len = len(remote_filename)
meta_buffer = fdfs_pack_metadata(meta_dict)
meta_len = len(meta_buffer)
th = Tracker_header()
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + 1 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
th.cmd = STORAGE_PROTO_CMD_SET_METADATA
try:
th.send_header(conn)
# meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
# -filename(remote_filename_len)-meta(meta_len)|
meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len, meta_len)
send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, op_flag, store_serv.group_name,
remote_filename, meta_buffer)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
ret = th.status
except:
raise
finally:
self.pool.release(conn)
return ret
def storage_get_metadata(self, tracker_client, store_serv, remote_file_name):
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_file_name)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_GET_METADATA
try:
th.send_header(store_conn)
# meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(meta_fmt, store_serv.group_name, remote_file_name.encode())
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: Remote file %s has no meta data.'
# % (store_serv.group_name + __os_sep__.encode() + remote_file_name))
if th.status != 0:
raise DataError('[-] Error:%d, %s' % (th.status, os.strerror(th.status)))
if th.pkg_len == 0:
# no metadata stored for this file; the finally clause below still releases the connection
return {}
meta_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = fdfs_unpack_metadata(meta_buffer)
return ret_dict
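# Illustrative sketch (hypothetical names, same tc/sc/store_serv as the earlier sketches): setting
# metadata on an existing remote file and reading it back. Note that storage_set_metadata expects
# the remote filename as bytes, while storage_get_metadata encodes a str itself.
#
#   status = sc.storage_set_metadata(tc, store_serv, remote_filename, {'author': 'demo'})
#   if status == 0:
#       meta = sc.storage_get_metadata(tc, store_serv, remote_filename.decode())
#       print(meta)   # the metadata dict as stored on the server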
def _storage_do_append_file(self, tracker_client, store_serv, file_buffer, file_size, upload_type,
appended_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
appended_filename_len = len(appended_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appended_filename_len + file_size
th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
try:
th.send_header(store_conn)
# append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
# -filecontent(filesize)-|
append_fmt = '!Q Q %ds' % appended_filename_len
send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Append file succeeded.'
ret_dict['Appender file name'] = store_serv.group_name + __os_sep__.encode() + appended_filename
ret_dict['Appended size'] = appromix(file_size)
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_append_by_filename(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size,
FDFS_UPLOAD_BY_FILENAME, appended_filename)
def storage_append_by_file(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size, FDFS_UPLOAD_BY_FILE,
appended_filename)
def storage_append_by_buffer(self, tracker_client, store_serv, file_buffer, appended_filename):
file_size = len(file_buffer)
return self._storage_do_append_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
appended_filename)
def _storage_do_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
try:
th.send_header(store_conn)
# truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
# -appender_filename(len)-|
truncate_fmt = '!Q Q %ds' % appender_filename_len
send_buffer = struct.pack(truncate_fmt, appender_filename_len, truncated_filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Truncate succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
return self._storage_do_truncate_file(tracker_client, store_serv, truncated_filesize, appender_filename)
def _storage_do_modify_file(self, tracker_client, store_serv, upload_type, filebuffer, offset, filesize,
appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
try:
th.send_header(store_conn)
# modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
modify_fmt = '!Q Q Q %ds' % appender_filename_len
send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
upload_size = tcp_send_file(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
upload_size = tcp_send_file_ex(store_conn, filebuffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Modify succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_modify_by_filename(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILENAME, filename, offset,
filesize, appender_filename)
def storage_modify_by_file(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILE, filename, offset, filesize,
appender_filename)
def storage_modify_by_buffer(self, tracker_client, store_serv, filebuffer, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_BUFFER, filebuffer, offset,
filesize, appender_filename)
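# Illustrative sketch (not part of the original source) of the appender-file workflow the methods
# above implement: unlike a normal file, an appender file can be grown, truncated and patched in
# place. Names (tc, sc, store_serv) follow the earlier sketches.
#
#   ret = sc.storage_upload_appender_by_buffer(tc, store_serv, b'hello', file_ext_name='log')
#   group_name, appender_name = split_remote_fileid(ret['Remote file_id'])
#   sc.storage_append_by_buffer(tc, store_serv, b' world', appender_name)      # now b'hello world'
#   sc.storage_truncate_file(tc, store_serv, 5, appender_name)                 # back to b'hello'
#   sc.storage_modify_by_buffer(tc, store_serv, b'H', 0, 1, appender_name)     # first byte -> b'Hello'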
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tracker_client.py
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
status_map = {
FDFS_STORAGE_STATUS_INIT: 'INIT',
FDFS_STORAGE_STATUS_WAIT_SYNC: 'WAIT_SYNC',
FDFS_STORAGE_STATUS_SYNCING: 'SYNCING',
FDFS_STORAGE_STATUS_IP_CHANGED: 'IP_CHANGED',
FDFS_STORAGE_STATUS_DELETED: 'DELETED',
FDFS_STORAGE_STATUS_OFFLINE: 'OFFLINE',
FDFS_STORAGE_STATUS_ONLINE: 'ONLINE',
FDFS_STORAGE_STATUS_ACTIVE: 'ACTIVE',
FDFS_STORAGE_STATUS_RECOVERY: 'RECOVERY'
}
return status_map.get(status_code, 'UNKNOWN')
class Storage_info(object):
def __init__(self):
self.status = 0
self.id = ''
self.ip_addr = ''
self.domain_name = ''
self.src_id = ''
self.version = ''
self.join_time = datetime.fromtimestamp(0).isoformat()
self.up_time = datetime.fromtimestamp(0).isoformat()
self.totalMB = ''
self.freeMB = ''
self.upload_prio = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_write_path = 0
self.storage_port = 23000
self.storage_http_port = 80
self.alloc_count = 0
self.current_count = 0
self.max_count = 0
self.total_upload_count = 0
self.success_upload_count = 0
self.total_append_count = 0
self.success_append_count = 0
self.total_modify_count = 0
self.success_modify_count = 0
self.total_truncate_count = 0
self.success_truncate_count = 0
self.total_setmeta_count = 0
self.success_setmeta_count = 0
self.total_del_count = 0
self.success_del_count = 0
self.total_download_count = 0
self.success_download_count = 0
self.total_getmeta_count = 0
self.success_getmeta_count = 0
self.total_create_link_count = 0
self.success_create_link_count = 0
self.total_del_link_count = 0
self.success_del_link_count = 0
self.total_upload_bytes = 0
self.success_upload_bytes = 0
self.total_append_bytes = 0
self.success_append_bytes = 0
self.total_modify_bytes = 0
self.success_modify_bytes = 0
self.total_download_bytes = 0
self.success_download_bytes = 0
self.total_sync_in_bytes = 0
self.success_sync_in_bytes = 0
self.total_sync_out_bytes = 0
self.success_sync_out_bytes = 0
self.total_file_open_count = 0
self.success_file_open_count = 0
self.total_file_read_count = 0
self.success_file_read_count = 0
self.total_file_write_count = 0
self.success_file_write_count = 0
self.last_source_sync = datetime.fromtimestamp(0).isoformat()
self.last_sync_update = datetime.fromtimestamp(0).isoformat()
self.last_synced_time = datetime.fromtimestamp(0).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
self.if_trunk_server = ''
# fmt = |-status(1)-ipaddr(16)-domain(128)-srcipaddr(16)-ver(6)-52*8-|
self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'
def set_info(self, bytes_stream):
(self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
try:
self.ip_addr = ip_addr.strip(b'\x00')
self.domain_name = domain_name.strip(b'\x00')
self.version = version.strip(b'\x00')
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError as e:
raise ResponseError('[-] Error: disk space overrun, cannot represent it.')
self.join_time = datetime.fromtimestamp(join_time).isoformat()
self.up_time = datetime.fromtimestamp(up_time).isoformat()
self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
return True
def __str__(self):
'''Transform to readable string.'''
s = 'Storage information:\n'
s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
s += '\thttp domain = %s\n' % self.domain_name
s += '\tversion = %s\n' % self.version
s += '\tjoin time = %s\n' % self.join_time
s += '\tup time = %s\n' % self.up_time
s += '\ttotal storage = %s\n' % self.totalMB
s += '\tfree storage = %s\n' % self.freeMB
s += '\tupload priority = %d\n' % self.upload_prio
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.storage_http_port
s += '\tcurrent write path = %d\n' % self.curr_write_path
s += '\tsource ip_addr = %s\n' % self.ip_addr
s += '\tif_trunk_server = %d\n' % self.if_trunk_server
s += '\ttotal upload count = %ld\n' % self.total_upload_count
s += '\tsuccess upload count = %ld\n' % self.success_upload_count
s += '\ttotal download count = %ld\n' % self.total_download_count
s += '\tsuccess download count = %ld\n' % self.success_download_count
s += '\ttotal append count = %ld\n' % self.total_append_count
s += '\tsuccess append count = %ld\n' % self.success_append_count
s += '\ttotal modify count = %ld\n' % self.total_modify_count
s += '\tsuccess modify count = %ld\n' % self.success_modify_count
s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
s += '\ttotal delete count = %ld\n' % self.total_del_count
s += '\tsuccess delete count = %ld\n' % self.success_del_count
s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
s += '\ttotal create link count = %ld\n' % self.total_create_link_count
s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
s += '\ttotal file open count = %ld\n' % self.total_file_open_count
s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
s += '\ttotal file read count = %ld\n' % self.total_file_read_count
s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
s += '\ttotal file write count = %ld\n' % self.total_file_write_count
s += '\tsuccess file write count = %ld\n' % self.success_file_write_count
s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
s += '\tlast source update = %s\n' % self.last_source_sync
s += '\tlast sync update = %s\n' % self.last_sync_update
s += '\tlast synced time = %s\n' % self.last_synced_time
return s
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Group_info(object):
def __init__(self):
self.group_name = ''
self.totalMB = ''
self.freeMB = ''
self.trunk_freeMB = ''
self.count = 0
self.storage_port = 0
self.store_http_port = 0
self.active_count = 0
self.curr_write_server = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_trunk_file_id = 0
self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
return None
def __str__(self):
s = 'Group information:\n'
s += '\tgroup name = %s\n' % self.group_name
s += '\ttotal disk space = %s\n' % self.totalMB
s += '\tdisk free space = %s\n' % self.freeMB
s += '\ttrunk free space = %s\n' % self.trunk_freeMB
s += '\tstorage server count = %d\n' % self.count
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.store_http_port
s += '\tactive server count = %d\n' % self.active_count
s += '\tcurrent write server index = %d\n' % self.curr_write_server
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
return s
def set_info(self, bytes_stream):
(group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
try:
self.group_name = group_name.strip(b'\x00')
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError:
raise DataError('[-] Error: disk space overrun, cannot represent it.')
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Tracker_client(object):
'''Class Tracker client.'''
def __init__(self, pool):
self.pool = pool
def tracker_list_servers(self, group_name, storage_ip=None):
'''
List servers in a storage group
'''
conn = self.pool.get_connection()
th = Tracker_header()
ip_len = len(storage_ip) if storage_ip else 0
if ip_len >= IP_ADDRESS_SIZE:
ip_len = IP_ADDRESS_SIZE - 1
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
store_ip_addr = storage_ip.encode() if isinstance(storage_ip, str) else (storage_ip or b'')
storage_ip_fmt = '!%ds' % ip_len
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
si = Storage_info()
si_fmt_size = si.get_fmt_size()
recv_size = len(recv_buffer)
if recv_size % si_fmt_size != 0:
errinfo = '[-] Error: response size not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errinfo)
except ConnectionError:
raise
finally:
self.pool.release(conn)
num_storage = recv_size // si_fmt_size
si_list = []
i = 0
while num_storage:
si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
si_list.append(si)
si = Storage_info()
num_storage -= 1
i += 1
ret_dict = {}
ret_dict['Group name'] = group_name
ret_dict['Servers'] = si_list
return ret_dict
def tracker_list_one_group(self, group_name):
conn = self.pool.get_connection()
th = Tracker_header()
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
# group_fmt: |-group_name(16)-|
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
group_info = Group_info()
group_info.set_info(recv_buffer)
except ConnectionError:
raise
finally:
self.pool.release(conn)
return group_info
def tracker_list_all_groups(self):
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
except:
raise
finally:
self.pool.release(conn)
gi = Group_info()
gi_fmt_size = gi.get_fmt_size()
if recv_size % gi_fmt_size != 0:
errmsg = '[-] Error: Response size mismatched, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
num_groups = recv_size // gi_fmt_size
ret_dict = {}
ret_dict['Groups count'] = num_groups
gi_list = []
i = 0
while num_groups:
gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
gi_list.append(gi)
gi = Group_info()
i += 1
num_groups -= 1
ret_dict['Groups'] = gi_list
return ret_dict
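# Illustrative sketch (hypothetical pool name): a minimal monitoring pass that walks every group
# reported by the tracker and prints the storage servers in each one, using the two listing
# methods above.
#
#   tc = Tracker_client(tracker_pool)
#   for gi in tc.tracker_list_all_groups()['Groups']:
#       print(gi)
#       for si in tc.tracker_list_servers(gi.group_name)['Servers']:
#           print(si)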
def tracker_query_storage_stor_without_group(self):
'''Query storage server for upload, without group name.
Return: Storage_server object'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def tracker_query_storage_stor_with_group(self, group_name):
'''Query storage server for upload, based on group name.
arguments:
@group_name: string
@Return Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.send_header(conn)
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
send_buffer = struct.pack(group_fmt, group_name)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def _tracker_do_query_storage(self, group_name, filename, cmd):
'''
Core of query storage, based on group name and filename.
It is used for download, delete and set_meta.
arguments:
@group_name: string
@filename: string. remote file_id
@Return: Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
file_name_len = len(filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
th.cmd = cmd
th.send_header(conn)
# query_fmt: |-group_name(16)-filename(file_name_len)-|
query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(query_fmt, group_name, filename)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ip_addr(16)-port(8)-|
recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ipaddr.strip(b'\x00')
return store_serv
def tracker_query_storage_update(self, group_name, filename):
'''
Query storage server to update(delete and set_meta).
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
def tracker_query_storage_fetch(self, group_name, filename):
'''
Query storage server to download.
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: utils.py
import io
import os
import sys
import stat
import platform
import configparser
SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def appromix(size, base=0):
'''Convert a byte-stream size to a human-readable string.
Keyword arguments:
size: int, bytes stream size
base: int, suffix index
Return: string
'''
multiples = 1024
if size < 0:
raise ValueError('[-] Error: number must be non-negative.')
if size < multiples:
return '{0:d}{1}'.format(size, SUFFIX[base])
for suffix in SUFFIX[base:]:
if size < multiples:
return '{0:.2f}{1}'.format(size, suffix)
size = size / float(multiples)
raise ValueError('[-] Error: number too big.')
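# Illustrative examples of what appromix returns (index 2 in SUFFIX is 'MB', which is why the
# storage statistics above pass FDFS_SPACE_SIZE_BASE_INDEX for values counted in megabytes):
#   appromix(512)      -> '512B'
#   appromix(1536)     -> '1.50KB'
#   appromix(3, 2)     -> '3MB'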
def get_file_ext_name(filename, double_ext=True):
li = filename.split(os.extsep)
if len(li) <= 1:
return ''
else:
if li[-1].find(__os_sep__) != -1:
return ''
if double_ext:
if len(li) > 2:
if li[-2].find(__os_sep__) == -1:
return '%s.%s' % (li[-2], li[-1])
return li[-1]
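# Illustrative examples of get_file_ext_name:
#   get_file_ext_name('photo.jpg')                          -> 'jpg'
#   get_file_ext_name('archive.tar.gz')                     -> 'tar.gz'   (double_ext keeps both parts)
#   get_file_ext_name('archive.tar.gz', double_ext=False)   -> 'gz'
#   get_file_ext_name('README')                             -> ''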
class Fdfs_ConfigParser(configparser.RawConfigParser):
"""
Extends ConfigParser to allow files without sections.
This is done by wrapping read files and prepending them with a placeholder
section, which defaults to '__config__'
"""
def __init__(self, default_section=None, *args, **kwargs):
configparser.RawConfigParser.__init__(self, *args, **kwargs)
self._default_section = None
self.set_default_section(default_section or '__config__')
def get_default_section(self):
return self._default_section
def set_default_section(self, section):
self.add_section(section)
# move all values from the previous default section to the new one
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
except configparser.NoSectionError:
pass
else:
for (key, value) in default_section_items:
self.set(section, key, value)
self._default_section = section
def read(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename) as fp:
self.readfp(fp)
except IOError:
continue
else:
read_ok.append(filename)
return read_ok
def readfp(self, fp, *args, **kwargs):
stream = io.StringIO()
try:
stream.name = fp.name
except AttributeError:
pass
stream.write('[' + self._default_section + ']\n')
stream.write(fp.read())
stream.seek(0, 0)
return self._read(stream, stream.name)
def write(self, fp):
# Write the items from the default section manually and then remove them
# from the data. They'll be re-added later.
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
for (key, value) in default_section_items:
fp.write("{0} = {1}\n".format(key, value))
fp.write("\n")
except configparser.NoSectionError:
pass
configparser.RawConfigParser.write(self, fp)
self.add_section(self._default_section)
for (key, value) in default_section_items:
self.set(self._default_section, key, value)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in the setup file contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == configparser.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise configparser.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
if optname in cursect:
if not isinstance(cursect[optname], list):
cursect[optname] = [cursect[optname]]
cursect[optname].append(optval)
else:
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = configparser.ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
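# Illustrative sketch (file and option names are only examples): FastDFS config files such as
# client.conf have no [section] headers, which the standard configparser rejects; this subclass
# silently wraps them in a '__config__' section.
#
#   cp = Fdfs_ConfigParser()
#   cp.read('client.conf')
#   tracker = cp.get('__config__', 'tracker_server')   # e.g. '192.168.0.1:22122'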
def split_remote_fileid(remote_file_id):
'''
Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
'''
index = remote_file_id.find(b'/')
if -1 == index:
return None
return (remote_file_id[0:index], remote_file_id[(index + 1):])
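# Illustrative example: split_remote_fileid operates on the bytes file_id returned by the upload
# methods, splitting on the first '/':
#   split_remote_fileid(b'group1/M00/00/00/demo.jpg')  -> (b'group1', b'M00/00/00/demo.jpg')
#   split_remote_fileid(b'no-slash-here')              -> None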
def fdfs_check_file(filename):
ret = True
errmsg = ''
if not os.path.isfile(filename):
ret = False
errmsg = '[-] Error: %s is not a file.' % filename
elif not stat.S_ISREG(os.stat(filename).st_mode):
ret = False
errmsg = '[-] Error: %s is not a regular file.' % filename
return (ret, errmsg)
if __name__ == '__main__':
print(get_file_ext_name('/bc.tar.gz'))
"""
NEEQ (New Third Board / 新三板) stock codes, collected from Eastmoney (东方财富网)
"""
# Eastmoney Shanghai/Shenzhen A-share stock code collection
# http://quote.eastmoney.com/center/gridlist.html?st=ChangePercent&sortType=C&sortRule=-1#hs_a_board
import json
import random
import time
import pymysql
import requests
from base.BaseCore import BaseCore
class Gpdm(object):
def __init__(self):
pass
baseCore = BaseCore()
log = baseCore.getLogger()
headers={
'X-AUTH-TOKEN':'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxMzY4MzgxNjk4NCIsImlhdCI6MTY5MDE3ODYyOCwiZXhwIjoxNjkyNzcwNjI4fQ.VV3Zoa4RM5nVN8UXBc0-81KMGqLzTOme6rButeETGfFQi7p5h4ydg8CFrEsizr_iFwB3_BVaKR2o2xR-M4ipbQ',
'X-TYCID':'77e997401d5f11ee9e91d5a0fd3c0b83',
'version':'TYC-Web',
'Content-Type':'application/json;charset=UTF-8'
}
cnx = pymysql.connect(host='114.115.159.144', user='caiji', password='zzsn9988', db='caiji',charset='utf8mb4')
cursor= cnx.cursor()
taskType = '股票代码/东方财富网/新三板'
def getTotal(self,pageSize,start):
total=0
for num in range(3):
try:
#'http://22.push2.eastmoney.com/api/qt/clist/get?cb=jQuery112408864553991444548_1694403822615&pn=1&pz={pageSize}&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&wbp2u=|0|0|0|web&fid=f3&fs=m:0+t:81+s:!2052&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f111&_={self.baseCore.getNowTime(3)}'
url = f"http://22.push2.eastmoney.com/api/qt/clist/get?&pn=1&pz={pageSize}&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&wbp2u=|0|0|0|web&fid=f3&fs=m:0+t:81+s:!2052&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f111&_={self.baseCore.getNowTime(3)}"
# url = f"http://17.push2.eastmoney.com/api/qt/clist/get?cb=jQuery1124020359136113854692_1688967721474&pn=1&pz={pageSize}&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&wbp2u=|0|0|0|web&fid=f3&fs=m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_={self.baseCore.getNowTime(3)}"
ip = self.baseCore.get_proxy()
self.headers['User-Agent'] = self.baseCore.getRandomUserAgent()
response = requests.get(url, headers=self.headers, verify=False, proxies=ip)
time.sleep(random.randint(3, 5))
# Sample response (heavily truncated): jQuery...({"rc":0,"rt":6,...,"data":{"total":5488,"diff":[{"f1":2,"f2":35.37,...,"f12":"603119",...}, ...]}});
content = response.content.decode('utf-8')
# content = self.baseCore.getSubStr(content, '{', '}')
retJson = json.loads(content)
total = retJson['data']['total']
response.close()
return total
except Exception as e:
self.log.info(f"------第{num}次出错---{e}")
continue
exception = '链接失败'
state = 0
takeTime = self.baseCore.getTimeCost(start, time.time())
self.baseCore.recordLog('', self.taskType, state, takeTime, 'http://quote.eastmoney.com/center/gridlist.html#neeq_stocks', exception)
return total
def getPageDta(self,pageIndex,pageSize,totalPage,gpdmList):
gpdmListPage = []
for num in range(3):
try:
start = time.time()
self.log.info(f"【{pageIndex}/{totalPage}】-----------begin")
url = f"http://22.push2.eastmoney.com/api/qt/clist/get?&pn={pageIndex}&pz={pageSize}&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&wbp2u=|0|0|0|web&fid=f3&fs=m:0+t:81+s:!2052&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f26,f22,f33,f11,f62,f128,f136,f115,f152,f111&_={self.baseCore.getNowTime(3)}"
ip = self.baseCore.get_proxy()
self.headers['User-Agent'] = self.baseCore.getRandomUserAgent()
response = requests.get(url, headers=self.headers, verify=False, proxies=ip)
time.sleep(random.randint(3, 5))
content = response.content.decode('utf-8')
# content = self.baseCore.getSubStr(content, '{', '}')
retJson = json.loads(content)
dataList= retJson['data']['diff']
for dataIndex in range(len(dataList)):
gpdm=dataList[dataIndex]['f12']
name=dataList[dataIndex]['f14']
gpdmListPage.append(gpdm)
selectSql = f"select count(1) from gpdm where gpdm='{gpdm}' and name='{name}' "
self.cursor.execute(selectSql)
count = self.cursor.fetchone()[0]
if count>0:
#更新type为3
updateSql = f"update gpdm set type=3 where gpdm='{gpdm}' and name='{name}' "
self.cursor.execute(updateSql)
self.cnx.commit()
self.log.info(f"{gpdm}-------{name}---已经存在")
continue
else:
self.log.info(f"{gpdm}-------{name}---新增")
insertSql= f"insert into gpdm(gpdm,name,state,type,create_date) values ('{gpdm}','{name}',1,3,now())"
self.cursor.execute(insertSql)
self.cnx.commit()
response.close()
self.log.info(f"【{pageIndex}/{totalPage}】-----------end,耗时{self.baseCore.getTimeCost(start, time.time())}")
# break
return gpdmListPage
except Exception as e:
self.log.info(f"------第{num}次出错---{e}")
continue
exception = f'第{pageIndex}页链接失败'
state = 0
takeTime = self.baseCore.getTimeCost(start, time.time())
self.baseCore.recordLog('', self.taskType, state, takeTime, '', exception)
return gpdmListPage
def doJob(self):
pageSize=20
start_time = time.time()
total=self.getTotal(pageSize,start_time)
gpdmList = []
if total==0:
exception = '股票代码总数为零'
state = 0
takeTime = self.baseCore.getTimeCost(start_time, time.time())
self.baseCore.recordLog('', self.taskType, state, takeTime, 'http://quote.eastmoney.com/center/gridlist.html?st=ChangePercent&sortType=C&sortRule=-1#hs_a_board', exception)
self.log.info(f"股票代码总数-----------{total},请检查")
return
self.log.info(f"股票代码总数-----------{total}")
if (total % pageSize == 0):
totalPage = total // pageSize
else:
totalPage = total // pageSize + 1
#测试:
# totalPage = 2
for pageIndex in range(1, totalPage + 1):
gpdmListPage = self.getPageDta(pageIndex,pageSize,totalPage,gpdmList)
if gpdmListPage != []:
pass
else:
continue
[gpdmList.append(gpdm) for gpdm in gpdmListPage]
# print(len(gpdmList))
state = 1
takeTime = self.baseCore.getTimeCost(start_time, time.time())
self.baseCore.recordLog('', self.taskType, state, takeTime, '', '')
# 释放资源
self.cursor.close()
self.cnx.close()
self.baseCore.close()
return gpdmList
if __name__ == '__main__':
Gpdm().doJob()
"""
Company listing information: only listed companies go into the enterprise library; unlisted companies skip the collection step. Delisted companies are marked as 0.
"""
import json
import time
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import urllib3
from BaseCore import BaseCore
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# from gpdm import Gpdm
baseCore = BaseCore()
# chromedriver = r"E:\kkwork\zzsn_spider\comData\ipoInfo\chromedriver.exe"
# browser = webdriver.Chrome(chromedriver)
taskType = '上市信息/东方财富网/新三板'
# gpdm = Gpdm()
# gpdmList = gpdm.doJob()
log = baseCore.getLogger()
error_list = []
list_all_info = []
# the stock code and the enterprise social credit code are required
flag = 0
while True:
#从表中读取企业
com_code = baseCore.redicPullData('NQEnterprise:nq_Ipo')
# com_code = baseCore.redicPullData('NQEnterprise:nq_Ipo_test')
if com_code is None:
if flag==0:
time.sleep(20)
log.info('已没有数据----------等待')
continue
elif flag==1:
# 通过接口将数据保存进数据库
for num in range(0, len(list_all_info), 100):
json_updata = json.dumps(list_all_info[num:num + 100])
# print(json_updata)
try:
response = requests.post('http://114.115.236.206:8088/sync/enterpriseIpo', data=json_updata,
timeout=300,
verify=False)
print("{}:到:{}".format(num, num + 100))
print(response.text)
except Exception as e:
print(e)
log.info('-----------数据发送接口完毕----------')
flag = 0
continue
#从数据库中查询到其他信息
log.info(f'========正在采集{com_code}===========')
data = baseCore.getInfomation(com_code)
social_code = data[1]
short_name = data[3]
start = time.time()
log.info(f'======开始采集{com_code}======')
url = f'https://xinsanban.eastmoney.com/F10/CompanyInfo/Introduction/{com_code}.html'
headers = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Cookie': 'qgqp_b_id=28edcf226f056ee077983f40f115eacf; st_si=15067486119520; emshistory=%5B%22%E4%BA%A7%E4%B8%9A%E9%93%BE%22%2C%22sz007sz%22%5D; websitepoptg_show_time=1694403032729; HAList=ty-0-002342-%u5DE8%u529B%u7D22%u5177%2Cty-0-301192-%u6CF0%u7965%u80A1%u4EFD%2Cty-1-688382-%u76CA%u65B9%u751F%u7269-U%2Cty-1-600895-%u5F20%u6C5F%u9AD8%u79D1%2Cty-1-600669-*ST%u978D%u6210%2Cty-116-00691-%u5C71%u6C34%u6C34%u6CE5%2Cty-0-300865-%u5927%u5B8F%u7ACB%2Cty-0-000656-%u91D1%u79D1%u80A1%u4EFD%2Cty-1-600257-%u5927%u6E56%u80A1%u4EFD%2Cty-1-688981-%u4E2D%u82AF%u56FD%u9645; xsb_history=833658%7C%u94C1%u8840%u79D1%u6280%2C838616%7C%u5317%u9CD0%u98DF%u54C1; st_asi=delete; st_pvi=44810095342512; st_sp=2023-07-18%2013%3A55%3A09; st_inirUrl=https%3A%2F%2Fwww.baidu.com%2Flink; st_sn=337; st_psi=20230914142347564-119112305908-4534169252',
'Host': 'xinsanban.eastmoney.com',
'Pragma': 'no-cache',
'Referer': 'https://xinsanban.eastmoney.com/F10/CompanyInfo/Introduction/833658.html',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"'
}
req = requests.get(url=url, headers=headers)
result = BeautifulSoup(req.content, 'html.parser')
# print(result)
li_list = result.find('div', id='company_info').find('ul', class_='company-page-left').find_all('li')
security = result.find('div', id='security_info').find('ul', class_='company-page-right').find_all('li')
listingDate = security[1].find('span',class_='company-page-item-right').text
businessScope = li_list[7].find('span',class_='company-page-item-right').text
industry = li_list[8].find('span',class_='company-page-item-right').text
securities_type = '新三板'
category = '3'
exchange = '1'
dic_cwsj = {
"exchange": exchange,
"category": category, # 股票类型(1-A股;2-B股;3-新三板;4-H股)
'listed': '1',
"listingDate": listingDate,
"securitiesCode": com_code,
"securitiesShortName": short_name,
"securitiesType": secutities_type,
"socialCreditCode": social_code,
"businessScope": businessScope,
"eastIndustry": industry,
"csrcIndustry": ''
}
# print(dic_cwsj)
list_all_info.append(dic_cwsj)
log.info(f'======{com_code}====采集成功=====')
flag = 1
"""
Xueqiu (雪球网) financial statement data, collected via its JSON API
"""
import json
import time
import redis
import requests
from bs4 import BeautifulSoup
import datetime
from selenium import webdriver
from BaseCore import BaseCore
baseCore = BaseCore()
log = baseCore.getLogger()
# getdriver() below drives a module-level Selenium browser that this file never creates;
# a plain Chrome instance is assumed here (point it at a chromedriver path if needed).
browser = webdriver.Chrome()
def getFormatedate(timestamp):
date = datetime.datetime.fromtimestamp(timestamp)
formatted_date = date.strftime('%Y-%m-%d')
return formatted_date
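# Illustrative example: Xueqiu report_date values are millisecond timestamps, so callers below
# divide by 1000 first, e.g. getFormatedate(int(1694414063178 / 1000)) -> '2023-09-11' (local time).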
def check_code(com_code):
r = redis.Redis(host="114.115.236.206",port=6379,password='clbzzsn',db=3)
res = r.exists('com_xqcwsj_code::'+com_code)
if res:
return False
else:
return True
def check_date(com_code,info_date):
r = redis.Redis(host="114.115.236.206", port=6379, password='clbzzsn', db=3)
res = r.sismember('com_xqcwsj_code::'+com_code, info_date) # 注意是 保存set的方式
if res:
return True
else:
return False
# 将采集后的股票代码对应的报告期保存进redis
def add_date(com_code,report_date):
r = redis.Redis(host="114.115.236.206", port=6379, password='clbzzsn',db=3)
res = r.sadd('com_xqcwsj_code::'+com_code,report_date)
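# 去重流程示意(假设性示例,股票代码仅作演示):
# ynFirst = check_code('NQ873286')        # 该公司是否首次采集
# if check_date('NQ873286', '2023-06-30'):
#     pass                                 # 该报告期已采集,跳过
# else:
#     ...                                  # 采集并推送成功后
#     add_date('NQ873286', '2023-06-30')   # 写回 Redis 集合,下次跳过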
def getrequests(url):
req = requests.get(url=url, headers=headers)
data = req.json()
a_infoData = data['data']['list']
return a_infoData
def getdriver(url_name):
# 使用模拟浏览器打开
browser.get(url_name)
time.sleep(3)
page_source = browser.page_source
soup = BeautifulSoup(page_source, 'html.parser')
return soup
def getdetail(reportInfodata,name_map,listinfo,url_name):
# # 报告期
# report_date = reportInfodata['report_date']
# report_date = getFormatedate(int(report_date / 1000))
#模拟浏览器请求
soup = getdriver(url_name)
time.sleep(2)
# 报表数据表格(利润表/资产负债表/现金流量表通用)
table = soup.find('div', class_='tab-table-responsive')
list_tr = table.find_all('tr')
for tr in list_tr[1:]:
info_name = tr.find('td', colspan='2').text
# 营业总收入
try:
info_enname = name_map[info_name]
info_data = reportInfodata[info_enname][0]
if info_data is None:
info_data = '--'
except:
info_enname = '--'
info_data = '--'
dic_info = {
"name": info_name,
'enName': info_enname,
"value": info_data
}
listinfo.append(dic_info)
return listinfo
def getinfo(info_date,com_code,social_code):
# 先判断该报告期是否已采集过,连续失败则按未采集处理
panduan = False
for nnn in range(0, 3):
try:
panduan = check_date(com_code, info_date)
break
except:
time.sleep(1)
if panduan:
log.info(f'{info_date}----已采集过')
return
for nnn in range(0, 3):
try:
ynFirst = check_code(com_code)
break
except:
time.sleep(1)
#'https://stock.xueqiu.com/v5/stock/finance/cn/balance.json?symbol=NQ873286&type=all&is_detail=true&count=5&timestamp=1694508688637'
url_lrb = f'https://stock.xueqiu.com/v5/stock/finance/cn/income.json?symbol={com_code}&type=all&is_detail=true&count=5&timestamp=1694414063178'
url_zcfzb = f'https://stock.xueqiu.com/v5/stock/finance/cn/balance.json?symbol={com_code}&type=all&is_detail=true&count=5&timestamp=1694508688637'
url_xjllb = f'https://stock.xueqiu.com/v5/stock/finance/cn/cash_flow.json?symbol={com_code}&type=all&is_detail=true&count=5&timestamp=1694512695956'
lrb_name = f'https://xueqiu.com/snowman/S/{com_code}/detail#/GSLRB'
zcfzb_name = f'https://xueqiu.com/snowman/S/{com_code}/detail#/ZCFZB'
xjllb_name = f'https://xueqiu.com/snowman/S/{com_code}/detail#/XJLLB'
a_infoData = getrequests(url_lrb)
b_infoData = getrequests(url_zcfzb)
c_infoData = getrequests(url_xjllb)
listLrb = []
listZcfzb = []
listXjllb = []
for i in range(len(a_infoData)):
report_date_a = a_infoData[i]['report_date']
report_date_a = getFormatedate(int(report_date_a / 1000))
if info_date == report_date_a:
log.info(f'======正在采集利润表:{com_code}---{info_date}=======')
# 利润表
reportLrbdata = a_infoData[i]
listLrb = getdetail(reportLrbdata, lrb_name_map, listLrb, lrb_name)
log.info(f'利润表数据:{len(listLrb)}个')
break
else:
continue
for j in range(len(b_infoData)):
report_date_b = b_infoData[j]['report_date']
report_date_b = getFormatedate(int(report_date_b / 1000))
if info_date == report_date_b:
log.info(f'======正在采集资产负债表:{com_code}---{info_date}=======')
reportZcfzbdata = b_infoData[j]
listZcfzb = getdetail(reportZcfzbdata, zcfzb_name_map, listZcfzb, zcfzb_name)
log.info(f'资产负债表数据:{len(listZcfzb)}个')
break
else:
continue
for k in range(len(c_infoData)):
report_date_c = c_infoData[k]['report_date']
report_date_c = getFormatedate(int(report_date_c / 1000))
if info_date == report_date_c:
log.info(f'======正在采集现金流量表:{com_code}---{info_date}=======')
reportXjllbdata = c_infoData[k]
listXjllb = getdetail(reportXjllbdata, xjllb_name_map, listXjllb, xjllb_name)
log.info(f'现金流量表数据:{len(listXjllb)}个')
break
else:
continue
# reportLrbname = a_infoData[i]['report_name']
# reporZCFZbname = b_infoData[i]['report_name']
# reportXJLLBname = c_infoData[i]['report_name']
#时间戳转化为日期
# report_date = getFormatedate(int(report_date / 1000))
# 检查报告期是否已经存在
# log.info(f'======正在采集:{com_code}---{info_date}=======')
# #利润表
# reportLrbdata = a_infoData[i]
# list_Lrb = getdetail(reportLrbdata,lrb_name_map,listLrb,lrb_name)
# log.info(f'利润表数据:{len(list_Lrb)}个')
#
# # print(list_Lrb)
# #资产负债表
# try:
# reportZcfzbdata = b_infoData[j]
# list_Zcfzb = getdetail(reportZcfzbdata,zcfzb_name_map,listZcfzb,zcfzb_name)
#
# except:
# list_Zcfzb = []
# #现金流量表
# reportXjllbdata = c_infoData[k]
# list_Xjllb = getdetail(reportXjllbdata,xjllb_name_map,listXjllb,xjllb_name)
dic_info = {
"socialCreditCode": social_code,
"securitiesCode": com_code[2:],
"date": info_date,
"debt": listZcfzb,
"profit": listLrb,
"cash": listXjllb,
"ynFirst": ynFirst,
}
# print(dic_info)
#一个报告期结束
log.info(f'----{com_code}--{info_date}----结束')
if dic_info:
# 调凯歌接口存储数据
data = json.dumps(dic_info)
# print(data)
url_baocun = 'http://114.115.236.206:8088/sync/finance/xq'
for nnn in range(0, 3):
try:
res_baocun = requests.post(url_baocun, data=data)
break
except:
time.sleep(1)
log.info(f'----{com_code}--{info_date}--------数据发送接口完毕------------')
for nnn in range(0, 3):
try:
add_date(com_code, info_date)
break
except:
time.sleep(1)
else:
log.error(f'---{com_code}--{info_date}--')
if __name__ == '__main__':
info_date_list = []
list_date = ['2023-06-30']
list_month = ['-12-31', '-06-30']
for year in range(2022, 2020, -1):
for month in list_month:
date = str(year) + month
list_date.append(date)
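# 上述两层循环执行后 list_date 为:
# ['2023-06-30', '2022-12-31', '2022-06-30', '2021-12-31', '2021-06-30']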
opt = webdriver.ChromeOptions()
opt.add_argument(
'user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36')
opt.add_argument("--ignore-certificate-errors")
opt.add_argument("--ignore-ssl-errors")
opt.add_experimental_option("excludeSwitches", ["enable-automation"])
opt.add_experimental_option('excludeSwitches', ['enable-logging'])
opt.add_experimental_option('useAutomationExtension', False)
opt.binary_location = r'D:/Google/Chrome/Application/chrome.exe'
chromedriver = r'D:/cmd100/chromedriver.exe'
browser = webdriver.Chrome(chrome_options=opt, executable_path=chromedriver)
headers = {
'authority': 'stock.xueqiu.com',
'method': 'GET',
'path': '/v5/stock/finance/cn/income.json?symbol=NQ873286&type=all&is_detail=true&count=5&timestamp=1694414063178',
'scheme': 'https',
'Accept': 'application/json, text/plain, */*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'no-cache',
'Cookie': 'device_id=84ced64554d8060750b1528dc22a3696; s=bt110kz0n8; cookiesu=661693188384462; u=661693188384462; Hm_lvt_1db88642e346389874251b5a1eded6e3=1693188388; xq_a_token=29bdb37dee2432c294425cc9e8f45710a62643a5; xqat=29bdb37dee2432c294425cc9e8f45710a62643a5; xq_r_token=3a35db27fcf5471898becda7aa5dab6afeafe471; xq_id_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJ1aWQiOi0xLCJpc3MiOiJ1YyIsImV4cCI6MTY5NjgxMTc5NCwiY3RtIjoxNjk0NDEzNTQ2ODU4LCJjaWQiOiJkOWQwbjRBWnVwIn0.Xxu329nQq4bMtwKFJWlScnUUSWrky4T5SWkMum46c2G8la2z4g0d4nyvsO08WP-7moMffId6P3bGWuELULkbv6EHvIZgqge9-fAD4-hmLOjeRh96NsoGfyTAQK7tbnt9LhKz1fDg6SUi8loMqYgM7l-4g-ZM4B6zrZ5hKWdQJFLy0-V8Wzx7HTFYZSX3FNSsbgGqHlW4vykIpsRaNeOOX1M6LYdt6BhbAi1Iv4TflB08LIdu6F1n4dTRbmPq1KCndb2LsLR2HrJZmqmHJB9WMzwlVcIGdz778_CutNrwuWgJbWtb-s3dSESzO0WWw1uIIGZUvRl1D0KSl0P_GQLw9w; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1694414056',
'Origin': 'https://xueqiu.com',
'Pragma': 'no-cache',
'Referer': 'https://xueqiu.com/snowman/S/NQ873286/detail',
'Sec-Ch-Ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'Sec-Ch-Ua-Mobile': '?0',
'Sec-Ch-Ua-Platform': '"Windows"',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
}
#中英文名称映射
lrb_name_map = {
'营业总收入':'total_revenue',
'其中:营业收入':'revenue',
'营业总成本':'operating_costs',
'其中:营业成本':'operating_cost',
'营业税金及附加':'operating_taxes_and_surcharge',
'销售费用':'sales_fee',
'管理费用':'manage_fee',
'研发费用':'rad_cost',
'财务费用':'financing_expenses',
'其中:利息费用':'finance_cost_interest_fee',
'利息收入':'finance_cost_interest_income',
'资产减值损失':'asset_impairment_loss',
'信用减值损失':'credit_impairment_loss',
'加:公允价值变动收益':'',
'投资收益':'invest_income',
'其中:对联营企业和合营企业的投资收益':'',
'资产处置收益':'asset_disposal_income',
'其他收益':'other_income',
'营业利润':'op',
'加:营业外收入':'non_operating_income',
'其中:非流动资产处置利得':'',
'减:营业外支出':'non_operating_payout',
'其中:非流动资产处置损失':'',
'利润总额':'profit_total_amt',
'减:所得税费用':'income_tax_expenses',
'净利润差额(合计平衡项目)':'',
'净利润':'net_profit',
'(一)持续经营净利润':'continous_operating_np',
'归属于母公司股东的净利润':'net_profit_atsopc',
'少数股东损益':'minority_gal',
'扣除非经常性损益后的净利润':'net_profit_after_nrgal_atsolc',
'基本每股收益':'basic_eps',
'稀释每股收益':'dlt_earnings_per_share',
'其他综合收益':'othr_compre_income',
'归属母公司所有者的其他综合收益':'',
'综合收益总额':'total_compre_income',
'归属于母公司股东的综合收益总额':'net_profit_atsopc',
'归属于少数股东的综合收益总额':'total_compre_income_atms'
}
zcfzb_name_map = {
'货币资金':'currency_funds',
'交易性金融资产':'',
'应收票据及应收账款':'ar_and_br',
'其中:应收票据':'bills_receivable',
'应收账款':'account_receivable',
'预付款项':'pre_payment',
'应收利息':'',
'应收股利':'',
'其他应收款':'othr_receivables',
'存货':'inventory',
'合同资产':'',
'划分为持有待售的资产':'',
'一年内到期的非流动资产':'nca_due_within_one_year',
'其他流动资产':'intangible_assets',
'流动资产合计':'total_current_assets',
'可供出售金融资产':'',
'持有至到期投资':'',
'长期应收款':'',
'长期股权投资':'',
'其他权益工具投资':'',
'其他非流动金融资产':'',
'投资性房地产':'',
'固定资产合计':'fixed_asset_sum',
'其中:固定资产':'fixed_asset',
'固定资产清理':'',
'在建工程合计':'construction_in_process_sum',
'其中:在建工程':'construction_in_process',
'工程物资':'',
'生产性生物资产':'',
'油气资产':'',
'无形资产':'intangible_assets',
'开发支出':'dev_expenditure',
'商誉':'',
'长期待摊费用':'lt_deferred_expense',
'递延所得税资产':'dt_assets',
'其他非流动资产':'othr_noncurrent_assets',
'非流动资产合计':'total_noncurrent_assets',
'资产合计':'total_assets',
'短期借款':'st_loan',
'交易性金融负债':'',
'衍生金融负债':'',
'应付票据及应付账款':'accounts_payable',
'应付票据':'',
'应付账款':'',
'预收款项':'',
'合同负债':'contract_liabilities',
'应付职工薪酬':'payroll_payable',
'应交税费':'tax_payable',
'应付利息':'',
'应付股利':'',
'其他应付款':'othr_payables',
'划分为持有待售的负债':'',
'一年内到期的非流动负债':'noncurrent_liab_due_in1y',
'其他流动负债':'othr_current_liab',
'流动负债合计':'total_current_liab',
'长期借款':'lt_loan',
'应付债券':'',
'长期应付款合计':'lt_payable_sum',
'长期应付款':'lt_payable',
'专项应付款':'',
'预计负债':'',
'递延所得税负债':'dt_liab',
'递延收益-非流动负债':'',
'其他非流动负债':'',
'非流动负债合计':'total_noncurrent_liab',
'负债合计':'total_liab',
'实收资本(或股本)':'shares',
'其他权益工具':'',
'永续债':'',
'资本公积':'capital_reserve',
'减:库存股':'',
'其他综合收益':'',
'专项储备':'special_reserve',
'盈余公积':'earned_surplus',
'未分配利润':'undstrbtd_profit',
'一般风险准备':'',
'外币报表折算差额':'',
'归属于母公司股东权益合计':'total_quity_atsopc',
'少数股东权益':'minority_equity',
'股东权益合计':'total_holders_equity',
'负债和股东权益总计':'total_assets'
}
xjllb_name_map = {
'销售商品、提供劳务收到的现金':'cash_received_of_sales_service',
'收到的税费返还':'refund_of_tax_and_levies',
'收到其他与经营活动有关的现金':'cash_received_of_othr_oa',
'经营活动现金流入小计':'sub_total_of_ci_from_oa',
'购买商品、接受劳务支付的现金':'goods_buy_and_service_cash_pay',
'支付给职工以及为职工支付的现金':'cash_paid_to_employee_etc',
'支付的各项税费':'payments_of_all_taxes',
'支付其他与经营活动有关的现金':'othrcash_paid_relating_to_oa',
'经营活动现金流出小计':'sub_total_of_cos_from_oa',
'经营活动产生的现金流量净额':'ncf_from_oa',
'收回投资收到的现金':'cash_received_of_dspsl_invest',
'取得投资收益收到的现金':"othrcash_paid_relating_to_fa",
'处置固定资产、无形资产和其他长期资产收回的现金净额':'net_cash_of_disposal_assets',
'处置子公司及其他营业单位收到的现金净额':'',
'收到其他与投资活动有关的现金':'',
'投资活动现金流入小计':'sub_total_of_ci_from_ia',
'购建固定资产、无形资产和其他长期资产支付的现金':'cash_paid_for_assets',
'投资支付的现金':'invest_paid_cash',
'取得子公司及其他营业单位支付的现金净额':'',
'支付其他与投资活动有关的现金':'',
'投资活动现金流出小计':'sub_total_of_cos_from_ia',
'投资活动产生的现金流量净额':'ncf_from_ia',
'筹资活动产生的现金流量':'',
'吸收投资收到的现金':'cash_received_of_absorb_invest',
'其中:子公司吸收少数股东投资收到的现金':'',
'取得借款收到的现金':'cash_received_of_borrowing',
'发行债券收到的现金':'',
'收到其他与筹资活动有关的现金':'cash_received_of_othr_fa',
'筹资活动现金流入小计':'sub_total_of_ci_from_fa',
'偿还债务支付的现金':'cash_pay_for_debt',
'分配股利、利润或偿付利息支付的现金':'cash_paid_of_distribution',
'其中:子公司支付给少数股东的股利':'',
'支付其他与筹资活动有关的现金':'othrcash_paid_relating_to_fa',
'筹资活动现金流出小计':'sub_total_of_cos_from_fa',
'筹资活动产生的现金流量净额':'ncf_from_fa',
'汇率变动对现金及现金等价物的影响':'',
'现金及现金等价物净增加额':'net_increase_in_cce',
'加:期初现金及现金等价物余额':'final_balance_of_cce',
'期末现金及现金等价物余额':'final_balance_of_cce'
}
while True:
# social_code = baseCore.redicPullData('NQEnterprise:nq_finance')
social_code = '9144030067312725XJ'
if social_code is None:
log.info('已没有数据----------等待')
time.sleep(20)
continue
log.info(f'========正在采集{social_code}===========')
data = baseCore.getInfomation(social_code)
# social_code = data[1]
com_code = data[3]
start = time.time()
com_code = 'NQ' + com_code
for info_date in list_date:
dic_info = getinfo(info_date,com_code,social_code)
++ /dev/null
import json
import random
import re
import time
import fitz
import pymysql
import requests
from bs4 import BeautifulSoup
from kafka import KafkaProducer
from datetime import datetime
from base import BaseCore
# from fdfs_client.client import get_tracker_conf, Fdfs_client
baseCore = BaseCore.BaseCore()
log = baseCore.getLogger()
cnx = baseCore.cnx
cursor = baseCore.cursor
cnx_ = baseCore.cnx_
cursor_ = baseCore.cursor_
# tracker_conf = get_tracker_conf('./client.conf')
# client = Fdfs_client(tracker_conf)
taskType = '企业公告/证监会/新三板'
#todo:股转公告和挂牌审核包含在公司公告中,没有单独的id
type_map = {
'zljgcs':'自律监管措施',
'wxh':'问询函',
'jlcf':'纪律处分',
'9506':'公司公告',
'9509':'公司公告',
'9503':'公司公告',
'9504':'公司公告',
'9505':'公司公告',
'9510':'公司公告',
'9520':'公司公告',
'9605':'公司公告',
'9533':'公司公告',
}
type_id_map = {
'公司公告': '8',
'股转公告': '9',
'挂牌审核': '10',
'自律监管措施': '11',
'问询函': '12',
'纪律处分': '13'
}
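# 映射示例:公告列表里 rp['disclosureType'] 为 '9506' 时,type_map 得到 '公司公告',
# 再查 type_id_map 得到 kfkid = '8';'wxh' 依次得到 '问询函' 和 '12'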
def secrchATT(item_id, name, type_id):
sel_sql = '''select id from clb_sys_attachment where item_id = %s and name = %s and type_id=%s '''
cursor_.execute(sel_sql, (item_id, name, type_id))
selects = cursor_.fetchone()
return selects
# 插入到att表 返回附件id
def tableUpdate(retData, com_name, year, pdf_name, num):
item_id = retData['item_id']
type_id = retData['type_id']
group_name = retData['group_name']
path = retData['path']
full_path = retData['full_path']
category = retData['category']
file_size = retData['file_size']
status = retData['status']
create_by = retData['create_by']
page_size = retData['page_size']
create_time = retData['create_time']
order_by = num
selects = secrchATT(item_id, pdf_name, type_id)
if selects:
log.info(f'com_name:{com_name}已存在')
id = selects[0]
return id
else:
Upsql = '''insert into clb_sys_attachment(year,name,type_id,item_id,group_name,path,full_path,category,file_size,order_by,status,create_by,create_time,page_size) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'''
values = (
year, pdf_name, type_id, item_id, group_name, path, full_path, category, file_size, order_by,
status, create_by,
create_time, page_size)
cursor_.execute(Upsql, values) # 插入
cnx_.commit() # 提交
log.info("更新完成:{}".format(Upsql))
selects = secrchATT(item_id, pdf_name, type_id)
id = selects[0]
return id
def RequestUrl(url, payload, social_code,start_time):
# ip = get_proxy()[random.randint(0, 3)]
pattern = r"\(\[(.*?)\]\)"
retJsonData = ''
response = None
for m in range(0, 3):
try:
response = requests.post(url=url, headers=headers, data=payload)  # ,proxies=ip)
response.encoding = response.apparent_encoding
break
except Exception as e:
log.error(f"request请求异常----{m}-----{e}")
# 检查响应状态码
if response is not None and response.status_code == 200:
# 请求成功,处理响应数据
soup = BeautifulSoup(response.text, 'html.parser')
match = re.search(pattern, str(soup))
if match:
retJsonData = json.loads(match.group(1))
else:
# 请求失败,输出错误信息
log.error(f'请求失败:{url}')
state = 0
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, url, '请求失败')
return retJsonData
def getPages(url,com_code):
payload = f"startTime=&page=1&companyCd={com_code}&keyword=&disclosureType%5B%5D=9506&disclosureType%5B%5D=9509&disclosureType%5B%5D=9503&disclosureType%5B%5D=9504&disclosureType%5B%5D=9505&disclosureType%5B%5D=9510&disclosureType%5B%5D=9520&disclosureType%5B%5D=9605&disclosureType%5B%5D=9533&wxhType=wxh&zljgcsType=zljgcs&jlcfType=jlcf&newThreeArray%5B%5D=0&newThreeArray%5B%5D=1&newThreeArray%5B%5D=2&siteId=1&sortfield=publishDate&sorttype=desc&keyword1="
retJsonData = RequestUrl(url, payload, social_code, start_time)
# 第一次请求获取页数
# print(retJsonData)
totalPages = retJsonData['listInfo']['totalPages']
print(totalPages)
return totalPages
def InsterInto(short_name, social_code, pdf_url):
insert = False
sel_sql = '''select social_credit_code,source_address from brpa_source_article where social_credit_code = %s and source_address = %s and origin='全国中小企业股份转让系统' and type='1' '''
cursor.execute(sel_sql, (social_code, pdf_url))
selects = cursor.fetchone()
if selects:
print(f'com_name:{short_name}、{pdf_url}已存在')
return insert
# 信息插入数据库
try:
insert_sql = '''insert into brpa_source_article(social_credit_code,source_address,origin,type,create_time) values(%s,%s,%s,%s,now())'''
list_info = [
social_code,
pdf_url,
'全国中小企业股份转让系统',
'1',
]
#144数据库
cursor.execute(insert_sql, tuple(list_info))
cnx.commit()
insert = True
return insert
except:
state = 0
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, pdf_url, '数据库传输失败')
return insert
def GetContent(pdf_url, pdf_name, social_code, year, pub_time, start_time,com_name,num,kfkid):
#上传至文件服务器
retData = baseCore.upLoadToServe(pdf_url,8,social_code)
#附件插入att数据库
if not retData['state']:
log.info(f'====pdf解析失败====')
return False
num = num + 1
att_id = tableUpdate(retData,com_name,year,pdf_name,num)
content = retData['content']
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
dic_news = {
'attachmentIds': att_id,
'author': '',
'content': content,
'contentWithTag': '',
'createDate': time_now,
'deleteFlag': '0',
'id': '',
'keyWords': '',
'lang': 'zh',
'origin': '全国中小企业股份转让系统',
'publishDate': pub_time,
'sid': '1684032033495392257',
'sourceAddress': pdf_url, # 原文链接
'summary': '',
'title': pdf_name,
'type': kfkid,
'socialCreditCode': social_code,
'year': year
}
# print(dic_news)
# 将相应字段通过kafka传输保存
try:
producer = KafkaProducer(bootstrap_servers=['114.115.159.144:9092'])
kafka_result = producer.send("researchReportTopic", json.dumps(dic_news, ensure_ascii=False).encode('utf8'))
print(kafka_result.get(timeout=10))
dic_result = {
'success': 'true',
'message': '操作成功',
'code': '200',
}
print(dic_result)
return True
except Exception as e:
dic_result = {
'success': 'false',
'message': '操作失败',
'code': '204',
'e': e
}
state = 0
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, pdf_url, 'Kafka操作失败')
print(dic_result)
return False
# 采集信息
def SpiderByZJH(url, dic_info, start_time,num): # dic_info 数据库中获取到的基本信息
okCount = 0
errorCount = 0
social_code = dic_info[2]
short_name = dic_info[4]
com_name = dic_info[1]
totalPages = getPages(url, com_code)
for i in range(0, int(totalPages)):
payload = f"startTime=&page={i}&companyCd={com_code}&keyword=&disclosureType%5B%5D=9506&disclosureType%5B%5D=9509&disclosureType%5B%5D=9503&disclosureType%5B%5D=9504&disclosureType%5B%5D=9505&disclosureType%5B%5D=9510&disclosureType%5B%5D=9520&disclosureType%5B%5D=9605&disclosureType%5B%5D=9533&wxhType=wxh&zljgcsType=zljgcs&jlcfType=jlcf&newThreeArray%5B%5D=0&newThreeArray%5B%5D=1&newThreeArray%5B%5D=2&siteId=1&sortfield=publishDate&sorttype=desc&keyword1="
retjson = RequestUrl(url, payload, social_code, start_time)
content_list = retjson['listInfo']['content']
for rp in content_list:
pdf_url = 'https://www.neeq.com.cn' + rp['destFilePath']
name_pdf = rp['disclosureTitle']
rp_type = type_map[rp['disclosureType']]
kfkid = type_id_map[rp_type]
publishDate = rp['publishDate']
year = publishDate[:4]
# 数据入库
insert = InsterInto(short_name, social_code, name_pdf)
if insert:
# # 公告信息列表
# okCount = okCount + 1
# 解析PDF内容,先获取PDF链接 下载 解析成功,解析失败 ,传输成功,传输失败
log.info(f'======={short_name}===========插入公告库成功')
result = GetContent(pdf_url, name_pdf, social_code, year, publishDate, start_time, com_name, num,kfkid)
if result:
# 公告信息列表
okCount = okCount + 1
log.info(f'{short_name}==============解析传输操作成功')
state = 1
takeTime = baseCore.getTimeCost(start_time, time.time())
baseCore.recordLog(social_code, taskType, state, takeTime, pdf_url, '')
pass
else:
errorCount += 1
# time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
log.error(f'{short_name}=============解析或传输操作失败')
continue
else:
log.info(f'======={short_name}===========已存在')
continue
if __name__ == '__main__':
num = 0
headers = {
'Accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Content-Length': '442',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie': 'HOY_TR=FCEXZOPIKBGTYHDL,945C236781ABDFE0,xfslaodpytzTmieq; Hm_lvt_b58fe8237d8d72ce286e1dbd2fc8308c=1694480321; Hm_lpvt_b58fe8237d8d72ce286e1dbd2fc8308c=1694597182',
'Host': 'www.neeq.com.cn',
'Origin': 'https://www.neeq.com.cn',
'Pragma': 'no-cache',
'Referer': 'https://www.neeq.com.cn/products/neeq_listed_companies/related_announcement.html?companyCode=430054',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"'
}
dic_parms = {}
# 读取数据库获取股票代码 简称 以及 社会信用代码
while True:
start_time = time.time()
# # 获取企业信息
social_code = baseCore.redicPullData('NQEnterprise:nq_finance')
# social_code = '9110000071092841XX'
# com_code = '430045'
# short_name = '超毅网络'
dic_info = {}
# # 判断 如果Redis中已经没有数据,则等待
if social_code is None:
time.sleep(20)
continue
data = baseCore.getInfomation(social_code)
com_code = data[3]
short_name = data[4]
url = 'https://www.neeq.com.cn/disclosureInfoController/productInfoResult.do'
#翻页 page 0~ 25 totalPages
SpiderByZJH(url, data, start_time, num)
cursor.close()
cnx.close()
# cursor_.close()
# cnx_.close()
# 释放资源
baseCore.close()
# connect timeout in seconds
# default value is 30s
connect_timeout=300
# network timeout in seconds
# default value is 30s
network_timeout=600
# the base path to store log files
#base_path=/home/tarena/django-project/cc_shop1/cc_shop1/logs
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=114.115.215.96:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf
\ No newline at end of file
# __init__.py
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
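# 例如 __version__ 为 '2.2.0' 时,VERSION 即 (2, 2, 0),便于按元组比较版本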
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: client.py
'''
Client module for Fastdfs 3.08
author: scott yuan scottzer8@gmail.com
date: 2012-06-21
'''
import os
import sys
from fdfs_client.utils import *
from fdfs_client.tracker_client import *
from fdfs_client.storage_client import *
from fdfs_client.exceptions import *
def get_tracker_conf(conf_path='client.conf'):
cf = Fdfs_ConfigParser()
tracker = {}
try:
cf.read(conf_path)
timeout = cf.getint('__config__', 'connect_timeout')
tracker_list = cf.get('__config__', 'tracker_server')
if isinstance(tracker_list, str):
tracker_list = [tracker_list]
tracker_ip_list = []
for tr in tracker_list:
tracker_ip, tracker_port = tr.split(':')
tracker_ip_list.append(tracker_ip)
tracker['host_tuple'] = tuple(tracker_ip_list)
tracker['port'] = int(tracker_port)
tracker['timeout'] = timeout
tracker['name'] = 'Tracker Pool'
except:
raise
return tracker
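# 以上文 client.conf 为例,本函数大致返回(示意):
# {'host_tuple': ('114.115.215.96',), 'port': 22122, 'timeout': 300, 'name': 'Tracker Pool'}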
class Fdfs_client(object):
'''
Class Fdfs_client implemented Fastdfs client protol ver 3.08.
It's useful upload, download, delete file to or from fdfs server, etc. It's uses
connection pool to manage connection to server.
'''
def __init__(self, trackers, poolclass=ConnectionPool):
self.trackers = trackers
self.tracker_pool = poolclass(**self.trackers)
self.timeout = self.trackers['timeout']
return None
def __del__(self):
try:
self.tracker_pool.destroy()
self.tracker_pool = None
except:
pass
def upload_by_filename(self, filename, meta_dict=None):
'''
Upload a file to Storage server.
arguments:
@filename: string, name of file that will be uploaded
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} meta_dict can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : local_file_name,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_filename(tc, store_serv, filename, meta_dict)
def upload_by_file(self, filename, meta_dict=None):
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_file(tc, store_serv, filename, meta_dict)
def upload_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string, buffer
@file_ext_name: string, file extend name
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_buffer(tc, store_serv, filebuffer, file_ext_name, meta_dict)
def upload_slave_by_filename(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_filename(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_file(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_file(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_buffer(self, filebuffer, remote_file_id, meta_dict=None, file_ext_name=None):
'''
Upload slave file by buffer
arguments:
@filebuffer: string
@remote_file_id: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_slave_by_buffer(tc, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name)
def upload_appender_by_filename(self, local_filename, meta_dict=None):
'''
Upload an appender file by filename.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_filename(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_file(self, local_filename, meta_dict=None):
'''
Upload an appender file by file.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_file(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string
@file_ext_name: string, can be null
@meta_dict: dictionary, can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_buffer(tc, store_serv, filebuffer, meta_dict, file_ext_name)
def delete_file(self, remote_file_id):
'''
Delete a file from Storage server.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@return tuple ('Delete file successed.', remote_file_id, storage_ip)
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in delete file)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_delete_file(tc, store_serv, remote_filename)
def download_to_file(self, local_filename, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server.
arguments:
@local_filename: string, local name of file
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@downbytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : local_filename,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
file_offset = int(offset)
download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_download_to_file(tc, store_serv, local_filename, file_offset, download_bytes,
remote_filename)
def download_to_buffer(self, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server and store in buffer.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@down_bytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : file_buffer,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
file_offset = int(offset)
download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
file_buffer = None
return store.storage_download_to_buffer(tc, store_serv, file_buffer, file_offset, download_bytes,
remote_filename)
def list_one_group(self, group_name):
'''
List one group information.
arguments:
@group_name: string, group name will be list
@return Group_info, instance
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_one_group(group_name)
def list_servers(self, group_name, storage_ip=None):
'''
List all storage servers information in a group
arguments:
@group_name: string
@return dictionary {
'Group name' : group_name,
'Servers' : server list,
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_servers(group_name, storage_ip)
def list_all_groups(self):
'''
List all group information.
@return dictionary {
'Groups count' : group_count,
'Groups' : list of groups
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_all_groups()
def get_meta_data(self, remote_file_id):
'''
Get meta data of remote file.
arguments:
@remote_fileid: string, remote file id
@return dictionary, meta data
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in get meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_get_metadata(tc, store_serv, remote_filename)
def set_meta_data(self, remote_file_id, meta_dict, op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
'''
Set meta data of remote file.
arguments:
@remote_file_id: string
@meta_dict: dictionary
@op_flag: char, 'O' for overwrite, 'M' for merge
@return dictionary {
'Status' : status,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in set meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
try:
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
status = store.storage_set_metadata(tc, store_serv, remote_filename, meta_dict)
except (ConnectionError, ResponseError, DataError):
raise
# if status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' % remote_file_id)
if status != 0:
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
ret_dict = {}
ret_dict['Status'] = 'Set meta data success.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def append_by_filename(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_filename(tc, store_serv, local_filename, appended_filename)
def append_by_file(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_file(tc, store_serv, local_filename, appended_filename)
def append_by_buffer(self, file_buffer, remote_fileid):
if not file_buffer:
raise DataError('[-] Error: file_buffer can not be null.')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_buffer(tc, store_serv, file_buffer, appended_filename)
def truncate_file(self, truncated_filesize, appender_fileid):
'''
Truncate file in Storage server.
arguments:
@truncated_filesize: long
@appender_fileid: remote_fileid
@return: dictionary {
'Status' : 'Truncate successed.',
'Storage IP' : storage_ip
}
'''
trunc_filesize = int(truncated_filesize)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: appender_fileid is invalid.(truncate)')
group_name, appender_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_truncate_file(tc, store_serv, trunc_filesize, appender_filename)
def modify_by_filename(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_filename(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_file(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_file(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_buffer(self, filebuffer, appender_fileid, offset=0):
'''
Modify a file in Storage server by buffer.
arguments:
@filebuffer: string, file buffer
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: filebuffer can not be null.(modify)')
filesize = len(filebuffer)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_buffer(tc, store_serv, filebuffer, file_offset, filesize, appender_filename)
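# 用法示意(假设性示例,文件名与元数据仅作演示;需要可达的 tracker/storage 服务,client.conf 见上文):
#   trackers = get_tracker_conf('client.conf')
#   fdfs = Fdfs_client(trackers)
#   ret = fdfs.upload_by_filename('test.pdf', {'ext_name': 'pdf'})
#   print(ret['Remote file_id'])              # 形如 'group1/M00/00/00/xxxxxx.pdf'
#   fdfs.delete_file(ret['Remote file_id'])   # 用返回的 file_id 删除远端文件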
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: connection.py
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
'''Manage TCP communication to and from Fastdfs Server.'''
def __init__(self, **conn_kwargs):
self.pid = os.getpid()
self.host_tuple = conn_kwargs['host_tuple']
self.remote_port = conn_kwargs['port']
self.remote_addr = None
self.timeout = conn_kwargs['timeout']
self._sock = None
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
'''Connect to fdfs server.'''
if self._sock:
return
try:
sock = self._connect()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = sock
# print '[+] Create a connection success.'
# print '\tLocal address is %s:%s.' % self._sock.getsockname()
# print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
def _connect(self):
'''Create TCP socket. The host is random one of host_tuple.'''
self.remote_addr = random.choice(self.host_tuple)
# print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.settimeout(self.timeout)
sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
return sock
def disconnect(self):
'''Disconnect from fdfs server.'''
if self._sock is None:
return
try:
self._sock.close()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = None
def get_sock(self):
return self._sock
def _errormessage(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message" '''
if len(exception.args) == 1:
return "[-] Error: connect to %s:%s. %s." % (self.remote_addr, self.remote_port, exception.args[0])
else:
return "[-] Error: %s connect to %s:%s. %s." % \
(exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# end class Connection
# start ConnectionPool
class ConnectionPool(object):
'''Generic Connection Pool'''
def __init__(self, name='', conn_class=Connection,
max_conn=None, **conn_kwargs):
self.pool_name = name
self.pid = os.getpid()
self.conn_class = conn_class
self.max_conn = max_conn or 2 ** 31
self.conn_kwargs = conn_kwargs
self._conns_created = 0
self._conns_available = []
self._conns_inuse = set()
# print '[+] Create a connection pool success, name: %s.' % self.pool_name
def _check_pid(self):
if self.pid != os.getpid():
self.destroy()
self.__init__(name=self.pool_name, conn_class=self.conn_class, max_conn=self.max_conn, **self.conn_kwargs)
def make_conn(self):
'''Create a new connection.'''
if self._conns_created >= self.max_conn:
raise ConnectionError('[-] Error: Too many connections.')
num_try = 10
while True:
try:
if num_try <= 0:
sys.exit()
conn_instance = self.conn_class(**self.conn_kwargs)
conn_instance.connect()
self._conns_created += 1
break
except ConnectionError as e:
print(e)
num_try -= 1
conn_instance = None
return conn_instance
def get_connection(self):
'''Get a connection from pool.'''
self._check_pid()
try:
conn = self._conns_available.pop()
# print '[+] Get a connection from pool %s.' % self.pool_name
# print '\tLocal address is %s:%s.' % conn._sock.getsockname()
# print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
except IndexError:
conn = self.make_conn()
self._conns_inuse.add(conn)
return conn
def remove(self, conn):
'''Remove connection from pool.'''
if conn in self._conns_inuse:
self._conns_inuse.remove(conn)
self._conns_created -= 1
if conn in self._conns_available:
self._conns_available.remove(conn)
self._conns_created -= 1
def destroy(self):
'''Disconnect all connections in the pool.'''
all_conns = chain(self._conns_inuse, self._conns_available)
for conn in all_conns:
conn.disconnect()
# print '[-] Destroy connection pool %s.' % self.pool_name
def release(self, conn):
'''Release the connection back to the pool.'''
self._check_pid()
if conn.pid == self.pid:
self._conns_inuse.remove(conn)
self._conns_available.append(conn)
# print '[-] Release connection back to pool %s.' % self.pool_name
# end ConnectionPool class
def tcp_recv_response(conn, bytes_size, buffer_size=4096):
'''Receive response from server.
It is not include tracker header.
arguments:
@conn: connection
@bytes_size: int, will be received byte_stream size
@buffer_size: int, receive buffer size
@Return: tuple,(response, received_size)
'''
recv_buff = []
total_size = 0
try:
while bytes_size > 0:
resp = conn._sock.recv(buffer_size)
if not resp:
# connection closed by peer, avoid busy-looping on empty reads
break
recv_buff.append(resp)
total_size += len(resp)
bytes_size -= len(resp)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: (%s)' % e.args)
return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
'''Send buffer to server.
It is not include tracker header.
arguments:
@conn: connection
@bytes_stream: transmit buffer
@Return bool
'''
try:
conn._sock.sendall(bytes_stream)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while writing to socket: (%s)' % e.args)
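# 用法示意(假设性示例;Fdfs_client 内部即按 get_tracker_conf 的结果这样构造连接池,
# host/port 沿用上文 client.conf,真正收发数据需要可达的 tracker 服务):
#   pool = ConnectionPool(name='Tracker Pool', host_tuple=('114.115.215.96',), port=22122, timeout=300)
#   conn = pool.get_connection()      # 取连接,空闲列表为空时新建 TCP 连接
#   try:
#       ...                           # 配合 tcp_send_data / tcp_recv_response 收发协议包
#   finally:
#       pool.release(conn)            # 用完放回连接池复用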
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: exceptions.py
'''Core exceptions raised by fdfs client'''
class FDFSError(Exception):
pass
class ConnectionError(FDFSError):
pass
class ResponseError(FDFSError):
pass
class InvaildResponse(FDFSError):
pass
class DataError(FDFSError):
pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_protol.py
import struct
import socket
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# define FDFS protol constans
TRACKER_PROTO_CMD_STORAGE_JOIN = 81
FDFS_PROTO_CMD_QUIT = 82
TRACKER_PROTO_CMD_STORAGE_BEAT = 83 # storage heart beat
TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84 # report disk usage
TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85 # repl new storage servers
TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86 # src storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87 # dest storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88 # sync done notify
TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89 # report src last synced time as dest server
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79 # dest storage query sync src storage server
TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78 # storage server report it's ip changed
TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77 # storage server request storage server's changelog
TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76 # report specified storage server status
TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75 # storage server request parameters
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74 # storage report trunk free space
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73 # storage report current trunk file id
TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72 # storage get current trunk file id
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61 # start of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62 # end of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63 # tracker get a system data file
TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64 # tracker get status of other tracker
TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65 # tracker ping leader
TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66 # notify next leader to other trackers
TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67 # commit next leader to other trackers
TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
TRACKER_PROTO_CMD_RESP = 100
FDFS_PROTO_CMD_ACTIVE_TEST = 111 # active test, tracker and storage both support since V1.28
STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9 # ip as tracker client
STORAGE_PROTO_CMD_UPLOAD_FILE = 11
STORAGE_PROTO_CMD_DELETE_FILE = 12
STORAGE_PROTO_CMD_SET_METADATA = 13
STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
STORAGE_PROTO_CMD_GET_METADATA = 15
STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
STORAGE_PROTO_CMD_CREATE_LINK = 20
STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23 # create appender file
STORAGE_PROTO_CMD_APPEND_FILE = 24 # append file
STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26 # fetch binlog of one store path
STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27 # since V3.00
STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28 # since V3.00
STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29 # since V3.00
STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30 # since V3.00
STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31 # since V3.07
STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32 # since V3.07
STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33 # since V3.07
STORAGE_PROTO_CMD_MODIFY_FILE = 34 # since V3.08
STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35 # since V3.08
STORAGE_PROTO_CMD_TRUNCATE_FILE = 36 # since V3.08
STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37 # since V3.08
# for overwrite all old metadata
STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
# for replace, insert when the meta item not exist, otherwise update it
STORAGE_SET_METADATA_FLAG_MERGE = 'M'
STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
FDFS_RECORD_SEPERATOR = '\x01'
FDFS_FIELD_SEPERATOR = '\x02'
# common constants
FDFS_GROUP_NAME_MAX_LEN = 16
IP_ADDRESS_SIZE = 16
FDFS_PROTO_PKG_LEN_SIZE = 8
FDFS_PROTO_CMD_SIZE = 1
FDFS_PROTO_STATUS_SIZE = 1
FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
FDFS_MAX_SERVERS_EACH_GROUP = 32
FDFS_MAX_GROUPS = 512
FDFS_MAX_TRACKERS = 16
FDFS_DOMAIN_NAME_MAX_LEN = 128
FDFS_MAX_META_NAME_LEN = 64
FDFS_MAX_META_VALUE_LEN = 256
FDFS_FILE_PREFIX_MAX_LEN = 16
FDFS_LOGIC_FILE_PATH_LEN = 10
FDFS_TRUE_FILE_PATH_LEN = 6
FDFS_FILENAME_BASE64_LENGTH = 27
FDFS_TRUNK_FILE_INFO_LEN = 16
FDFS_FILE_EXT_NAME_MAX_LEN = 6
FDFS_SPACE_SIZE_BASE_INDEX = 2 # storage space size based (MB)
FDFS_UPLOAD_BY_BUFFER = 1
FDFS_UPLOAD_BY_FILENAME = 2
FDFS_UPLOAD_BY_FILE = 3
FDFS_DOWNLOAD_TO_BUFFER = 1
FDFS_DOWNLOAD_TO_FILE = 2
FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (
FDFS_LOGIC_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
FDFS_TRUNK_FILENAME_LENGTH = (
FDFS_TRUE_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_TRUNK_FILE_INFO_LEN + 1 + FDFS_FILE_EXT_NAME_MAX_LEN)
FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + (FDFS_LOGIC_FILE_PATH_LEN - FDFS_TRUE_FILE_PATH_LEN))
FDFS_VERSION_SIZE = 6
TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
# status code, order is important!
FDFS_STORAGE_STATUS_INIT = 0
FDFS_STORAGE_STATUS_WAIT_SYNC = 1
FDFS_STORAGE_STATUS_SYNCING = 2
FDFS_STORAGE_STATUS_IP_CHANGED = 3
FDFS_STORAGE_STATUS_DELETED = 4
FDFS_STORAGE_STATUS_OFFLINE = 5
FDFS_STORAGE_STATUS_ONLINE = 6
FDFS_STORAGE_STATUS_ACTIVE = 7
FDFS_STORAGE_STATUS_RECOVERY = 9
FDFS_STORAGE_STATUS_NONE = 99
class Storage_server(object):
'''Class storage server for upload.'''
def __init__(self):
self.ip_addr = None
self.port = None
self.group_name = ''
self.store_path_index = 0
# Class tracker_header
class Tracker_header(object):
'''
Class for Pack or Unpack tracker header
struct tracker_header{
char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
char cmd,
char status,
}
'''
def __init__(self):
self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
self.st = struct.Struct(self.fmt)
self.pkg_len = 0
self.cmd = 0
self.status = 0
def _pack(self, pkg_len=0, cmd=0, status=0):
return self.st.pack(pkg_len, cmd, status)
def _unpack(self, bytes_stream):
self.pkg_len, self.cmd, self.status = self.st.unpack(bytes_stream)
return True
def header_len(self):
return self.st.size
def send_header(self, conn):
'''Send Tracker header to server.'''
header = self._pack(self.pkg_len, self.cmd, self.status)
try:
conn._sock.sendall(header)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while writing to socket: %s' % (e.args,))
def recv_header(self, conn):
'''Receive response from server.
if success, class member (pkg_len, cmd, status) is response.
'''
try:
header = conn._sock.recv(self.header_len())
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: %s' % (e.args,))
self._unpack(header)
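# Hedged note (not part of the original source): with fmt '!QBB' a packed
# tracker header is always FDFS_PROTO_PKG_LEN_SIZE + FDFS_PROTO_CMD_SIZE +
# FDFS_PROTO_STATUS_SIZE = 10 bytes on the wire, e.g.
#   Tracker_header()._pack(pkg_len=16, cmd=101, status=0)  # 101 is a placeholder command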
def fdfs_pack_metadata(meta_dict):
ret = ''
for key in meta_dict:
ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
return ret[0:-1]
def fdfs_unpack_metadata(bytes_stream):
li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
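# Hedged, illustrative round-trip of the metadata helpers above; the sample
# keys and values are placeholders and are not part of the original module.
if __name__ == '__main__':
    _meta = {'width': '160px', 'height': '80px'}
    _packed = fdfs_pack_metadata(_meta)
    # records are joined by FDFS_RECORD_SEPERATOR ('\x01'),
    # key/value pairs by FDFS_FIELD_SEPERATOR ('\x02')
    assert fdfs_unpack_metadata(_packed) == _meta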
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_test.py
import os
import sys
import time
try:
from fdfs_client.client import *
from fdfs_client.exceptions import *
except ImportError:
import_path = os.path.abspath('../')
sys.path.append(import_path)
from fdfs_client.client import *
from fdfs_client.exceptions import *
def usage():
s = 'Usage: python fdfs_test.py {options} [{local_filename} [{remote_file_id}]]\n'
    s += 'options: upfile, upfileex, upbuffer, downfile, downbuffer, delete, listgroup, listall, listsrv\n'
s += ' upslavefile, upslavebuffer, upappendfile, upappendbuffer\n'
s += '\tupfile {local_filename}\n'
s += '\tupbuffer {local_filename}\n'
s += '\tdownfile {local_filename} {remote_file_id}\n'
s += '\tdownbuffer {remote_file_id}\n'
s += '\tdelete {remote_file_id}\n'
s += '\tlistgroup {group_name}\n'
s += '\tlistall \n'
s += '\tlistsrv {group_name} [storage_ip]\n'
s += '\tsetmeta {remote_file_id}\n'
s += '\tgetmeta {remote_file_id}\n'
s += '\tupslavefile {local_filename} {remote_fileid} {prefix_name}\n'
s += '\tupappendfile {local_filename}\n'
s += '\ttruncate {truncate_filesize} {remote_fileid}\n'
s += '\tmodifyfile {local_filename} {remote_fileid} {file_offset}\n'
s += '\tmodifybuffer {local_filename} {remote_fileid} {file_offset}\n'
s += 'e.g.: python fdfs_test.py upfile test'
print(s)
sys.exit(0)
if len(sys.argv) < 2:
usage()
client = Fdfs_client('client.conf')
def upfile_func():
# Upload by filename
# usage: python fdfs_test.py upfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
file_size = os.stat(local_filename).st_size
# meta_buffer can be null.
meta_dict = {
'ext_name': 'py',
'file_size': str(file_size) + 'B'
}
t1 = time.time()
ret_dict = client.upload_by_filename(local_filename, meta_dict)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upfileex_func():
# Upload by file
# usage: python fdfs_test.py upfileex {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
t1 = time.time()
ret_dict = client.upload_by_file(local_filename)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavefile_func():
# upload slave file
# usage: python fdfs_test.py upslavefile {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
ret_dict = client.upload_slave_by_file(local_filename, remote_fileid, \
prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavebuffer_func():
# upload slave by buffer
# usage: python fdfs_test.py upslavebuffer {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
with open(local_filename, 'rb') as f:
filebuffer = f.read()
        ret_dict = client.upload_slave_by_buffer(filebuffer, \
                                                 remote_fileid, prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def del_func():
# delete file
# usage: python fdfs_test.py delete {remote_fileid}
if len(sys.argv) < 3:
usage()
return None
try:
remote_file_id = sys.argv[2]
ret_tuple = client.delete_file(remote_file_id)
print('[+] %s' % ret_tuple[0])
print('[+] remote_fileid: %s' % ret_tuple[1])
print('[+] Storage IP: %s' % ret_tuple[2])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downfile_func():
# Download to file
# usage: python fdfs_test.py downfile {local_filename} {remote_fileid}
    if len(sys.argv) < 4:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
ret_dict = client.download_to_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_group_func():
# List one group info
# usage: python fdfs_test.py listgroup {group_name}
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
ret = client.list_one_group(group_name)
print(ret)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def listall_func():
# List all group info
# usage: python fdfs_test.py listall
if len(sys.argv) < 2:
usage()
return None
try:
ret_dict = client.list_all_groups()
print('=' * 80)
print('Groups count:', ret_dict['Groups count'])
for li in ret_dict['Groups']:
print('-' * 80)
print(li)
print('-' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_server_func():
# List all servers info of group
# usage: python fdfs_test.py listsrv {group_name} [storage_ip]
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
if len(sys.argv) > 3:
storage_ip = sys.argv[3]
else:
storage_ip = None
ret_dict = client.list_servers(group_name, storage_ip)
print('=' * 80)
print('Group name: %s' % ret_dict['Group name'])
print('=' * 80)
i = 1
for serv in ret_dict['Servers']:
print('Storage server %d:' % i)
print('=' * 80)
print(serv)
i += 1
print('=' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upbuffer_func():
# Upload by buffer
# usage: python fdfs_test.py upbuffer {local_filename} [remote_file_ext_name]
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
if len(sys.argv) > 3:
ext_name = sys.argv[3]
else:
ext_name = None
# meta_buffer can be null.
meta_buffer = {
'ext_name': 'gif',
'width': '150px',
'height': '80px'
}
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_by_buffer(file_buffer, ext_name, meta_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downbuffer_func():
# Download to buffer
# usage: python fdfs_test.py downbuffer {remote_file_id}
# e.g.: 'group1/M00/00/00/wKjzhU_rLNmjo2-1AAAamGDONEA5818.py'
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.download_to_buffer(remote_fileid)
print('Downloaded content:')
print(ret_dict['Content'])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def get_meta_data_func():
# Get meta data of remote file
# usage python fdfs_test.py getmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.get_meta_data(remote_fileid)
print(ret_dict)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def set_meta_data_func():
# Set meta data of remote file
# usage python fdfs_test.py setmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
meta_dict = {
        'ext_name': 'jpg',
'width': '160px',
        'height': '80px',
}
try:
ret_dict = client.set_meta_data(remote_fileid, meta_dict)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendfile_func():
# Upload an appender file by filename
# usage: python fdfs_test.py upappendfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
ret_dict = client.upload_appender_by_file(local_filename)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendbuffer_func():
# Upload an appender file by buffer
# usage: python fdfs_test.py upappendbuffer {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_appender_by_buffer(file_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendfile_func():
# Append a remote file
# usage: python fdfs_test.py appendfile {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
ret_dict = client.append_by_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendbuffer_func():
# Append a remote file by buffer
# usage: python fdfs_test.py appendbuffer {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.append_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def truncate_func():
# Truncate file
# usage: python fdfs_test.py truncate {truncate_filesize} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
truncate_filesize = int(sys.argv[2])
remote_fileid = sys.argv[3]
try:
ret_dict = client.truncate_file(truncate_filesize, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifyfile_func():
# Modify file by filename
# usage: python fdfs_test.py modifyfile {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
ret_dict = client.modify_by_filename(local_filename, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifybuffer_func():
# Modify file by buffer
# usage: python fdfs_test.py modifybuffer {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.modify_by_buffer(filebuffer, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
result = {
'upfile': lambda: upfile_func(),
'upfileex': lambda: upfileex_func(),
'upbuffer': lambda: upbuffer_func(),
'delete': lambda: del_func(),
'downfile': lambda: downfile_func(),
'downbuffer': lambda: downbuffer_func(),
'listgroup': lambda: list_group_func(),
'listall': lambda: listall_func(),
'listsrv': lambda: list_server_func(),
'getmeta': lambda: get_meta_data_func(),
'setmeta': lambda: set_meta_data_func(),
'upslavefile': lambda: upslavefile_func(),
'upappendfile': lambda: upappendfile_func(),
'upappendbuffer': lambda: upappendbuffer_func(),
'appendfile': lambda: appendfile_func(),
'appendbuffer': lambda: appendbuffer_func(),
'truncate': lambda: truncate_func(),
'modifyfile': lambda: modifyfile_func(),
'modifybuffer': lambda: modifybuffer_func(),
'-h': lambda: usage(),
}[sys.argv[1].lower()]()
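# Hedged example invocations of the dispatch table above; the paths and file
# ids below are placeholders, not real values:
#   python fdfs_test.py upfile ./some_local_file
#   python fdfs_test.py downfile ./saved_copy group1/M00/00/00/xxxxxx.py
#   python fdfs_test.py delete group1/M00/00/00/xxxxxx.py
# An unrecognised command raises KeyError from the dictionary lookup above.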
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: storage_client.py
import os
import sys
import stat
import errno
import struct
import socket
import datetime
import platform
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
# from test_fdfs.sendfile import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def tcp_send_file(conn, filename, buffer_size=1024):
'''
Send file to server, and split into multiple pkgs while sending.
arguments:
@conn: connection
@filename: string
@buffer_size: int ,send buffer size
@Return int: file size if success else raise ConnectionError.
'''
file_size = 0
with open(filename, 'rb') as f:
while 1:
try:
send_buffer = f.read(buffer_size)
send_size = len(send_buffer)
if send_size == 0:
break
tcp_send_data(conn, send_buffer)
file_size += send_size
except ConnectionError as e:
raise ConnectionError('[-] Error while uploading file(%s).' % e.args)
except IOError as e:
raise DataError('[-] Error while reading local file(%s).' % e.args)
return file_size
def tcp_send_file_ex(conn, filename, buffer_size=4096):
'''
Send file to server. Using linux system call 'sendfile'.
arguments:
@conn: connection
@filename: string
    @return long, sent size
'''
if 'linux' not in sys.platform.lower():
raise DataError('[-] Error: \'sendfile\' system call only available on linux.')
nbytes = 0
offset = 0
sock_fd = conn.get_sock().fileno()
with open(filename, 'rb') as f:
in_fd = f.fileno()
while 1:
try:
pass
# sent = sendfile(sock_fd, in_fd, offset, buffer_size)
# if 0 == sent:
# break
# nbytes += sent
# offset += sent
except OSError as e:
if e.errno == errno.EAGAIN:
continue
raise
return nbytes
def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
'''
Receive file from server, fragmented it while receiving and write to disk.
arguments:
@conn: connection
@local_filename: string
@file_size: int, remote file size
@buffer_size: int, receive buffer size
@Return int: file size if success else raise ConnectionError.
'''
total_file_size = 0
flush_size = 0
remain_bytes = file_size
with open(local_filename, 'wb+') as f:
while remain_bytes > 0:
try:
if remain_bytes >= buffer_size:
file_buffer, recv_size = tcp_recv_response(conn, buffer_size, buffer_size)
else:
file_buffer, recv_size = tcp_recv_response(conn, remain_bytes, buffer_size)
f.write(file_buffer)
remain_bytes -= buffer_size
total_file_size += recv_size
flush_size += recv_size
if flush_size >= 4096:
f.flush()
flush_size = 0
except ConnectionError as e:
raise ConnectionError('[-] Error: while downloading file(%s).' % e.args)
except IOError as e:
                raise DataError('[-] Error: while writing local file(%s).' % e.args)
return total_file_size
class Storage_client(object):
'''
    The class Storage_client talks to a single storage server.
    Note: the host_tuple argument holds the storage server ip address and should contain a single element.
'''
    def __init__(self, *args):
        conn_kwargs = {
            'name': 'Storage Pool',
            'host_tuple': (args[0],),
            'port': args[1],
            'timeout': args[2]
}
self.pool = ConnectionPool(**conn_kwargs)
return None
def __del__(self):
try:
self.pool.destroy()
self.pool = None
except:
pass
def update_pool(self, old_store_serv, new_store_serv, timeout=30):
'''
        Update the connection pool of the storage client.
        The pool must be rebuilt when the storage server changes; if the
        server is unchanged, do nothing.
'''
if old_store_serv.ip_addr == new_store_serv.ip_addr:
return None
self.pool.destroy()
conn_kwargs = {
'name': 'Storage_pool',
'host_tuple': (new_store_serv.ip_addr,),
'port': new_store_serv.port,
'timeout': timeout
}
self.pool = ConnectionPool(**conn_kwargs)
return True
def _storage_do_upload_file(self, tracker_client, store_serv, file_buffer, file_size=None, upload_type=None,
meta_dict=None, cmd=None, master_filename=None, prefix_name=None, file_ext_name=None):
'''
        Core routine for uploading a file.
        arguments:
        @tracker_client: Tracker_client, used to talk to the tracker server
        @store_serv: Storage_server, returned by a tracker query
        @file_buffer: string, file name or file buffer to send
        @file_size: int
        @upload_type: int, optional: FDFS_UPLOAD_BY_FILE, FDFS_UPLOAD_BY_FILENAME,
                      FDFS_UPLOAD_BY_BUFFER
        @meta_dict: dictionary of metadata to store
        @cmd: int, see the fdfs protocol commands
        @master_filename: string, only used when uploading a slave file
@prefix_name: string
@file_ext_name: string
@Return dictionary
{
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : status,
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
}
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
master_filename_len = len(master_filename) if master_filename else 0
prefix_name_len = len(prefix_name) if prefix_name else 0
upload_slave = len(store_serv.group_name) and master_filename_len
file_ext_name = str(file_ext_name) if file_ext_name else ''
# non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
# slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
# -master_name(master_filename_len)-|
slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, FDFS_FILE_EXT_NAME_MAX_LEN, master_filename_len)
th.pkg_len = struct.calcsize(slave_fmt) if upload_slave else struct.calcsize(non_slave_fmt)
th.pkg_len += file_size
th.cmd = cmd
th.send_header(store_conn)
if upload_slave:
send_buffer = struct.pack(
slave_fmt, master_filename_len, file_size, prefix_name, file_ext_name, master_filename)
else:
send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, file_size, file_ext_name.encode())
try:
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
send_file_size = tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
send_file_size = tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
if recv_size <= FDFS_GROUP_NAME_MAX_LEN:
                errmsg = '[-] Error: Storage response length does not match, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
# recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
(group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
remote_filename = remote_name.strip(b'\x00')
if meta_dict and len(meta_dict) > 0:
status = self.storage_set_metadata(tracker_client, store_serv, remote_filename, meta_dict)
if status != 0:
# rollback
self.storage_delete_file(tracker_client, store_serv, remote_filename)
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Group name': group_name.strip(b'\x00'),
'Remote file_id': group_name.strip(b'\x00') + __os_sep__.encode() + remote_filename,
            'Status': 'Upload succeeded.',
'Local file name': file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else '',
'Uploaded size': appromix(send_file_size) if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else appromix(len(file_buffer)),
'Storage IP': store_serv.ip_addr
}
return ret_dic
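    # Hedged illustration of the dictionary returned above; every value is a
    # placeholder that only shows the expected shape:
    #   {'Group name': b'group1',
    #    'Remote file_id': b'group1/M00/00/00/wKgBAF....py',
    #    'Status': 'Upload succeeded.',
    #    'Local file name': 'test', 'Uploaded size': '1.00KB',
    #    'Storage IP': b'192.168.0.2'}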
def storage_upload_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_buffer(self, tracker_client, store_serv, file_buffer, file_ext_name=None, meta_dict=None):
buffer_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, buffer_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_slave_by_filename(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_file(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_buffer(self, tracker_client, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name):
file_size = len(filebuffer)
return self._storage_do_upload_file(tracker_client, store_serv, filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, None, remote_filename,
file_ext_name)
def storage_upload_appender_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_buffer(self, tracker_client, store_serv, file_buffer, meta_dict=None,
file_ext_name=None):
file_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_delete_file(self, tracker_client, store_serv, remote_filename):
'''
Delete file from storage server.
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_DELETE_FILE
file_name_len = len(remote_filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
try:
th.send_header(store_conn)
# del_fmt: |-group_name(16)-filename(len)-|
del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(del_fmt, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.'
# % (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
# recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
remote_filename = store_serv.group_name + __os_sep__.encode() + remote_filename
        return ('Delete file succeeded.', remote_filename, store_serv.ip_addr)
def _storage_do_download_file(self, tracker_client, store_serv, file_buffer, offset, download_size,
download_type, remote_filename):
'''
        Core routine for downloading a file from the storage server.
        You can choose the download type (FDFS_DOWNLOAD_TO_FILE or
        FDFS_DOWNLOAD_TO_BUFFER) and a file offset.
@Return dictionary
'Remote file name' : remote_filename,
'Content' : local_filename or buffer,
'Download size' : download_size,
'Storage IP' : storage_ip
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
try:
th.send_header(store_conn)
# down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(down_fmt, offset, download_size, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' %
# (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d %s' % (th.status, os.strerror(th.status)))
if download_type == FDFS_DOWNLOAD_TO_FILE:
total_recv_size = tcp_recv_file(store_conn, file_buffer, th.pkg_len)
elif download_type == FDFS_DOWNLOAD_TO_BUFFER:
recv_buffer, total_recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Remote file_id': store_serv.group_name + __os_sep__.encode() + remote_filename,
'Content': file_buffer if download_type == FDFS_DOWNLOAD_TO_FILE else recv_buffer,
'Download size': appromix(total_recv_size),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_download_to_file(self, tracker_client, store_serv, local_filename, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, local_filename, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_FILE, remote_filename)
def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, file_buffer, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_BUFFER, remote_filename)
def storage_set_metadata(self, tracker_client, store_serv, remote_filename, meta_dict,
op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
ret = 0
conn = self.pool.get_connection()
remote_filename_len = len(remote_filename)
meta_buffer = fdfs_pack_metadata(meta_dict)
meta_len = len(meta_buffer)
th = Tracker_header()
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + 1 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
th.cmd = STORAGE_PROTO_CMD_SET_METADATA
try:
th.send_header(conn)
# meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
# -filename(remote_filename_len)-meta(meta_len)|
meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len, meta_len)
send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, op_flag, store_serv.group_name,
remote_filename, meta_buffer)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
ret = th.status
except:
raise
finally:
self.pool.release(conn)
return ret
def storage_get_metadata(self, tracker_client, store_serv, remote_file_name):
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_file_name)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_GET_METADATA
try:
th.send_header(store_conn)
# meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(meta_fmt, store_serv.group_name, remote_file_name.encode())
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: Remote file %s has no meta data.'
# % (store_serv.group_name + __os_sep__.encode() + remote_file_name))
if th.status != 0:
raise DataError('[-] Error:%d, %s' % (th.status, os.strerror(th.status)))
            if th.pkg_len == 0:
                return {}
meta_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = fdfs_unpack_metadata(meta_buffer)
return ret_dict
def _storage_do_append_file(self, tracker_client, store_serv, file_buffer, file_size, upload_type,
appended_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
appended_filename_len = len(appended_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appended_filename_len + file_size
th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
try:
th.send_header(store_conn)
# append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
# -filecontent(filesize)-|
append_fmt = '!Q Q %ds' % appended_filename_len
send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Append file succeeded.'
ret_dict['Appender file name'] = store_serv.group_name + __os_sep__.encode() + appended_filename
ret_dict['Appended size'] = appromix(file_size)
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_append_by_filename(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size,
FDFS_UPLOAD_BY_FILENAME, appended_filename)
def storage_append_by_file(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size, FDFS_UPLOAD_BY_FILE,
appended_filename)
def storage_append_by_buffer(self, tracker_client, store_serv, file_buffer, appended_filename):
file_size = len(file_buffer)
return self._storage_do_append_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
appended_filename)
def _storage_do_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
try:
th.send_header(store_conn)
# truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
# -appender_filename(len)-|
truncate_fmt = '!Q Q %ds' % appender_filename_len
send_buffer = struct.pack(truncate_fmt, appender_filename_len, truncated_filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Truncate succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
return self._storage_do_truncate_file(tracker_client, store_serv, truncated_filesize, appender_filename)
def _storage_do_modify_file(self, tracker_client, store_serv, upload_type, filebuffer, offset, filesize,
appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
try:
th.send_header(store_conn)
# modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
modify_fmt = '!Q Q Q %ds' % appender_filename_len
send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
upload_size = tcp_send_file(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
upload_size = tcp_send_file_ex(store_conn, filebuffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
        ret_dict['Status'] = 'Modify succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_modify_by_filename(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILENAME, filename, offset,
filesize, appender_filename)
def storage_modify_by_file(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILE, filename, offset, filesize,
appender_filename)
def storage_modify_by_buffer(self, tracker_client, store_serv, filebuffer, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_BUFFER, filebuffer, offset,
filesize, appender_filename)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tracker_client.py
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
try:
ret = {
FDFS_STORAGE_STATUS_INIT: lambda: 'INIT',
FDFS_STORAGE_STATUS_WAIT_SYNC: lambda: 'WAIT_SYNC',
FDFS_STORAGE_STATUS_SYNCING: lambda: 'SYNCING',
FDFS_STORAGE_STATUS_IP_CHANGED: lambda: 'IP_CHANGED',
FDFS_STORAGE_STATUS_DELETED: lambda: 'DELETED',
FDFS_STORAGE_STATUS_OFFLINE: lambda: 'OFFLINE',
FDFS_STORAGE_STATUS_ONLINE: lambda: 'ONLINE',
FDFS_STORAGE_STATUS_ACTIVE: lambda: 'ACTIVE',
FDFS_STORAGE_STATUS_RECOVERY: lambda: 'RECOVERY'
}[status_code]()
except KeyError:
        ret = 'UNKNOWN'
return ret
class Storage_info(object):
def __init__(self):
self.status = 0
self.id = ''
self.ip_addr = ''
self.domain_name = ''
self.src_id = ''
self.version = ''
self.join_time = datetime.fromtimestamp(0).isoformat()
self.up_time = datetime.fromtimestamp(0).isoformat()
self.totalMB = ''
self.freeMB = ''
self.upload_prio = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_write_path = 0
self.storage_port = 23000
self.storage_http_port = 80
self.alloc_count = 0
self.current_count = 0
self.max_count = 0
self.total_upload_count = 0
self.success_upload_count = 0
self.total_append_count = 0
self.success_append_count = 0
self.total_modify_count = 0
self.success_modify_count = 0
self.total_truncate_count = 0
self.success_truncate_count = 0
self.total_setmeta_count = 0
self.success_setmeta_count = 0
self.total_del_count = 0
self.success_del_count = 0
self.total_download_count = 0
self.success_download_count = 0
self.total_getmeta_count = 0
self.success_getmeta_count = 0
self.total_create_link_count = 0
self.success_create_link_count = 0
self.total_del_link_count = 0
self.success_del_link_count = 0
self.total_upload_bytes = 0
self.success_upload_bytes = 0
self.total_append_bytes = 0
self.success_append_bytes = 0
self.total_modify_bytes = 0
self.success_modify_bytes = 0
self.total_download_bytes = 0
self.success_download_bytes = 0
self.total_sync_in_bytes = 0
self.success_sync_in_bytes = 0
self.total_sync_out_bytes = 0
self.success_sync_out_bytes = 0
self.total_file_open_count = 0
self.success_file_open_count = 0
self.total_file_read_count = 0
self.success_file_read_count = 0
self.total_file_write_count = 0
self.success_file_write_count = 0
self.last_source_sync = datetime.fromtimestamp(0).isoformat()
self.last_sync_update = datetime.fromtimestamp(0).isoformat()
self.last_synced_time = datetime.fromtimestamp(0).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
self.if_trunk_server = ''
        # fmt = |-status(1)-id(16)-ip_addr(16)-domain(128)-src_id(16)-version(6)-10*8-3*4s-42*8-bool(1)-|
self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'
def set_info(self, bytes_stream):
(self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
         self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
try:
self.ip_addr = ip_addr.strip(b'\x00')
self.domain_name = domain_name.strip(b'\x00')
self.version = version.strip(b'\x00')
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError as e:
            raise ResponseError('[-] Error: disk space overrun, cannot represent it.')
self.join_time = datetime.fromtimestamp(join_time).isoformat()
self.up_time = datetime.fromtimestamp(up_time).isoformat()
self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
return True
def __str__(self):
'''Transform to readable string.'''
s = 'Storage information:\n'
s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
s += '\thttp domain = %s\n' % self.domain_name
s += '\tversion = %s\n' % self.version
s += '\tjoin time = %s\n' % self.join_time
s += '\tup time = %s\n' % self.up_time
s += '\ttotal storage = %s\n' % self.totalMB
s += '\tfree storage = %s\n' % self.freeMB
s += '\tupload priority = %d\n' % self.upload_prio
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.storage_http_port
s += '\tcurrent write path = %d\n' % self.curr_write_path
s += '\tsource ip_addr = %s\n' % self.ip_addr
s += '\tif_trunk_server = %d\n' % self.if_trunk_server
s += '\ttotal upload count = %ld\n' % self.total_upload_count
s += '\tsuccess upload count = %ld\n' % self.success_upload_count
s += '\ttotal download count = %ld\n' % self.total_download_count
s += '\tsuccess download count = %ld\n' % self.success_download_count
s += '\ttotal append count = %ld\n' % self.total_append_count
s += '\tsuccess append count = %ld\n' % self.success_append_count
s += '\ttotal modify count = %ld\n' % self.total_modify_count
s += '\tsuccess modify count = %ld\n' % self.success_modify_count
s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
s += '\ttotal delete count = %ld\n' % self.total_del_count
s += '\tsuccess delete count = %ld\n' % self.success_del_count
s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
s += '\ttotal create link count = %ld\n' % self.total_create_link_count
s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
s += '\ttotal file open count = %ld\n' % self.total_file_open_count
s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
s += '\ttotal file read count = %ld\n' % self.total_file_read_count
s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
s += '\ttotal file write count = %ld\n' % self.total_file_write_count
        s += '\tsuccess file write count = %ld\n' % self.success_file_write_count
s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
s += '\tlast source update = %s\n' % self.last_source_sync
s += '\tlast sync update = %s\n' % self.last_sync_update
s += '\tlast synced time = %s\n' % self.last_synced_time
return s
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Group_info(object):
def __init__(self):
self.group_name = ''
self.totalMB = ''
self.freeMB = ''
self.trunk_freeMB = ''
self.count = 0
self.storage_port = 0
self.store_http_port = 0
self.active_count = 0
self.curr_write_server = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_trunk_file_id = 0
self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
return None
def __str__(self):
s = 'Group information:\n'
s += '\tgroup name = %s\n' % self.group_name
s += '\ttotal disk space = %s\n' % self.totalMB
s += '\tdisk free space = %s\n' % self.freeMB
s += '\ttrunk free space = %s\n' % self.trunk_freeMB
s += '\tstorage server count = %d\n' % self.count
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.store_http_port
s += '\tactive server count = %d\n' % self.active_count
s += '\tcurrent write server index = %d\n' % self.curr_write_server
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
return s
def set_info(self, bytes_stream):
(group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
try:
self.group_name = group_name.strip(b'\x00')
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError:
            raise DataError('[-] Error: disk space overrun, cannot represent it.')
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Tracker_client(object):
'''Class Tracker client.'''
def __init__(self, pool):
self.pool = pool
def tracker_list_servers(self, group_name, storage_ip=None):
'''
List servers in a storage group
'''
conn = self.pool.get_connection()
th = Tracker_header()
ip_len = len(storage_ip) if storage_ip else 0
if ip_len >= IP_ADDRESS_SIZE:
ip_len = IP_ADDRESS_SIZE - 1
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
store_ip_addr = storage_ip or ''
storage_ip_fmt = '!%ds' % ip_len
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
si = Storage_info()
si_fmt_size = si.get_fmt_size()
recv_size = len(recv_buffer)
if recv_size % si_fmt_size != 0:
                errinfo = '[-] Error: response size does not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errinfo)
except ConnectionError:
raise
finally:
self.pool.release(conn)
        num_storage = recv_size // si_fmt_size
si_list = []
i = 0
while num_storage:
si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
si_list.append(si)
si = Storage_info()
num_storage -= 1
i += 1
ret_dict = {}
ret_dict['Group name'] = group_name
ret_dict['Servers'] = si_list
return ret_dict
def tracker_list_one_group(self, group_name):
conn = self.pool.get_connection()
th = Tracker_header()
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
# group_fmt: |-group_name(16)-|
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
group_info = Group_info()
group_info.set_info(recv_buffer)
except ConnectionError:
raise
finally:
self.pool.release(conn)
return group_info
def tracker_list_all_groups(self):
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
except:
raise
finally:
self.pool.release(conn)
gi = Group_info()
gi_fmt_size = gi.get_fmt_size()
if recv_size % gi_fmt_size != 0:
            errmsg = '[-] Error: Response size mismatch, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
        num_groups = recv_size // gi_fmt_size
ret_dict = {}
ret_dict['Groups count'] = num_groups
gi_list = []
i = 0
while num_groups:
gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
gi_list.append(gi)
gi = Group_info()
i += 1
num_groups -= 1
ret_dict['Groups'] = gi_list
return ret_dict
def tracker_query_storage_stor_without_group(self):
'''Query storage server for upload, without group name.
Return: Storage_server object'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def tracker_query_storage_stor_with_group(self, group_name):
        '''Query storage server for upload, based on group name.
arguments:
@group_name: string
@Return Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.send_header(conn)
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
send_buffer = struct.pack(group_fmt, group_name)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def _tracker_do_query_storage(self, group_name, filename, cmd):
'''
        Core routine for querying a storage server by group name and filename.
        It is used for download, delete and set_meta operations.
arguments:
@group_name: string
@filename: string. remote file_id
@Return: Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
file_name_len = len(filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
th.cmd = cmd
th.send_header(conn)
# query_fmt: |-group_name(16)-filename(file_name_len)-|
query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(query_fmt, group_name, filename)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
                errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ip_addr(16)-port(8)-|
recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ipaddr.strip(b'\x00')
return store_serv
def tracker_query_storage_update(self, group_name, filename):
'''
Query storage server to update(delete and set_meta).
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
def tracker_query_storage_fetch(self, group_name, filename):
'''
Query storage server to download.
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: utils.py
import io
import os
import sys
import stat
import platform
import configparser
SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def appromix(size, base=0):
    '''Convert a byte count to a human-readable string.
Keyword arguments:
size: int, bytes stream size
base: int, suffix index
Return: string
'''
multiples = 1024
if size < 0:
raise ValueError('[-] Error: number must be non-negative.')
if size < multiples:
return '{0:d}{1}'.format(size, SUFFIX[base])
for suffix in SUFFIX[base:]:
if size < multiples:
return '{0:.2f}{1}'.format(size, suffix)
size = size / float(multiples)
raise ValueError('[-] Error: number too big.')
def get_file_ext_name(filename, double_ext=True):
li = filename.split(os.extsep)
if len(li) <= 1:
return ''
else:
if li[-1].find(__os_sep__) != -1:
return ''
if double_ext:
if len(li) > 2:
if li[-2].find(__os_sep__) == -1:
return '%s.%s' % (li[-2], li[-1])
return li[-1]
class Fdfs_ConfigParser(configparser.RawConfigParser):
"""
Extends ConfigParser to allow files without sections.
This is done by wrapping read files and prepending them with a placeholder
section, which defaults to '__config__'
"""
def __init__(self, default_section=None, *args, **kwargs):
configparser.RawConfigParser.__init__(self, *args, **kwargs)
self._default_section = None
self.set_default_section(default_section or '__config__')
def get_default_section(self):
return self._default_section
def set_default_section(self, section):
self.add_section(section)
# move all values from the previous default section to the new one
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
except configparser.NoSectionError:
pass
else:
for (key, value) in default_section_items:
self.set(section, key, value)
self._default_section = section
def read(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename) as fp:
self.readfp(fp)
except IOError:
continue
else:
read_ok.append(filename)
return read_ok
def readfp(self, fp, *args, **kwargs):
stream = io.StringIO()
        try:
            stream.name = fp.name
        except AttributeError:
            # fall back so the later stream.name access cannot fail
            stream.name = '<???>'
stream.write('[' + self._default_section + ']\n')
stream.write(fp.read())
stream.seek(0, 0)
return self._read(stream, stream.name)
def write(self, fp):
# Write the items from the default section manually and then remove them
# from the data. They'll be re-added later.
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
for (key, value) in default_section_items:
fp.write("{0} = {1}\n".format(key, value))
fp.write("\n")
        except configparser.NoSectionError:
            default_section_items = []
configparser.RawConfigParser.write(self, fp)
self.add_section(self._default_section)
for (key, value) in default_section_items:
self.set(self._default_section, key, value)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
                    elif sectname == configparser.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
                    raise configparser.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
if optname in cursect:
if not isinstance(cursect[optname], list):
cursect[optname] = [cursect[optname]]
cursect[optname].append(optval)
else:
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
                        e = configparser.ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
def split_remote_fileid(remote_file_id):
'''
    Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
'''
index = remote_file_id.find(b'/')
if -1 == index:
return None
return (remote_file_id[0:index], remote_file_id[(index + 1):])
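# Added sketch (not part of the original module): a FastDFS remote file id is
# '<group name>/<remote file name>', so splitting works as shown below.
def _demo_split_remote_fileid():
    # a file id with a separator splits into (group_name, remote_file_name)
    assert split_remote_fileid(b'group1/M00/00/00/abc.py') == (b'group1', b'M00/00/00/abc.py')
    # an id without b'/' is invalid and yields None
    assert split_remote_fileid(b'no_separator_here') is None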
def fdfs_check_file(filename):
ret = True
errmsg = ''
if not os.path.isfile(filename):
ret = False
errmsg = '[-] Error: %s is not a file.' % filename
elif not stat.S_ISREG(os.stat(filename).st_mode):
ret = False
errmsg = '[-] Error: %s is not a regular file.' % filename
return (ret, errmsg)
if __name__ == '__main__':
print(get_file_ext_name('/bc.tar.gz'))
# __init__.py
__version__ = '2.2.0'
VERSION = tuple(map(int, __version__.split('.')))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: client.py
'''
Client module for Fastdfs 3.08
author: scott yuan scottzer8@gmail.com
date: 2012-06-21
'''
import os
import sys
from fdfs_client.utils import *
from fdfs_client.tracker_client import *
from fdfs_client.storage_client import *
from fdfs_client.exceptions import *
def get_tracker_conf(conf_path='../../client.conf'):
cf = Fdfs_ConfigParser()
tracker = {}
try:
cf.read(conf_path)
timeout = cf.getint('__config__', 'connect_timeout')
tracker_list = cf.get('__config__', 'tracker_server')
if isinstance(tracker_list, str):
tracker_list = [tracker_list]
tracker_ip_list = []
for tr in tracker_list:
tracker_ip, tracker_port = tr.split(':')
tracker_ip_list.append(tracker_ip)
tracker['host_tuple'] = tuple(tracker_ip_list)
tracker['port'] = int(tracker_port)
tracker['timeout'] = timeout
tracker['name'] = 'Tracker Pool'
except:
raise
return tracker
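# Added sketch: get_tracker_conf() above reads 'connect_timeout' and
# 'tracker_server' from the config file. A minimal client.conf (an assumption
# for illustration, not shipped with this repo) might look like:
#
#   connect_timeout = 30
#   tracker_server = 192.168.0.1:22122
#
# which would produce a trackers dict roughly like
#   {'host_tuple': ('192.168.0.1',), 'port': 22122, 'timeout': 30, 'name': 'Tracker Pool'}
# suitable for passing straight to Fdfs_client below.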
class Fdfs_client(object):
'''
Class Fdfs_client implements the FastDFS client protocol ver 3.08.
It is used to upload, download and delete files to or from an fdfs server,
and uses a connection pool to manage connections to the server.
'''
def __init__(self, trackers, poolclass=ConnectionPool):
self.trackers = trackers
self.tracker_pool = poolclass(**self.trackers)
self.timeout = self.trackers['timeout']
return None
def __del__(self):
try:
self.tracker_pool.destroy()
self.tracker_pool = None
except:
pass
def upload_by_filename(self, filename, meta_dict=None):
'''
Upload a file to Storage server.
arguments:
@filename: string, name of file that will be uploaded
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} meta_dict can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : local_file_name,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_filename(tc, store_serv, filename, meta_dict)
def upload_by_file(self, filename, meta_dict=None):
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_file(tc, store_serv, filename, meta_dict)
def upload_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string, buffer
@file_ext_name: string, file extend name
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_by_buffer(tc, store_serv, filebuffer, file_ext_name, meta_dict)
def upload_slave_by_filename(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_filename(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_file(self, filename, remote_file_id, prefix_name, meta_dict=None):
'''
Upload slave file to Storage server.
arguments:
@filename: string, local file name
@remote_file_id: string, remote file id
@prefix_name: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(uploading slave)')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
if not prefix_name:
raise DataError('[-] Error: prefix_name can not be null.')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_with_group(group_name)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
try:
ret_dict = store.storage_upload_slave_by_file(tc, store_serv, filename, prefix_name, remote_filename,
meta_dict=None)
except:
raise
ret_dict['Status'] = 'Upload slave file successed.'
return ret_dict
def upload_slave_by_buffer(self, filebuffer, remote_file_id, meta_dict=None, file_ext_name=None):
'''
Upload slave file by buffer
arguments:
@filebuffer: string
@remote_file_id: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
}
@return dictionary {
'Status' : 'Upload slave successed.',
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Remote file id' : remote_file_id,
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(uploading slave)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_slave_by_buffer(tc, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name)
def upload_appender_by_filename(self, local_filename, meta_dict=None):
'''
Upload an appender file by filename.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_filename(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_file(self, local_filename, meta_dict=None):
'''
Upload an appender file by file.
arguments:
@local_filename: string
@meta_dict: dictionary e.g.:{
'ext_name' : 'jpg',
'file_size' : '10240B',
'width' : '160px',
'hight' : '80px'
} Notice: it can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(uploading appender)')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_file(tc, store_serv, local_filename, meta_dict)
def upload_appender_by_buffer(self, filebuffer, file_ext_name=None, meta_dict=None):
'''
Upload a buffer to Storage server.
arguments:
@filebuffer: string
@file_ext_name: string, can be null
@meta_dict: dictionary, can be null
@return dict {
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : 'Upload successed.',
'Local file name' : '',
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
} if success else None
'''
if not filebuffer:
raise DataError('[-] Error: argument filebuffer can not be null.')
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_stor_without_group()
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_upload_appender_by_buffer(tc, store_serv, filebuffer, meta_dict, file_ext_name)
def delete_file(self, remote_file_id):
'''
Delete a file from Storage server.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@return tuple ('Delete file successed.', remote_file_id, storage_ip)
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in delete file)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_delete_file(tc, store_serv, remote_filename)
def download_to_file(self, local_filename, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server.
arguments:
@local_filename: string, local name of file
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@downbytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : local_filename,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
# normalize offset/length up front; the previous 'if not offset' guards left
# these names undefined whenever a non-zero value was passed in
file_offset = int(offset)
download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_download_to_file(tc, store_serv, local_filename, file_offset, download_bytes,
remote_filename)
def download_to_buffer(self, remote_file_id, offset=0, down_bytes=0):
'''
Download a file from Storage server and store in buffer.
arguments:
@remote_file_id: string, file_id of file that is on storage server
@offset: long
@down_bytes: long
@return dict {
'Remote file_id' : remote_file_id,
'Content' : file_buffer,
'Download size' : downloaded_size,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in download file)')
group_name, remote_filename = tmp
# normalize offset/length up front; the previous 'if not offset' guards left
# these names undefined whenever a non-zero value was passed in
file_offset = int(offset)
download_bytes = int(down_bytes)
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_fetch(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
file_buffer = None
return store.storage_download_to_buffer(tc, store_serv, file_buffer, file_offset, download_bytes,
remote_filename)
def list_one_group(self, group_name):
'''
List one group information.
arguments:
@group_name: string, group name will be list
@return Group_info, instance
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_one_group(group_name)
def list_servers(self, group_name, storage_ip=None):
'''
List all storage servers information in a group
arguments:
@group_name: string
@return dictionary {
'Group name' : group_name,
'Servers' : server list,
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_servers(group_name, storage_ip)
def list_all_groups(self):
'''
List all group information.
@return dictionary {
'Groups count' : group_count,
'Groups' : list of groups
}
'''
tc = Tracker_client(self.tracker_pool)
return tc.tracker_list_all_groups()
def get_meta_data(self, remote_file_id):
'''
Get meta data of remote file.
arguments:
@remote_fileid: string, remote file id
@return dictionary, meta data
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in get meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_get_metadata(tc, store_serv, remote_filename)
def set_meta_data(self, remote_file_id, meta_dict, op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
'''
Set meta data of remote file.
arguments:
@remote_file_id: string
@meta_dict: dictionary
@op_flag: char, 'O' for overwrite, 'M' for merge
@return dictionary {
'Status' : status,
'Storage IP' : storage_ip
}
'''
tmp = split_remote_fileid(remote_file_id)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(in set meta data)')
group_name, remote_filename = tmp
tc = Tracker_client(self.tracker_pool)
try:
store_serv = tc.tracker_query_storage_update(group_name, remote_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
status = store.storage_set_metadata(tc, store_serv, remote_filename, meta_dict)
except (ConnectionError, ResponseError, DataError):
raise
# if status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' % remote_file_id)
if status != 0:
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
ret_dict = {}
ret_dict['Status'] = 'Set meta data success.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def append_by_filename(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_filename(tc, store_serv, local_filename, appended_filename)
def append_by_file(self, local_filename, remote_fileid):
isfile, errmsg = fdfs_check_file(local_filename)
if not isfile:
raise DataError(errmsg + '(append)')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_file(tc, store_serv, local_filename, appended_filename)
def append_by_buffer(self, file_buffer, remote_fileid):
if not file_buffer:
raise DataError('[-] Error: file_buffer can not be null.')
tmp = split_remote_fileid(remote_fileid)
if not tmp:
raise DataError('[-] Error: remote_file_id is invalid.(append)')
group_name, appended_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appended_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_append_by_buffer(tc, store_serv, file_buffer, appended_filename)
def truncate_file(self, truncated_filesize, appender_fileid):
'''
Truncate file in Storage server.
arguments:
@truncated_filesize: long
@appender_fileid: remote_fileid
@return: dictionary {
'Status' : 'Truncate successed.',
'Storage IP' : storage_ip
}
'''
trunc_filesize = int(truncated_filesize)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: appender_fileid is invalid.(truncate)')
group_name, appender_filename = tmp
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_truncate_file(tc, store_serv, trunc_filesize, appender_filename)
def modify_by_filename(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_filename(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_file(self, filename, appender_fileid, offset=0):
'''
Modify a file in Storage server by file.
arguments:
@filename: string, local file name
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
isfile, errmsg = fdfs_check_file(filename)
if not isfile:
raise DataError(errmsg + '(modify)')
filesize = os.stat(filename).st_size
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_file(tc, store_serv, filename, file_offset, filesize, appender_filename)
def modify_by_buffer(self, filebuffer, appender_fileid, offset=0):
'''
Modify a file in Storage server by buffer.
arguments:
@filebuffer: string, file buffer
@offset: long, file offset
@appender_fileid: string, remote file id
@return: dictionary {
'Status' : 'Modify successed.',
'Storage IP' : storage_ip
}
'''
if not filebuffer:
raise DataError('[-] Error: filebuffer can not be null.(modify)')
filesize = len(filebuffer)
tmp = split_remote_fileid(appender_fileid)
if not tmp:
raise DataError('[-] Error: remote_fileid is invalid.(modify)')
group_name, appender_filename = tmp
file_offset = int(offset) if offset else 0
tc = Tracker_client(self.tracker_pool)
store_serv = tc.tracker_query_storage_update(group_name, appender_filename)
store = Storage_client(store_serv.ip_addr, store_serv.port, self.timeout)
return store.storage_modify_by_buffer(tc, store_serv, filebuffer, file_offset, filesize, appender_filename)
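# Added usage sketch (not part of the original module). Assumptions: a reachable
# tracker described by 'client.conf' and a local file '/tmp/demo.txt'; both are
# placeholders to adjust for a real deployment.
if __name__ == '__main__':
    trackers = get_tracker_conf('client.conf')
    client = Fdfs_client(trackers)
    # upload a local file and keep the remote file id that comes back
    ret = client.upload_by_filename('/tmp/demo.txt')
    remote_file_id = ret['Remote file_id']
    print(ret)
    # download it to another path, then delete the remote copy
    print(client.download_to_file('/tmp/demo_copy.txt', remote_file_id))
    print(client.delete_file(remote_file_id))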
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: connection.py
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
'''Manage TCP communication to and from the FastDFS server.'''
def __init__(self, **conn_kwargs):
self.pid = os.getpid()
self.host_tuple = conn_kwargs['host_tuple']
self.remote_port = conn_kwargs['port']
self.remote_addr = None
self.timeout = conn_kwargs['timeout']
self._sock = None
def __del__(self):
try:
self.disconnect()
except:
pass
def connect(self):
'''Connect to fdfs server.'''
if self._sock:
return
try:
sock = self._connect()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = sock
# print '[+] Create a connection success.'
# print '\tLocal address is %s:%s.' % self._sock.getsockname()
# print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)
def _connect(self):
'''Create TCP socket. The host is random one of host_tuple.'''
self.remote_addr = random.choice(self.host_tuple)
# print '[+] Connecting... remote: %s:%s' % (self.remote_addr, self.remote_port)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.settimeout(self.timeout)
sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
return sock
def disconnect(self):
'''Disconnect from fdfs server.'''
if self._sock is None:
return
try:
self._sock.close()
except socket.error as e:
raise ConnectionError(self._errormessage(e))
self._sock = None
def get_sock(self):
return self._sock
def _errormessage(self, exception):
# args for socket.error can either be (errno, "message")
# or just "message"
if len(exception.args) == 1:
return "[-] Error: connect to %s:%s. %s." % (self.remote_addr, self.remote_port, exception.args[0])
else:
return "[-] Error: %s connect to %s:%s. %s." % \
(exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# end class Connection
# start ConnectionPool
class ConnectionPool(object):
'''Generic Connection Pool'''
def __init__(self, name='', conn_class=Connection,
max_conn=None, **conn_kwargs):
self.pool_name = name
self.pid = os.getpid()
self.conn_class = conn_class
self.max_conn = max_conn or 2 ** 31
self.conn_kwargs = conn_kwargs
self._conns_created = 0
self._conns_available = []
self._conns_inuse = set()
# print '[+] Create a connection pool success, name: %s.' % self.pool_name
def _check_pid(self):
if self.pid != os.getpid():
self.destroy()
self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)
def make_conn(self):
'''Create a new connection.'''
if self._conns_created >= self.max_conn:
raise ConnectionError('[-] Error: Too many connections.')
num_try = 10
while True:
try:
if num_try <= 0:
sys.exit()
conn_instance = self.conn_class(**self.conn_kwargs)
conn_instance.connect()
self._conns_created += 1
break
except ConnectionError as e:
print(e)
num_try -= 1
conn_instance = None
return conn_instance
def get_connection(self):
'''Get a connection from pool.'''
self._check_pid()
try:
conn = self._conns_available.pop()
# print '[+] Get a connection from pool %s.' % self.pool_name
# print '\tLocal address is %s:%s.' % conn._sock.getsockname()
# print '\tRemote address is %s:%s' % (conn.remote_addr, conn.remote_port)
except IndexError:
conn = self.make_conn()
self._conns_inuse.add(conn)
return conn
def remove(self, conn):
'''Remove connection from pool.'''
if conn in self._conns_inuse:
self._conns_inuse.remove(conn)
self._conns_created -= 1
if conn in self._conns_available:
self._conns_available.remove(conn)
self._conns_created -= 1
def destroy(self):
'''Disconnect all connections in the pool.'''
all_conns = chain(self._conns_inuse, self._conns_available)
for conn in all_conns:
conn.disconnect()
# print '[-] Destroy connection pool %s.' % self.pool_name
def release(self, conn):
'''Release the connection back to the pool.'''
self._check_pid()
if conn.pid == self.pid:
self._conns_inuse.remove(conn)
self._conns_available.append(conn)
# print '[-] Release connection back to pool %s.' % self.pool_name
# end ConnectionPool class
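# Added sketch (not part of the original module): the borrow/release pattern the
# rest of this client uses with ConnectionPool; the host and port below are
# placeholder assumptions.
def _demo_pool_usage():
    pool = ConnectionPool(name='Demo Pool', host_tuple=('192.168.0.1',), port=22122, timeout=30)
    conn = pool.get_connection()
    try:
        pass  # talk to the server here with tcp_send_data()/tcp_recv_response()
    finally:
        # always hand the connection back so it can be reused
        pool.release(conn)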
def tcp_recv_response(conn, bytes_size, buffer_size=4096):
'''Receive response from server.
The tracker header is not included.
arguments:
@conn: connection
@bytes_size: int, will be received byte_stream size
@buffer_size: int, receive buffer size
@Return: tuple,(response, received_size)
'''
recv_buff = []
total_size = 0
try:
while bytes_size > 0:
resp = conn._sock.recv(buffer_size)
recv_buff.append(resp)
total_size += len(resp)
bytes_size -= len(resp)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: (%s)' % (e.args,))
return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
'''Send buffer to server.
The tracker header is not included.
arguments:
@conn: connection
@bytes_stream: transmit buffer
@Return bool
'''
try:
conn._sock.sendall(bytes_stream)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while writing to socket: (%s)' % (e.args,))
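# Note (added): every request and response in this protocol is a fixed 10-byte
# header (pkg_len, cmd, status packed as '!QBB' by Tracker_header in
# fdfs_protol.py) followed by pkg_len body bytes. tcp_send_data() and
# tcp_recv_response() above move only the body; the header itself goes through
# Tracker_header.send_header()/recv_header().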
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: exceptions.py
'''Core exceptions raised by fdfs client'''
class FDFSError(Exception):
pass
class ConnectionError(FDFSError):
pass
class ResponseError(FDFSError):
pass
class InvaildResponse(FDFSError):
pass
class DataError(FDFSError):
pass
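# Added sketch (not part of the original module): every client-side error above
# derives from FDFSError, so callers that do not care which step failed can
# catch the base class alone. 'client' and 'remote_file_id' are assumed inputs.
def _demo_catch_all(client, remote_file_id):
    try:
        return client.get_meta_data(remote_file_id)
    except FDFSError as e:
        print('[-] fdfs operation failed: %s' % e)
        return None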
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_protol.py
import struct
import socket
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# define FDFS protocol constants
TRACKER_PROTO_CMD_STORAGE_JOIN = 81
FDFS_PROTO_CMD_QUIT = 82
TRACKER_PROTO_CMD_STORAGE_BEAT = 83 # storage heart beat
TRACKER_PROTO_CMD_STORAGE_REPORT_DISK_USAGE = 84 # report disk usage
TRACKER_PROTO_CMD_STORAGE_REPLICA_CHG = 85 # repl new storage servers
TRACKER_PROTO_CMD_STORAGE_SYNC_SRC_REQ = 86 # src storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_REQ = 87 # dest storage require sync
TRACKER_PROTO_CMD_STORAGE_SYNC_NOTIFY = 88 # sync done notify
TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT = 89 # report src last synced time as dest server
TRACKER_PROTO_CMD_STORAGE_SYNC_DEST_QUERY = 79 # dest storage query sync src storage server
TRACKER_PROTO_CMD_STORAGE_REPORT_IP_CHANGED = 78  # storage server reports its ip changed
TRACKER_PROTO_CMD_STORAGE_CHANGELOG_REQ = 77 # storage server request storage server's changelog
TRACKER_PROTO_CMD_STORAGE_REPORT_STATUS = 76 # report specified storage server status
TRACKER_PROTO_CMD_STORAGE_PARAMETER_REQ = 75 # storage server request parameters
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FREE = 74 # storage report trunk free space
TRACKER_PROTO_CMD_STORAGE_REPORT_TRUNK_FID = 73 # storage report current trunk file id
TRACKER_PROTO_CMD_STORAGE_FETCH_TRUNK_FID = 72 # storage get current trunk file id
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_START = 61 # start of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_SYS_FILES_END = 62 # end of tracker get system data files
TRACKER_PROTO_CMD_TRACKER_GET_ONE_SYS_FILE = 63 # tracker get a system data file
TRACKER_PROTO_CMD_TRACKER_GET_STATUS = 64 # tracker get status of other tracker
TRACKER_PROTO_CMD_TRACKER_PING_LEADER = 65 # tracker ping leader
TRACKER_PROTO_CMD_TRACKER_NOTIFY_NEXT_LEADER = 66 # notify next leader to other trackers
TRACKER_PROTO_CMD_TRACKER_COMMIT_NEXT_LEADER = 67 # commit next leader to other trackers
TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP = 90
TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS = 91
TRACKER_PROTO_CMD_SERVER_LIST_STORAGE = 92
TRACKER_PROTO_CMD_SERVER_DELETE_STORAGE = 93
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE = 101
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE = 102
TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE = 103
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE = 104
TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL = 105
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL = 106
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL = 107
TRACKER_PROTO_CMD_RESP = 100
FDFS_PROTO_CMD_ACTIVE_TEST = 111 # active test, tracker and storage both support since V1.28
STORAGE_PROTO_CMD_REPORT_CLIENT_IP = 9 # ip as tracker client
STORAGE_PROTO_CMD_UPLOAD_FILE = 11
STORAGE_PROTO_CMD_DELETE_FILE = 12
STORAGE_PROTO_CMD_SET_METADATA = 13
STORAGE_PROTO_CMD_DOWNLOAD_FILE = 14
STORAGE_PROTO_CMD_GET_METADATA = 15
STORAGE_PROTO_CMD_SYNC_CREATE_FILE = 16
STORAGE_PROTO_CMD_SYNC_DELETE_FILE = 17
STORAGE_PROTO_CMD_SYNC_UPDATE_FILE = 18
STORAGE_PROTO_CMD_SYNC_CREATE_LINK = 19
STORAGE_PROTO_CMD_CREATE_LINK = 20
STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE = 21
STORAGE_PROTO_CMD_QUERY_FILE_INFO = 22
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE = 23 # create appender file
STORAGE_PROTO_CMD_APPEND_FILE = 24 # append file
STORAGE_PROTO_CMD_SYNC_APPEND_FILE = 25
STORAGE_PROTO_CMD_FETCH_ONE_PATH_BINLOG = 26 # fetch binlog of one store path
STORAGE_PROTO_CMD_RESP = TRACKER_PROTO_CMD_RESP
STORAGE_PROTO_CMD_UPLOAD_MASTER_FILE = STORAGE_PROTO_CMD_UPLOAD_FILE
STORAGE_PROTO_CMD_TRUNK_ALLOC_SPACE = 27 # since V3.00
STORAGE_PROTO_CMD_TRUNK_ALLOC_CONFIRM = 28 # since V3.00
STORAGE_PROTO_CMD_TRUNK_FREE_SPACE = 29 # since V3.00
STORAGE_PROTO_CMD_TRUNK_SYNC_BINLOG = 30 # since V3.00
STORAGE_PROTO_CMD_TRUNK_GET_BINLOG_SIZE = 31 # since V3.07
STORAGE_PROTO_CMD_TRUNK_DELETE_BINLOG_MARKS = 32 # since V3.07
STORAGE_PROTO_CMD_TRUNK_TRUNCATE_BINLOG_FILE = 33 # since V3.07
STORAGE_PROTO_CMD_MODIFY_FILE = 34 # since V3.08
STORAGE_PROTO_CMD_SYNC_MODIFY_FILE = 35 # since V3.08
STORAGE_PROTO_CMD_TRUNCATE_FILE = 36 # since V3.08
STORAGE_PROTO_CMD_SYNC_TRUNCATE_FILE = 37 # since V3.08
# for overwrite all old metadata
STORAGE_SET_METADATA_FLAG_OVERWRITE = 'O'
STORAGE_SET_METADATA_FLAG_OVERWRITE_STR = "O"
# for replace, insert when the meta item not exist, otherwise update it
STORAGE_SET_METADATA_FLAG_MERGE = 'M'
STORAGE_SET_METADATA_FLAG_MERGE_STR = "M"
FDFS_RECORD_SEPERATOR = '\x01'
FDFS_FIELD_SEPERATOR = '\x02'
# common constants
FDFS_GROUP_NAME_MAX_LEN = 16
IP_ADDRESS_SIZE = 16
FDFS_PROTO_PKG_LEN_SIZE = 8
FDFS_PROTO_CMD_SIZE = 1
FDFS_PROTO_STATUS_SIZE = 1
FDFS_PROTO_IP_PORT_SIZE = (IP_ADDRESS_SIZE + 6)
FDFS_MAX_SERVERS_EACH_GROUP = 32
FDFS_MAX_GROUPS = 512
FDFS_MAX_TRACKERS = 16
FDFS_DOMAIN_NAME_MAX_LEN = 128
FDFS_MAX_META_NAME_LEN = 64
FDFS_MAX_META_VALUE_LEN = 256
FDFS_FILE_PREFIX_MAX_LEN = 16
FDFS_LOGIC_FILE_PATH_LEN = 10
FDFS_TRUE_FILE_PATH_LEN = 6
FDFS_FILENAME_BASE64_LENGTH = 27
FDFS_TRUNK_FILE_INFO_LEN = 16
FDFS_FILE_EXT_NAME_MAX_LEN = 6
FDFS_SPACE_SIZE_BASE_INDEX = 2 # storage space size based (MB)
FDFS_UPLOAD_BY_BUFFER = 1
FDFS_UPLOAD_BY_FILENAME = 2
FDFS_UPLOAD_BY_FILE = 3
FDFS_DOWNLOAD_TO_BUFFER = 1
FDFS_DOWNLOAD_TO_FILE = 2
FDFS_NORMAL_LOGIC_FILENAME_LENGTH = (
FDFS_LOGIC_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
FDFS_TRUNK_FILENAME_LENGTH = (
FDFS_TRUE_FILE_PATH_LEN + FDFS_FILENAME_BASE64_LENGTH + FDFS_TRUNK_FILE_INFO_LEN + 1 + FDFS_FILE_EXT_NAME_MAX_LEN)
FDFS_TRUNK_LOGIC_FILENAME_LENGTH = (FDFS_TRUNK_FILENAME_LENGTH + (FDFS_LOGIC_FILE_PATH_LEN - FDFS_TRUE_FILE_PATH_LEN))
FDFS_VERSION_SIZE = 6
TRACKER_QUERY_STORAGE_FETCH_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE)
TRACKER_QUERY_STORAGE_STORE_BODY_LEN = (FDFS_GROUP_NAME_MAX_LEN + IP_ADDRESS_SIZE - 1 + FDFS_PROTO_PKG_LEN_SIZE + 1)
# status code, order is important!
FDFS_STORAGE_STATUS_INIT = 0
FDFS_STORAGE_STATUS_WAIT_SYNC = 1
FDFS_STORAGE_STATUS_SYNCING = 2
FDFS_STORAGE_STATUS_IP_CHANGED = 3
FDFS_STORAGE_STATUS_DELETED = 4
FDFS_STORAGE_STATUS_OFFLINE = 5
FDFS_STORAGE_STATUS_ONLINE = 6
FDFS_STORAGE_STATUS_ACTIVE = 7
FDFS_STORAGE_STATUS_RECOVERY = 9
FDFS_STORAGE_STATUS_NONE = 99
class Storage_server(object):
'''Class storage server for upload.'''
def __init__(self):
self.ip_addr = None
self.port = None
self.group_name = ''
self.store_path_index = 0
# Class tracker_header
class Tracker_header(object):
'''
Class for Pack or Unpack tracker header
struct tracker_header{
char pkg_len[FDFS_PROTO_PKG_LEN_SIZE],
char cmd,
char status,
}
'''
def __init__(self):
self.fmt = '!QBB' # pkg_len[FDFS_PROTO_PKG_LEN_SIZE] + cmd + status
self.st = struct.Struct(self.fmt)
self.pkg_len = 0
self.cmd = 0
self.status = 0
def _pack(self, pkg_len=0, cmd=0, status=0):
return self.st.pack(pkg_len, cmd, status)
def _unpack(self, bytes_stream):
self.pkg_len, self.cmd, self.status = self.st.unpack(bytes_stream)
return True
def header_len(self):
return self.st.size
def send_header(self, conn):
'''Send Tracker header to server.'''
header = self._pack(self.pkg_len, self.cmd, self.status)
try:
conn._sock.sendall(header)
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while writing to socket: %s' % (e.args,))
def recv_header(self, conn):
'''Receive response from server.
If successful, the class members (pkg_len, cmd, status) hold the response.
'''
try:
header = conn._sock.recv(self.header_len())
except (socket.error, socket.timeout) as e:
raise ConnectionError('[-] Error: while reading from socket: %s' % (e.args,))
self._unpack(header)
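# Added sketch (not part of the original module): Tracker_header packs
# (pkg_len, cmd, status) into the 10-byte '!QBB' frame that precedes every body.
def _demo_tracker_header():
    th = Tracker_header()
    raw = th._pack(pkg_len=16, cmd=TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS, status=0)
    assert len(raw) == th.header_len() == 10
    th._unpack(raw)
    assert (th.pkg_len, th.cmd, th.status) == (16, TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS, 0)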
def fdfs_pack_metadata(meta_dict):
ret = ''
for key in meta_dict:
ret += '%s%c%s%c' % (key, FDFS_FIELD_SEPERATOR, meta_dict[key], FDFS_RECORD_SEPERATOR)
return ret[0:-1]
def fdfs_unpack_metadata(bytes_stream):
li = bytes_stream.split(FDFS_RECORD_SEPERATOR)
return dict([item.split(FDFS_FIELD_SEPERATOR) for item in li])
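# Added sketch (not part of the original module): metadata travels as
# 'key\x02value' records joined by '\x01', and the two helpers above are
# inverses of each other.
def _demo_metadata_roundtrip():
    meta = {'ext_name': 'jpg', 'width': '160px'}
    packed = fdfs_pack_metadata(meta)
    assert fdfs_unpack_metadata(packed) == meta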
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: fdfs_test.py
import os
import sys
import time
try:
from fdfs_client.client import *
from fdfs_client.exceptions import *
except ImportError:
import_path = os.path.abspath('../')
sys.path.append(import_path)
from fdfs_client.client import *
from fdfs_client.exceptions import *
def usage():
s = 'Usage: python fdfs_test.py {options} [{local_filename} [{remote_file_id}]]\n'
s += 'options: upfile, upfileex, upbuffer, downfile, downbuffer, delete, listgroup, listsrv\n'
s += ' upslavefile, upslavebuffer, upappendfile, upappendbuffer\n'
s += '\tupfile {local_filename}\n'
s += '\tupbuffer {local_filename}\n'
s += '\tdownfile {local_filename} {remote_file_id}\n'
s += '\tdownbuffer {remote_file_id}\n'
s += '\tdelete {remote_file_id}\n'
s += '\tlistgroup {group_name}\n'
s += '\tlistall \n'
s += '\tlistsrv {group_name} [storage_ip]\n'
s += '\tsetmeta {remote_file_id}\n'
s += '\tgetmeta {remote_file_id}\n'
s += '\tupslavefile {local_filename} {remote_fileid} {prefix_name}\n'
s += '\tupappendfile {local_filename}\n'
s += '\ttruncate {truncate_filesize} {remote_fileid}\n'
s += '\tmodifyfile {local_filename} {remote_fileid} {file_offset}\n'
s += '\tmodifybuffer {local_filename} {remote_fileid} {file_offset}\n'
s += 'e.g.: python fdfs_test.py upfile test'
print(s)
sys.exit(0)
if len(sys.argv) < 2:
usage()
client = Fdfs_client(get_tracker_conf('client.conf'))
def upfile_func():
# Upload by filename
# usage: python fdfs_test.py upfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
file_size = os.stat(local_filename).st_size
# meta_buffer can be null.
meta_dict = {
'ext_name': 'py',
'file_size': str(file_size) + 'B'
}
t1 = time.time()
ret_dict = client.upload_by_filename(local_filename, meta_dict)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upfileex_func():
# Upload by file
# usage: python fdfs_test.py upfileex {local_filename}
if len(sys.argv) < 3:
usage()
return None
try:
local_filename = sys.argv[2]
t1 = time.time()
ret_dict = client.upload_by_file(local_filename)
t2 = time.time()
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
print('[+] time consume: %fs' % (t2 - t1))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavefile_func():
# upload slave file
# usage: python fdfs_test.py upslavefile {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
ret_dict = client.upload_slave_by_file(local_filename, remote_fileid, \
prefix_name)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upslavebuffer_func():
# upload slave by buffer
# usage: python fdfs_test.py upslavebuffer {local_filename} {remote_fileid} {prefix_name}
if len(sys.argv) < 5:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
prefix_name = sys.argv[4]
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.upload_slave_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def del_func():
# delete file
# usage: python fdfs_test.py delete {remote_fileid}
if len(sys.argv) < 3:
usage()
return None
try:
remote_file_id = sys.argv[2]
ret_tuple = client.delete_file(remote_file_id)
print('[+] %s' % ret_tuple[0])
print('[+] remote_fileid: %s' % ret_tuple[1])
print('[+] Storage IP: %s' % ret_tuple[2])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downfile_func():
# Download to file
# usage: python fdfs_test.py downfile {local_filename} {remote_fileid}
if len(sys.argv) < 4:
usage()
return None
try:
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
ret_dict = client.download_to_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_group_func():
# List one group info
# usage: python fdfs_test.py listgroup {group_name}
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
ret = client.list_one_group(group_name)
print(ret)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def listall_func():
# List all group info
# usage: python fdfs_test.py listall
if len(sys.argv) < 2:
usage()
return None
try:
ret_dict = client.list_all_groups()
print('=' * 80)
print('Groups count:', ret_dict['Groups count'])
for li in ret_dict['Groups']:
print('-' * 80)
print(li)
print('-' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def list_server_func():
# List all servers info of group
# usage: python fdfs_test.py listsrv {group_name} [storage_ip]
if len(sys.argv) < 3:
usage()
return None
try:
group_name = sys.argv[2]
if len(sys.argv) > 3:
storage_ip = sys.argv[3]
else:
storage_ip = None
ret_dict = client.list_servers(group_name, storage_ip)
print('=' * 80)
print('Group name: %s' % ret_dict['Group name'])
print('=' * 80)
i = 1
for serv in ret_dict['Servers']:
print('Storage server %d:' % i)
print('=' * 80)
print(serv)
i += 1
print('=' * 80)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upbuffer_func():
# Upload by buffer
# usage: python fdfs_test.py upbuffer {local_filename} [remote_file_ext_name]
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
if len(sys.argv) > 3:
ext_name = sys.argv[3]
else:
ext_name = None
# meta_buffer can be null.
meta_buffer = {
'ext_name': 'gif',
'width': '150px',
'height': '80px'
}
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_by_buffer(file_buffer, ext_name, meta_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def downbuffer_func():
# Download to buffer
# usage: python fdfs_test.py downbuffer {remote_file_id}
# e.g.: 'group1/M00/00/00/wKjzhU_rLNmjo2-1AAAamGDONEA5818.py'
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.download_to_buffer(remote_fileid)
print('Downloaded content:')
print(ret_dict['Content'])
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def get_meta_data_func():
# Get meta data of remote file
# usage python fdfs_test.py getmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
try:
ret_dict = client.get_meta_data(remote_fileid)
print(ret_dict)
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def set_meta_data_func():
# Set meta data of remote file
# usage python fdfs_test.py setmeta {remote_file_id}
if len(sys.argv) < 3:
usage()
return None
remote_fileid = sys.argv[2]
meta_dict = {
'ext_name': 'jgp',
'width': '160px',
'hight': '80px',
}
try:
ret_dict = client.set_meta_data(remote_fileid, meta_dict)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendfile_func():
# Upload an appender file by filename
# usage: python fdfs_test.py upappendfile {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
ret_dict = client.upload_appender_by_file(local_filename)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def upappendbuffer_func():
# Upload an appender file by buffer
# usage: python fdfs_test.py upappendbuffer {local_filename}
if len(sys.argv) < 3:
usage()
return None
local_filename = sys.argv[2]
try:
with open(local_filename, 'rb') as f:
file_buffer = f.read()
ret_dict = client.upload_appender_by_buffer(file_buffer)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendfile_func():
# Append a remote file
# usage: python fdfs_test.py appendfile {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
ret_dict = client.append_by_file(local_filename, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def appendbuffer_func():
# Append a remote file by buffer
# usage: python fdfs_test.py appendbuffer {local_filename} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.append_by_buffer(filebuffer, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def truncate_func():
# Truncate file
# usage: python fdfs_test.py truncate {truncate_filesize} {remote_file_id}
if len(sys.argv) < 4:
usage()
return None
truncate_filesize = int(sys.argv[2])
remote_fileid = sys.argv[3]
try:
ret_dict = client.truncate_file(truncate_filesize, remote_fileid)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifyfile_func():
# Modify file by filename
# usage: python fdfs_test.py modifyfile {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
ret_dict = client.modify_by_filename(local_filename, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
def modifybuffer_func():
# Modify file by buffer
# usage: python fdfs_test.py modifybuffer {local_filename} {remote_fileid} [file_offset]
if len(sys.argv) < 4:
usage()
return None
local_filename = sys.argv[2]
remote_fileid = sys.argv[3]
if len(sys.argv) > 4:
file_offset = int(sys.argv[4])
else:
file_offset = 0
try:
with open(local_filename, 'rb') as f:
filebuffer = f.read()
ret_dict = client.modify_by_buffer(filebuffer, remote_fileid, file_offset)
for key in ret_dict:
print('[+] %s : %s' % (key, ret_dict[key]))
except (ConnectionError, ResponseError, DataError) as e:
print(e)
result = {
'upfile': lambda: upfile_func(),
'upfileex': lambda: upfileex_func(),
'upbuffer': lambda: upbuffer_func(),
'delete': lambda: del_func(),
'downfile': lambda: downfile_func(),
'downbuffer': lambda: downbuffer_func(),
'listgroup': lambda: list_group_func(),
'listall': lambda: listall_func(),
'listsrv': lambda: list_server_func(),
'getmeta': lambda: get_meta_data_func(),
'setmeta': lambda: set_meta_data_func(),
'upslavefile': lambda: upslavefile_func(),
'upappendfile': lambda: upappendfile_func(),
'upappendbuffer': lambda: upappendbuffer_func(),
'appendfile': lambda: appendfile_func(),
'appendbuffer': lambda: appendbuffer_func(),
'truncate': lambda: truncate_func(),
'modifyfile': lambda: modifyfile_func(),
'modifybuffer': lambda: modifybuffer_func(),
'-h': lambda: usage(),
}[sys.argv[1].lower()]()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: storage_client.py
import os
import stat
import errno
import struct
import socket
import datetime
import platform
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
# from test_fdfs.sendfile import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def tcp_send_file(conn, filename, buffer_size=1024):
'''
Send file to server, and split into multiple pkgs while sending.
arguments:
@conn: connection
@filename: string
@buffer_size: int ,send buffer size
@Return int: file size if success else raise ConnectionError.
'''
file_size = 0
with open(filename, 'rb') as f:
while 1:
try:
send_buffer = f.read(buffer_size)
send_size = len(send_buffer)
if send_size == 0:
break
tcp_send_data(conn, send_buffer)
file_size += send_size
except ConnectionError as e:
raise ConnectionError('[-] Error while uploading file(%s).' % e.args)
except IOError as e:
raise DataError('[-] Error while reading local file(%s).' % (e.args,))
return file_size
def tcp_send_file_ex(conn, filename, buffer_size=4096):
'''
Send file to server. Using linux system call 'sendfile'.
arguments:
@conn: connection
@filename: string
@return long, sent size
'''
if 'linux' not in sys.platform.lower():
raise DataError('[-] Error: \'sendfile\' system call only available on linux.')
nbytes = 0
offset = 0
sock_fd = conn.get_sock().fileno()
with open(filename, 'rb') as f:
in_fd = f.fileno()
while 1:
try:
# use the standard library os.sendfile (linux only, which the platform check
# above already enforces) in place of the commented-out third-party sendfile
sent = os.sendfile(sock_fd, in_fd, offset, buffer_size)
if 0 == sent:
break
nbytes += sent
offset += sent
except OSError as e:
if e.errno == errno.EAGAIN:
continue
raise
return nbytes
def tcp_recv_file(conn, local_filename, file_size, buffer_size=1024):
'''
Receive file from server, fragmenting it while receiving and writing it to disk.
arguments:
@conn: connection
@local_filename: string
@file_size: int, remote file size
@buffer_size: int, receive buffer size
@Return int: file size if success else raise ConnectionError.
'''
total_file_size = 0
flush_size = 0
remain_bytes = file_size
with open(local_filename, 'wb+') as f:
while remain_bytes > 0:
try:
if remain_bytes >= buffer_size:
file_buffer, recv_size = tcp_recv_response(conn, buffer_size, buffer_size)
else:
file_buffer, recv_size = tcp_recv_response(conn, remain_bytes, buffer_size)
f.write(file_buffer)
remain_bytes -= buffer_size
total_file_size += recv_size
flush_size += recv_size
if flush_size >= 4096:
f.flush()
flush_size = 0
except ConnectionError as e:
raise ConnectionError('[-] Error: while downloading file(%s).' % e.args)
except IOError as e:
raise DataError('[-] Error: while writing local file(%s).' % (e.args,))
return total_file_size
class Storage_client(object):
'''
The Storage_client class talks to a storage server.
Note: the host_tuple argument holds the storage server ip address and should contain a single element.
'''
def __init__(self, *kwargs):
conn_kwargs = {
'name': 'Storage Pool',
'host_tuple': (kwargs[0],),
'port': kwargs[1],
'timeout': kwargs[2]
}
self.pool = ConnectionPool(**conn_kwargs)
return None
def __del__(self):
try:
self.pool.destroy()
self.pool = None
except:
pass
def update_pool(self, old_store_serv, new_store_serv, timeout=30):
'''
Update connection pool of storage client.
The connection pool of the storage client must be rebuilt when the storage server changes;
if the server is unchanged, nothing is done.
'''
if old_store_serv.ip_addr == new_store_serv.ip_addr:
return None
self.pool.destroy()
conn_kwargs = {
'name': 'Storage_pool',
'host_tuple': (new_store_serv.ip_addr,),
'port': new_store_serv.port,
'timeout': timeout
}
self.pool = ConnectionPool(**conn_kwargs)
return True
def _storage_do_upload_file(self, tracker_client, store_serv, file_buffer, file_size=None, upload_type=None,
meta_dict=None, cmd=None, master_filename=None, prefix_name=None, file_ext_name=None):
'''
Core of uploading a file.
arguments:
@tracker_client: Tracker_client, used to talk to the tracker server
@store_serv: Storage_server, as returned from querying the tracker server
@file_buffer: string, file name or file buffer to send
@file_size: int
@upload_type: int, optional: FDFS_UPLOAD_BY_FILE, FDFS_UPLOAD_BY_FILENAME,
FDFS_UPLOAD_BY_BUFFER
@meta_dict: dictionary, metadata to store with the file
@cmd: int, see the fdfs protocol commands
@master_filename: string, used when uploading a slave file
@prefix_name: string
@file_ext_name: string
@Return dictionary
{
'Group name' : group_name,
'Remote file_id' : remote_file_id,
'Status' : status,
'Local file name' : local_filename,
'Uploaded size' : upload_size,
'Storage IP' : storage_ip
}
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
master_filename_len = len(master_filename) if master_filename else 0
prefix_name_len = len(prefix_name) if prefix_name else 0
upload_slave = len(store_serv.group_name) and master_filename_len
file_ext_name = str(file_ext_name) if file_ext_name else ''
# non_slave_fmt |-store_path_index(1)-file_size(8)-file_ext_name(6)-|
non_slave_fmt = '!B Q %ds' % FDFS_FILE_EXT_NAME_MAX_LEN
# slave_fmt |-master_len(8)-file_size(8)-prefix_name(16)-file_ext_name(6)
# -master_name(master_filename_len)-|
slave_fmt = '!Q Q %ds %ds %ds' % (FDFS_FILE_PREFIX_MAX_LEN, FDFS_FILE_EXT_NAME_MAX_LEN, master_filename_len)
th.pkg_len = struct.calcsize(slave_fmt) if upload_slave else struct.calcsize(non_slave_fmt)
th.pkg_len += file_size
th.cmd = cmd
th.send_header(store_conn)
if upload_slave:
send_buffer = struct.pack(
slave_fmt, master_filename_len, file_size, prefix_name, file_ext_name, master_filename)
else:
send_buffer = struct.pack(non_slave_fmt, store_serv.store_path_index, file_size, file_ext_name.encode())
try:
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
send_file_size = tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
send_file_size = tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
if recv_size <= FDFS_GROUP_NAME_MAX_LEN:
errmsg = '[-] Error: Storage response length is not match, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
# recv_fmt: |-group_name(16)-remote_file_name(recv_size - 16)-|
recv_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, th.pkg_len - FDFS_GROUP_NAME_MAX_LEN)
(group_name, remote_name) = struct.unpack(recv_fmt, recv_buffer)
remote_filename = remote_name.strip(b'\x00')
if meta_dict and len(meta_dict) > 0:
status = self.storage_set_metadata(tracker_client, store_serv, remote_filename, meta_dict)
if status != 0:
# rollback
self.storage_delete_file(tracker_client, store_serv, remote_filename)
raise DataError('[-] Error: %d, %s' % (status, os.strerror(status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Group name': group_name.strip(b'\x00'),
'Remote file_id': group_name.strip(b'\x00') + __os_sep__.encode() + remote_filename,
'Status': 'Upload successed.',
'Local file name': file_buffer if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else '',
'Uploaded size': appromix(send_file_size) if (upload_type == FDFS_UPLOAD_BY_FILENAME
or upload_type == FDFS_UPLOAD_BY_FILE
) else appromix(len(file_buffer)),
'Storage IP': store_serv.ip_addr
}
return ret_dic
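# Note (added): for a plain upload the body sent before the file data is
# struct.pack('!B Q 6s', store_path_index, file_size, ext_name), i.e.
# 1 + 8 + FDFS_FILE_EXT_NAME_MAX_LEN = 15 bytes; for a slave upload it is
# '!Q Q 16s 6s <master_len>s' = 8 + 8 + 16 + 6 + len(master_filename) bytes.
# th.pkg_len announces that prefix plus the file size itself.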
def storage_upload_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_by_buffer(self, tracker_client, store_serv, file_buffer, file_ext_name=None, meta_dict=None):
buffer_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, buffer_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_FILE, None, None, file_ext_name)
def storage_upload_slave_by_filename(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_file(self, tracker_client, store_serv, filename, prefix_name, remote_filename,
meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, remote_filename,
prefix_name, file_ext_name)
def storage_upload_slave_by_buffer(self, tracker_client, store_serv, filebuffer, remote_filename, meta_dict,
file_ext_name):
file_size = len(filebuffer)
return self._storage_do_upload_file(tracker_client, store_serv, filebuffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE, None, remote_filename,
file_ext_name)
def storage_upload_appender_by_filename(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILENAME,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_file(self, tracker_client, store_serv, filename, meta_dict=None):
file_size = os.stat(filename).st_size
file_ext_name = get_file_ext_name(filename)
return self._storage_do_upload_file(tracker_client, store_serv, filename, file_size, FDFS_UPLOAD_BY_FILE,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_upload_appender_by_buffer(self, tracker_client, store_serv, file_buffer, meta_dict=None,
file_ext_name=None):
file_size = len(file_buffer)
return self._storage_do_upload_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
meta_dict, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, None, None,
file_ext_name)
def storage_delete_file(self, tracker_client, store_serv, remote_filename):
'''
Delete file from storage server.
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_DELETE_FILE
file_name_len = len(remote_filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
try:
th.send_header(store_conn)
# del_fmt: |-group_name(16)-filename(len)-|
del_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(del_fmt, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.'
# % (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
# recv_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
remote_filename = store_serv.group_name + __os_sep__.encode() + remote_filename
return ('Delete file succeeded.', remote_filename, store_serv.ip_addr)
def _storage_do_download_file(self, tracker_client, store_serv, file_buffer, offset, download_size,
download_type, remote_filename):
'''
Core of download file from storage server.
You can choose the download type, either FDFS_DOWNLOAD_TO_FILE or
FDFS_DOWNLOAD_TO_BUFFER, and you can specify a file offset.
@Return dictionary
'Remote file name' : remote_filename,
'Content' : local_filename or buffer,
'Download size' : download_size,
'Storage IP' : storage_ip
'''
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE
try:
th.send_header(store_conn)
# down_fmt: |-offset(8)-download_bytes(8)-group_name(16)-remote_filename(len)-|
down_fmt = '!Q Q %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(down_fmt, offset, download_size, store_serv.group_name, remote_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: remote file %s is not exist.' %
# (store_serv.group_name + __os_sep__.encode() + remote_filename))
if th.status != 0:
raise DataError('Error: %d %s' % (th.status, os.strerror(th.status)))
if download_type == FDFS_DOWNLOAD_TO_FILE:
total_recv_size = tcp_recv_file(store_conn, file_buffer, th.pkg_len)
elif download_type == FDFS_DOWNLOAD_TO_BUFFER:
recv_buffer, total_recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dic = {
'Remote file_id': store_serv.group_name + __os_sep__.encode() + remote_filename,
'Content': file_buffer if download_type == FDFS_DOWNLOAD_TO_FILE else recv_buffer,
'Download size': appromix(total_recv_size),
'Storage IP': store_serv.ip_addr
}
return ret_dic
def storage_download_to_file(self, tracker_client, store_serv, local_filename, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, local_filename, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_FILE, remote_filename)
def storage_download_to_buffer(self, tracker_client, store_serv, file_buffer, file_offset, download_bytes,
remote_filename):
return self._storage_do_download_file(tracker_client, store_serv, file_buffer, file_offset, download_bytes,
FDFS_DOWNLOAD_TO_BUFFER, remote_filename)
def storage_set_metadata(self, tracker_client, store_serv, remote_filename, meta_dict,
op_flag=STORAGE_SET_METADATA_FLAG_OVERWRITE):
ret = 0
conn = self.pool.get_connection()
remote_filename_len = len(remote_filename)
meta_buffer = fdfs_pack_metadata(meta_dict)
meta_len = len(meta_buffer)
th = Tracker_header()
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + 1 + FDFS_GROUP_NAME_MAX_LEN + remote_filename_len + meta_len
th.cmd = STORAGE_PROTO_CMD_SET_METADATA
try:
th.send_header(conn)
# meta_fmt: |-filename_len(8)-meta_len(8)-op_flag(1)-group_name(16)
# -filename(remote_filename_len)-meta(meta_len)|
meta_fmt = '!Q Q c %ds %ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len, meta_len)
send_buffer = struct.pack(meta_fmt, remote_filename_len, meta_len, op_flag, store_serv.group_name,
remote_filename, meta_buffer)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
ret = th.status
except:
raise
finally:
self.pool.release(conn)
return ret
def storage_get_metadata(self, tracker_client, store_serv, remote_file_name):
store_conn = self.pool.get_connection()
th = Tracker_header()
remote_filename_len = len(remote_file_name)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + remote_filename_len
th.cmd = STORAGE_PROTO_CMD_GET_METADATA
try:
th.send_header(store_conn)
# meta_fmt: |-group_name(16)-filename(remote_filename_len)-|
meta_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, remote_filename_len)
send_buffer = struct.pack(meta_fmt, store_serv.group_name, remote_file_name.encode())
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
# if th.status == 2:
# raise DataError('[-] Error: Remote file %s has no meta data.'
# % (store_serv.group_name + __os_sep__.encode() + remote_file_name))
if th.status != 0:
raise DataError('[-] Error:%d, %s' % (th.status, os.strerror(th.status)))
if th.pkg_len == 0:
ret_dict = {}
meta_buffer, recv_size = tcp_recv_response(store_conn, th.pkg_len)
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = fdfs_unpack_metadata(meta_buffer)
return ret_dict
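# Metadata usage sketch (illustrative only). Note the asymmetry in the code
# above: storage_set_metadata packs remote_filename directly (so it expects
# bytes), while storage_get_metadata calls .encode() on its argument (so it
# expects str).
# meta = {'width': '1024', 'height': '768'}
# storage.storage_set_metadata(tracker, store_serv, remote_filename_bytes, meta)
# print(storage.storage_get_metadata(tracker, store_serv, remote_filename_str))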
def _storage_do_append_file(self, tracker_client, store_serv, file_buffer, file_size, upload_type,
appended_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
appended_filename_len = len(appended_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appended_filename_len + file_size
th.cmd = STORAGE_PROTO_CMD_APPEND_FILE
try:
th.send_header(store_conn)
# append_fmt: |-appended_filename_len(8)-file_size(8)-appended_filename(len)
# -filecontent(filesize)-|
append_fmt = '!Q Q %ds' % appended_filename_len
send_buffer = struct.pack(append_fmt, appended_filename_len, file_size, appended_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
tcp_send_file(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, file_buffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
tcp_send_file_ex(store_conn, file_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Append file succeeded.'
ret_dict['Appender file name'] = store_serv.group_name + __os_sep__.encode() + appended_filename
ret_dict['Appended size'] = appromix(file_size)
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_append_by_filename(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size,
FDFS_UPLOAD_BY_FILENAME, appended_filename)
def storage_append_by_file(self, tracker_client, store_serv, local_filename, appended_filename):
file_size = os.stat(local_filename).st_size
return self._storage_do_append_file(tracker_client, store_serv, local_filename, file_size, FDFS_UPLOAD_BY_FILE,
appended_filename)
def storage_append_by_buffer(self, tracker_client, store_serv, file_buffer, appended_filename):
file_size = len(file_buffer)
return self._storage_do_append_file(tracker_client, store_serv, file_buffer, file_size, FDFS_UPLOAD_BY_BUFFER,
appended_filename)
def _storage_do_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 2 + appender_filename_len
try:
th.send_header(store_conn)
# truncate_fmt:|-appender_filename_len(8)-truncate_filesize(8)
# -appender_filename(len)-|
truncate_fmt = '!Q Q %ds' % appender_filename_len
send_buffer = struct.pack(truncate_fmt, appender_filename_len, truncated_filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Truncate succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_truncate_file(self, tracker_client, store_serv, truncated_filesize, appender_filename):
return self._storage_do_truncate_file(tracker_client, store_serv, truncated_filesize, appender_filename)
def _storage_do_modify_file(self, tracker_client, store_serv, upload_type, filebuffer, offset, filesize,
appender_filename):
store_conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = STORAGE_PROTO_CMD_MODIFY_FILE
appender_filename_len = len(appender_filename)
th.pkg_len = FDFS_PROTO_PKG_LEN_SIZE * 3 + appender_filename_len + filesize
try:
th.send_header(store_conn)
# modify_fmt: |-filename_len(8)-offset(8)-filesize(8)-filename(len)-|
modify_fmt = '!Q Q Q %ds' % appender_filename_len
send_buffer = struct.pack(modify_fmt, appender_filename_len, offset, filesize, appender_filename)
tcp_send_data(store_conn, send_buffer)
if upload_type == FDFS_UPLOAD_BY_FILENAME:
upload_size = tcp_send_file(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_BUFFER:
tcp_send_data(store_conn, filebuffer)
elif upload_type == FDFS_UPLOAD_BY_FILE:
upload_size = tcp_send_file_ex(store_conn, filebuffer)
th.recv_header(store_conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
except:
raise
finally:
self.pool.release(store_conn)
ret_dict = {}
ret_dict['Status'] = 'Modify succeeded.'
ret_dict['Storage IP'] = store_serv.ip_addr
return ret_dict
def storage_modify_by_filename(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILENAME, filename, offset,
filesize, appender_filename)
def storage_modify_by_file(self, tracker_client, store_serv, filename, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_FILE, filename, offset, filesize,
appender_filename)
def storage_modify_by_buffer(self, tracker_client, store_serv, filebuffer, offset, filesize, appender_filename):
return self._storage_do_modify_file(tracker_client, store_serv, FDFS_UPLOAD_BY_BUFFER, filebuffer, offset,
filesize, appender_filename)
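# End-to-end usage sketch. Assumptions (not shown in this file): the methods
# above live on the Storage_client class of this package, the caller already
# holds a configured connection pool, and the upload result dict exposes a
# 'Remote file_id' key as in the stock fdfs_client.
def upload_then_delete(storage_client, tracker_client, payload=b'hello fdfs'):
    from fdfs_client.utils import split_remote_fileid
    # Ask the tracker which storage server should take the upload.
    store_serv = tracker_client.tracker_query_storage_stor_without_group()
    ret = storage_client.storage_upload_by_buffer(tracker_client, store_serv, payload, file_ext_name='txt')
    group_name, remote_filename = split_remote_fileid(ret['Remote file_id'])
    # Deletes must go through the "update" storage server for that file.
    update_serv = tracker_client.tracker_query_storage_update(group_name, remote_filename)
    return storage_client.storage_delete_file(tracker_client, update_serv, remote_filename)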
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tracker_client.py
import struct
import socket
from datetime import datetime
from fdfs_client.fdfs_protol import *
from fdfs_client.connection import *
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
from fdfs_client.utils import *
def parse_storage_status(status_code):
try:
ret = {
FDFS_STORAGE_STATUS_INIT: lambda: 'INIT',
FDFS_STORAGE_STATUS_WAIT_SYNC: lambda: 'WAIT_SYNC',
FDFS_STORAGE_STATUS_SYNCING: lambda: 'SYNCING',
FDFS_STORAGE_STATUS_IP_CHANGED: lambda: 'IP_CHANGED',
FDFS_STORAGE_STATUS_DELETED: lambda: 'DELETED',
FDFS_STORAGE_STATUS_OFFLINE: lambda: 'OFFLINE',
FDFS_STORAGE_STATUS_ONLINE: lambda: 'ONLINE',
FDFS_STORAGE_STATUS_ACTIVE: lambda: 'ACTIVE',
FDFS_STORAGE_STATUS_RECOVERY: lambda: 'RECOVERY'
}[status_code]()
except KeyError:
ret = 'UNKNOWN'
return ret
class Storage_info(object):
def __init__(self):
self.status = 0
self.id = ''
self.ip_addr = ''
self.domain_name = ''
self.src_id = ''
self.version = ''
self.join_time = datetime.fromtimestamp(0).isoformat()
self.up_time = datetime.fromtimestamp(0).isoformat()
self.totalMB = ''
self.freeMB = ''
self.upload_prio = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_write_path = 0
self.storage_port = 23000
self.storage_http_port = 80
self.alloc_count = 0
self.current_count = 0
self.max_count = 0
self.total_upload_count = 0
self.success_upload_count = 0
self.total_append_count = 0
self.success_append_count = 0
self.total_modify_count = 0
self.success_modify_count = 0
self.total_truncate_count = 0
self.success_truncate_count = 0
self.total_setmeta_count = 0
self.success_setmeta_count = 0
self.total_del_count = 0
self.success_del_count = 0
self.total_download_count = 0
self.success_download_count = 0
self.total_getmeta_count = 0
self.success_getmeta_count = 0
self.total_create_link_count = 0
self.success_create_link_count = 0
self.total_del_link_count = 0
self.success_del_link_count = 0
self.total_upload_bytes = 0
self.success_upload_bytes = 0
self.total_append_bytes = 0
self.success_append_bytes = 0
self.total_modify_bytes = 0
self.success_modify_bytes = 0
self.total_download_bytes = 0
self.success_download_bytes = 0
self.total_sync_in_bytes = 0
self.success_sync_in_bytes = 0
self.total_sync_out_bytes = 0
self.success_sync_out_bytes = 0
self.total_file_open_count = 0
self.success_file_open_count = 0
self.total_file_read_count = 0
self.success_file_read_count = 0
self.total_file_write_count = 0
self.success_file_write_count = 0
self.last_source_sync = datetime.fromtimestamp(0).isoformat()
self.last_sync_update = datetime.fromtimestamp(0).isoformat()
self.last_synced_time = datetime.fromtimestamp(0).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(0).isoformat()
self.if_trunk_server = ''
# fmt = |-status(1)-ipaddr(16)-domain(128)-srcipaddr(16)-ver(6)-52*8-|
self.fmt = '!B 16s 16s 128s 16s 6s 10Q 4s4s4s 42Q?'
def set_info(self, bytes_stream):
(self.status, self.id, ip_addr, domain_name, self.src_id, version, join_time, up_time, totalMB, freeMB,
self.upload_prio, self.store_path_count, self.subdir_count_per_path, self.curr_write_path, self.storage_port,
self.storage_http_port, self.alloc_count, self.current_count, self.max_count, self.total_upload_count,
self.success_upload_count, self.total_append_count, self.success_append_count, self.total_modify_count,
self.success_modify_count, self.total_truncate_count, self.success_truncate_count, self.total_setmeta_count,
self.success_setmeta_count, self.total_del_count, self.success_del_count, self.total_download_count,
self.success_download_count, self.total_getmeta_count, self.success_getmeta_count,
self.total_create_link_count, self.success_create_link_count, self.total_del_link_count,
self.success_del_link_count, self.total_upload_bytes, self.success_upload_bytes, self.total_append_bytes,
self.success_append_bytes, self.total_modify_bytes, self.success_modify_bytes, self.total_download_bytes,
self.success_download_bytes, self.total_sync_in_bytes, self.success_sync_in_bytes, self.total_sync_out_bytes,
self.success_sync_out_bytes, self.total_file_open_count, self.success_file_open_count,
self.total_file_read_count, self.success_file_read_count, self.total_file_write_count,
self.success_file_write_count, last_source_sync, last_sync_update, last_synced_time, last_heartbeat_time,
self.if_trunk_server,) = struct.unpack(self.fmt, bytes_stream)
try:
self.ip_addr = ip_addr.strip(b'\x00')
self.domain_name = domain_name.strip(b'\x00')
self.version = version.strip(b'\x00')
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError as e:
raise ResponseError('[-] Error: disk space overrun, cannot be represented.')
self.join_time = datetime.fromtimestamp(join_time).isoformat()
self.up_time = datetime.fromtimestamp(up_time).isoformat()
self.last_source_sync = datetime.fromtimestamp(last_source_sync).isoformat()
self.last_sync_update = datetime.fromtimestamp(last_sync_update).isoformat()
self.last_synced_time = datetime.fromtimestamp(last_synced_time).isoformat()
self.last_heartbeat_time = datetime.fromtimestamp(last_heartbeat_time).isoformat()
return True
def __str__(self):
'''Transform to readable string.'''
s = 'Storage information:\n'
s += '\tip_addr = %s (%s)\n' % (self.ip_addr, parse_storage_status(self.status))
s += '\thttp domain = %s\n' % self.domain_name
s += '\tversion = %s\n' % self.version
s += '\tjoin time = %s\n' % self.join_time
s += '\tup time = %s\n' % self.up_time
s += '\ttotal storage = %s\n' % self.totalMB
s += '\tfree storage = %s\n' % self.freeMB
s += '\tupload priority = %d\n' % self.upload_prio
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.storage_http_port
s += '\tcurrent write path = %d\n' % self.curr_write_path
s += '\tsource ip_addr = %s\n' % self.ip_addr
s += '\tif_trunk_server = %d\n' % self.if_trunk_server
s += '\ttotal upload count = %ld\n' % self.total_upload_count
s += '\tsuccess upload count = %ld\n' % self.success_upload_count
s += '\ttotal download count = %ld\n' % self.total_download_count
s += '\tsuccess download count = %ld\n' % self.success_download_count
s += '\ttotal append count = %ld\n' % self.total_append_count
s += '\tsuccess append count = %ld\n' % self.success_append_count
s += '\ttotal modify count = %ld\n' % self.total_modify_count
s += '\tsuccess modify count = %ld\n' % self.success_modify_count
s += '\ttotal truncate count = %ld\n' % self.total_truncate_count
s += '\tsuccess truncate count = %ld\n' % self.success_truncate_count
s += '\ttotal delete count = %ld\n' % self.total_del_count
s += '\tsuccess delete count = %ld\n' % self.success_del_count
s += '\ttotal set_meta count = %ld\n' % self.total_setmeta_count
s += '\tsuccess set_meta count = %ld\n' % self.success_setmeta_count
s += '\ttotal get_meta count = %ld\n' % self.total_getmeta_count
s += '\tsuccess get_meta count = %ld\n' % self.success_getmeta_count
s += '\ttotal create link count = %ld\n' % self.total_create_link_count
s += '\tsuccess create link count = %ld\n' % self.success_create_link_count
s += '\ttotal delete link count = %ld\n' % self.total_del_link_count
s += '\tsuccess delete link count = %ld\n' % self.success_del_link_count
s += '\ttotal upload bytes = %ld\n' % self.total_upload_bytes
s += '\tsuccess upload bytes = %ld\n' % self.success_upload_bytes
s += '\ttotal download bytes = %ld\n' % self.total_download_bytes
s += '\tsuccess download bytes = %ld\n' % self.success_download_bytes
s += '\ttotal append bytes = %ld\n' % self.total_append_bytes
s += '\tsuccess append bytes = %ld\n' % self.success_append_bytes
s += '\ttotal modify bytes = %ld\n' % self.total_modify_bytes
s += '\tsuccess modify bytes = %ld\n' % self.success_modify_bytes
s += '\ttotal sync_in bytes = %ld\n' % self.total_sync_in_bytes
s += '\tsuccess sync_in bytes = %ld\n' % self.success_sync_in_bytes
s += '\ttotal sync_out bytes = %ld\n' % self.total_sync_out_bytes
s += '\tsuccess sync_out bytes = %ld\n' % self.success_sync_out_bytes
s += '\ttotal file open count = %ld\n' % self.total_file_open_count
s += '\tsuccess file open count = %ld\n' % self.success_file_open_count
s += '\ttotal file read count = %ld\n' % self.total_file_read_count
s += '\tsuccess file read count = %ld\n' % self.success_file_read_count
s += '\ttotal file write count = %ld\n' % self.total_file_write_count
s += '\tsuccess file write count = %ld\n' % self.success_file_write_count
s += '\tlast heartbeat time = %s\n' % self.last_heartbeat_time
s += '\tlast source update = %s\n' % self.last_source_sync
s += '\tlast sync update = %s\n' % self.last_sync_update
s += '\tlast synced time = %s\n' % self.last_synced_time
return s
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Group_info(object):
def __init__(self):
self.group_name = ''
self.totalMB = ''
self.freeMB = ''
self.trunk_freeMB = ''
self.count = 0
self.storage_port = 0
self.store_http_port = 0
self.active_count = 0
self.curr_write_server = 0
self.store_path_count = 0
self.subdir_count_per_path = 0
self.curr_trunk_file_id = 0
self.fmt = '!%ds 11Q' % (FDFS_GROUP_NAME_MAX_LEN + 1)
return None
def __str__(self):
s = 'Group information:\n'
s += '\tgroup name = %s\n' % self.group_name
s += '\ttotal disk space = %s\n' % self.totalMB
s += '\tdisk free space = %s\n' % self.freeMB
s += '\ttrunk free space = %s\n' % self.trunk_freeMB
s += '\tstorage server count = %d\n' % self.count
s += '\tstorage port = %d\n' % self.storage_port
s += '\tstorage HTTP port = %d\n' % self.store_http_port
s += '\tactive server count = %d\n' % self.active_count
s += '\tcurrent write server index = %d\n' % self.curr_write_server
s += '\tstore path count = %d\n' % self.store_path_count
s += '\tsubdir count per path = %d\n' % self.subdir_count_per_path
s += '\tcurrent trunk file id = %d\n' % self.curr_trunk_file_id
return s
def set_info(self, bytes_stream):
(group_name, totalMB, freeMB, trunk_freeMB, self.count, self.storage_port, self.store_http_port,
self.active_count, self.curr_write_server, self.store_path_count, self.subdir_count_per_path,
self.curr_trunk_file_id) = struct.unpack(self.fmt, bytes_stream)
try:
self.group_name = group_name.strip(b'\x00')
self.freeMB = appromix(freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.totalMB = appromix(totalMB, FDFS_SPACE_SIZE_BASE_INDEX)
self.trunk_freeMB = appromix(trunk_freeMB, FDFS_SPACE_SIZE_BASE_INDEX)
except ValueError:
raise DataError('[-] Error: disk space overrun, cannot be represented.')
def get_fmt_size(self):
return struct.calcsize(self.fmt)
class Tracker_client(object):
'''Class Tracker client.'''
def __init__(self, pool):
self.pool = pool
def tracker_list_servers(self, group_name, storage_ip=None):
'''
List servers in a storage group
'''
conn = self.pool.get_connection()
th = Tracker_header()
ip_len = len(storage_ip) if storage_ip else 0
if ip_len >= IP_ADDRESS_SIZE:
ip_len = IP_ADDRESS_SIZE - 1
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + ip_len
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
store_ip_addr = storage_ip or ''
storage_ip_fmt = '!%ds' % ip_len
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name) + struct.pack(storage_ip_fmt, store_ip_addr)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
si = Storage_info()
si_fmt_size = si.get_fmt_size()
recv_size = len(recv_buffer)
if recv_size % si_fmt_size != 0:
errinfo = '[-] Error: response size does not match, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errinfo)
except ConnectionError:
raise
finally:
self.pool.release(conn)
num_storage = recv_size // si_fmt_size
si_list = []
i = 0
while num_storage:
si.set_info(recv_buffer[(i * si_fmt_size): ((i + 1) * si_fmt_size)])
si_list.append(si)
si = Storage_info()
num_storage -= 1
i += 1
ret_dict = {}
ret_dict['Group name'] = group_name
ret_dict['Servers'] = si_list
return ret_dict
def tracker_list_one_group(self, group_name):
conn = self.pool.get_connection()
th = Tracker_header()
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP
# group_fmt: |-group_name(16)-|
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
try:
th.send_header(conn)
send_buffer = struct.pack(group_fmt, group_name)
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
group_info = Group_info()
group_info.set_info(recv_buffer)
except ConnectionError:
raise
finally:
self.pool.release(conn)
return group_info
def tracker_list_all_groups(self):
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
except:
raise
finally:
self.pool.release(conn)
gi = Group_info()
gi_fmt_size = gi.get_fmt_size()
if recv_size % gi_fmt_size != 0:
errmsg = '[-] Error: Response size mismatch, expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
num_groups = recv_size // gi_fmt_size
ret_dict = {}
ret_dict['Groups count'] = num_groups
gi_list = []
i = 0
while num_groups:
gi.set_info(recv_buffer[i * gi_fmt_size: (i + 1) * gi_fmt_size])
gi_list.append(gi)
gi = Group_info()
i += 1
num_groups -= 1
ret_dict['Groups'] = gi_list
return ret_dict
def tracker_query_storage_stor_without_group(self):
'''Query storage server for upload, without group name.
Return: Storage_server object'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE
try:
th.send_header(conn)
th.recv_header(conn)
if th.status != 0:
raise DataError('[-] Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def tracker_query_storage_stor_with_group(self, group_name):
'''Query storage server for upload, based on group name.
arguments:
@group_name: string
@Return Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
th.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN
th.send_header(conn)
group_fmt = '!%ds' % FDFS_GROUP_NAME_MAX_LEN
send_buffer = struct.pack(group_fmt, group_name)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_STORE_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (TRACKER_QUERY_STORAGE_STORE_BODY_LEN, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ipaddr(16-1)-port(8)-store_path_index(1)-|
recv_fmt = '!%ds %ds Q B' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group, ip_addr, store_serv.port, store_serv.store_path_index) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group.strip(b'\x00')
store_serv.ip_addr = ip_addr.strip(b'\x00')
return store_serv
def _tracker_do_query_storage(self, group_name, filename, cmd):
'''
Core of query storage, based on group name and filename.
It is used for download, delete and set_meta.
arguments:
@group_name: string
@filename: string. remote file_id
@Return: Storage_server object
'''
conn = self.pool.get_connection()
th = Tracker_header()
file_name_len = len(filename)
th.pkg_len = FDFS_GROUP_NAME_MAX_LEN + file_name_len
th.cmd = cmd
th.send_header(conn)
# query_fmt: |-group_name(16)-filename(file_name_len)-|
query_fmt = '!%ds %ds' % (FDFS_GROUP_NAME_MAX_LEN, file_name_len)
send_buffer = struct.pack(query_fmt, group_name, filename)
try:
tcp_send_data(conn, send_buffer)
th.recv_header(conn)
if th.status != 0:
raise DataError('Error: %d, %s' % (th.status, os.strerror(th.status)))
recv_buffer, recv_size = tcp_recv_response(conn, th.pkg_len)
if recv_size != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN:
errmsg = '[-] Error: Tracker response length is invalid, '
errmsg += 'expect: %d, actual: %d' % (th.pkg_len, recv_size)
raise ResponseError(errmsg)
except ConnectionError:
raise
finally:
self.pool.release(conn)
# recv_fmt: |-group_name(16)-ip_addr(16-1)-port(8)-|
recv_fmt = '!%ds %ds Q' % (FDFS_GROUP_NAME_MAX_LEN, IP_ADDRESS_SIZE - 1)
store_serv = Storage_server()
(group_name, ipaddr, store_serv.port) = struct.unpack(recv_fmt, recv_buffer)
store_serv.group_name = group_name.strip(b'\x00')
store_serv.ip_addr = ipaddr.strip(b'\x00')
return store_serv
def tracker_query_storage_update(self, group_name, filename):
'''
Query storage server to update(delete and set_meta).
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE)
def tracker_query_storage_fetch(self, group_name, filename):
'''
Query storage server to download.
'''
return self._tracker_do_query_storage(group_name, filename, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE)
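# Monitoring sketch (illustrative): walk every group and print the readable
# per-node reports produced by Group_info.__str__ and Storage_info.__str__.
# Assumption: `pool` is an already configured tracker connection pool.
def print_cluster_overview(pool):
    tc = Tracker_client(pool)
    groups = tc.tracker_list_all_groups()
    for gi in groups['Groups']:
        print(gi)
        for si in tc.tracker_list_servers(gi.group_name)['Servers']:
            print(si)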
#!/usr/bin/env python
# -*- coding = utf-8 -*-
# filename: utils.py
import io
import os
import sys
import stat
import platform
import configparser
SUFFIX = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
__os_sep__ = "/" if platform.system() == 'Windows' else os.sep
def appromix(size, base=0):
'''Convert a byte count to a human-readable size string.
Keyword arguments:
size: int, bytes stream size
base: int, suffix index
Return: string
'''
multiples = 1024
if size < 0:
raise ValueError('[-] Error: number must be non-negative.')
if size < multiples:
return '{0:d}{1}'.format(size, SUFFIX[base])
for suffix in SUFFIX[base:]:
if size < multiples:
return '{0:.2f}{1}'.format(size, suffix)
size = size / float(multiples)
raise ValueError('[-] Error: number too big.')
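# Worked examples for appromix (values follow directly from the loop above):
#   appromix(512)          -> '512B'
#   appromix(1024)         -> '1.00KB'
#   appromix(3 * 1024**3)  -> '3.00GB'
# A non-zero base starts the suffix scale further up, which is how the tracker
# code above renders MB-denominated fields via FDFS_SPACE_SIZE_BASE_INDEX.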
def get_file_ext_name(filename, double_ext=True):
li = filename.split(os.extsep)
if len(li) <= 1:
return ''
else:
if li[-1].find(__os_sep__) != -1:
return ''
if double_ext:
if len(li) > 2:
if li[-2].find(__os_sep__) == -1:
return '%s.%s' % (li[-2], li[-1])
return li[-1]
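# Worked examples for get_file_ext_name:
#   get_file_ext_name('report.pdf')                        -> 'pdf'
#   get_file_ext_name('archive.tar.gz')                    -> 'tar.gz'
#   get_file_ext_name('archive.tar.gz', double_ext=False)  -> 'gz'
#   get_file_ext_name('README')                            -> ''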
class Fdfs_ConfigParser(configparser.RawConfigParser):
"""
Extends ConfigParser to allow files without sections.
This is done by wrapping read files and prepending them with a placeholder
section, which defaults to '__config__'
"""
def __init__(self, default_section=None, *args, **kwargs):
configparser.RawConfigParser.__init__(self, *args, **kwargs)
self._default_section = None
self.set_default_section(default_section or '__config__')
def get_default_section(self):
return self._default_section
def set_default_section(self, section):
self.add_section(section)
# move all values from the previous default section to the new one
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
except configparser.NoSectionError:
pass
else:
for (key, value) in default_section_items:
self.set(section, key, value)
self._default_section = section
def read(self, filenames):
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename) as fp:
self.readfp(fp)
except IOError:
continue
else:
read_ok.append(filename)
return read_ok
def readfp(self, fp, *args, **kwargs):
stream = io.StringIO()
try:
stream.name = fp.name
except AttributeError:
pass
stream.write('[' + self._default_section + ']\n')
stream.write(fp.read())
stream.seek(0, 0)
return self._read(stream, stream.name)
def write(self, fp):
# Write the items from the default section manually and then remove them
# from the data. They'll be re-added later.
try:
default_section_items = self.items(self._default_section)
self.remove_section(self._default_section)
for (key, value) in default_section_items:
fp.write("{0} = {1}\n".format(key, value))
fp.write("\n")
except configparser.NoSectionError:
pass
configparser.RawConfigParser.write(self, fp)
self.add_section(self._default_section)
for (key, value) in default_section_items:
self.set(self._default_section, key, value)
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == configparser.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise configparser.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
if optname in cursect:
if not isinstance(cursect[optname], list):
cursect[optname] = [cursect[optname]]
cursect[optname].append(optval)
else:
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = configparser.ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
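# Usage sketch for Fdfs_ConfigParser (illustrative; the path and key are
# typical FastDFS client.conf values, not taken from this repository):
def read_tracker_setting(conf_path='/etc/fdfs/client.conf'):
    parser = Fdfs_ConfigParser()
    parser.read(conf_path)
    # Keys of a section-less file land in the '__config__' default section.
    return parser.get('__config__', 'tracker_server')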
def split_remote_fileid(remote_file_id):
'''
Split remote_file_id into (group_name, remote_file_name)
arguments:
@remote_file_id: string
@return tuple, (group_name, remote_file_name)
'''
index = remote_file_id.find(b'/')
if -1 == index:
return None
return (remote_file_id[0:index], remote_file_id[(index + 1):])
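# Worked examples for split_remote_fileid:
#   split_remote_fileid(b'group1/M00/00/00/example.jpg')
#     -> (b'group1', b'M00/00/00/example.jpg')
#   split_remote_fileid(b'no-separator') -> None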
def fdfs_check_file(filename):
ret = True
errmsg = ''
if not os.path.isfile(filename):
ret = False
errmsg = '[-] Error: %s is not a file.' % filename
elif not stat.S_ISREG(os.stat(filename).st_mode):
ret = False
errmsg = '[-] Error: %s is not a regular file.' % filename
return (ret, errmsg)
if __name__ == '__main__':
print(get_file_ext_name('/bc.tar.gz'))
......@@ -56,7 +56,7 @@ if __name__=="__main__":
url = "https://mp.weixin.qq.com/"
browser.get(url)
# adjustable wait time
time.sleep(40)
time.sleep(20)
s = requests.session()
#获取到token和cookies
......
import time
import time
......@@ -56,8 +56,9 @@ opt.add_experimental_option("excludeSwitches", ["enable-automation"])
opt.add_experimental_option('excludeSwitches', ['enable-logging'])
opt.add_experimental_option('useAutomationExtension', False)
#opt.binary_location = r'C:/Program Files/Google/Chrome/Application/chrome.exe'
opt.binary_location = r'C:/Program Files/Google/Chrome/Application/chrome.exe'
chromedriver = r'E:/chromedriver_win32/117/chromedriver.exe'
# opt.binary_location = r'C:/Program Files/Google/Chrome/Application/chrome.exe'
opt.binary_location = r'D:/Google/Chrome/Application/chrome.exe'
chromedriver = r'D:/cmd100/chromedriver.exe'
service = Service(chromedriver)
browser = webdriver.Chrome(options=opt, service=service)
url = "https://wenshu.court.gov.cn/"
......
# -*- coding: utf-8 -*-
import os
import random
import sys
import time
import logbook
import logbook.more
# Core utility package
import pymysql
from tqdm import tqdm
# Note: call BaseCore.close() before the program exits to release related resources
class BaseCore:
# Sequence number
__seq = 0
# Proxy pool database connection
__cnx_proxy = None
__cursor_proxy = None
# User-Agent pool
__USER_AGENT_LIST = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/531.4 (KHTML, like Gecko) Chrome/3.0.194.0 Safari/531.4',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.11 Safari/534.16',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.50 Safari/525.19',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.7 Safari/532.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; Lunascape 5.0 alpha2)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Chrome/4.0.222.7 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru-RU) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.11 Safari/534.16',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.10 Safari/532.0',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Maxthon;',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.1 (KHTML, like Gecko) Chrome/2.0.169.0 Safari/530.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; ja-JP; rv:1.7) Gecko/20040614 Firefox/0.9',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.810.0 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.0 Safari/532.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.3.4000 Chrome/30.0.1599.101 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.6 (KHTML, like Gecko) Chrome/7.0.500.0 Safari/534.6',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; TencentTraveler)',
'Mozilla/5.0 (Windows NT 6.0; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.4 (KHTML, like Gecko) Chrome/6.0.481.0 Safari/534.4',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.370.0 Safari/533.4',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.7.5) Gecko/20041107 Firefox/1.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.4.154.31 Safari/525.19',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB) AppleWebKit/534.1 (KHTML, like Gecko) Chrome/6.0.428.0 Safari/534.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; de-DE) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.540.0 Safari/534.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE) Chrome/4.0.223.3 Safari/532.2',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/12.0.702.0 Safari/534.24',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.42 Safari/525.19',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.3 (KHTML, like Gecko) Chrome/4.0.227.0 Safari/532.3',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.8 (KHTML, like Gecko) Chrome/16.0.912.63 Safari/535.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.460.0 Safari/534.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.463.0 Safari/534.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.9 (KHTML, like Gecko) Chrome/2.0.157.0 Safari/528.9',
'Mozilla/5.0 (Windows NT 5.2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.794.0 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.694.0 Safari/534.24',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5',
'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.7.5) Gecko/20041107 Firefox/1.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Maxthon; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Chrome/4.0.223.4 Safari/532.2',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.65 Safari/535.11',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.21 (KHTML, like Gecko) Chrome/11.0.682.0 Safari/534.21',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/530.0 (KHTML, like Gecko) Chrome/2.0.182.0 Safari/531.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.9 (KHTML, like Gecko) Chrome/7.0.531.0 Safari/534.9',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; WOW64; Trident/6.0)',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.811.0 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; de-DE; rv:1.7.5) Gecko/20041108 Firefox/1.0',
'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.127 Safari/533.4',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; zh-cn) Opera 8.50',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/7.0.0 Safari/700.13',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.4 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.1 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20041107 Firefox/0.9.2 StumbleUpon/1.994',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.7.5) Gecko/20041110 Firefox/1.0',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.6 Safari/530.5',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.0.3705)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.21 Safari/532.0',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.792.0 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/530.1 (KHTML, like Gecko) Chrome/2.0.168.0 Safari/530.1',
'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; rv:1.7.3) Gecko/20040913 Firefox/0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.8 (KHTML, like Gecko) Chrome/2.0.177.1 Safari/530.8',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.40 Safari/530.5',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.24 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/528.10 (KHTML, like Gecko) Chrome/2.0.157.2 Safari/528.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Chrome/4.0.223.2 Safari/532.2',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.75 Safari/535.7',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; T312461)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.461.0 Safari/534.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.0; rv:1.7.3) Gecko/20041001 Firefox/0.10.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-DE) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.202.2 Safari/532.0',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0) Gecko/16.0 Firefox/16.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/531.3 (KHTML, like Gecko) Chrome/3.0.193.2 Safari/531.3',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.864.0 Safari/535.2',
'Mozilla/5.0 (Windows NT 5.2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.813.0 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0',
'Mozilla/5.0 (Windows NT 5.1; rv:2.1.1) Gecko/20110415 Firefox/4.0.2pre Fennec/4.0.1',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.801.0 Safari/535.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.212.0 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.27 Safari/525.13',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.697.0 Safari/534.24',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.17 (KHTML, like Gecko) Chrome/11.0.652.0 Safari/534.17',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.224 Safari/534.10 ChromePlus/1.5.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.0 Safari/532.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.7 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.2 (KHTML, like Gecko) Chrome/5.0.342.2 Safari/533.2',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.4 Safari/532.1',
'Mozilla/5.0 (Windows NT 6.0; rv:2.1.1) Gecko/20110415 Firefox/4.0.2pre Fennec/4.0.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.2.153.0 Safari/525.19',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; sv-SE; rv:1.7.5) Gecko/20041108 Firefox/1.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.462.0 Safari/534.3',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de-DE; rv:1.7.5) Gecko/20041122 Firefox/1.0',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; SV1; uZardWeb/1.0; Server_JP)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; HCI0449; .NET CLR 1.0.3705)',
'Mozilla/4.0 (compatible; MSIE 5.0; Windows 98; DigExt); Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1);',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.23 Safari/530.5',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.208.0 Safari/532.0',
'Mozilla/5.0 (Windows NT 6.0; rv:14.0) Gecko/20100101 Firefox/14.0.1',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/530.7 (KHTML, like Gecko) Chrome/2.0.176.0 Safari/530.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.21 (KHTML, like Gecko) Chrome/11.0.678.0 Safari/534.21',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.21 Safari/532.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; InfoPath.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.55 Safari/525.19',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1 Fennec/7.0a1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.724.100 Safari/534.30',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.472.33 Safari/534.3 SE 2.X MetaSr 1.0',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; WOW64; SV1; uZardWeb/1.0; Server_HK)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
'Mozilla/5.0 (Windows NT 6.0) yi; AppleWebKit/345667.12221 (KHTML, like Gecko) Chrome/23.0.1271.26 Safari/453667.1221',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.2 (KHTML, like Gecko) Chrome/3.0.191.3 Safari/531.2',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.39 Safari/530.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.1 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.38 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.27 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8b) Gecko/20050118 Firefox/1.0+',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; ja-JP; rv:1.7) Gecko/20040707 Firefox/0.9.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.202.0 Safari/532.0',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/530.4 (KHTML, like Gecko) Chrome/2.0.171.0 Safari/530.4',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; nl-NL; rv:1.7.5) Gecko/20041202 Firefox/1.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.204.0 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Chrome/4.0.222.6 Safari/532.2',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/528.8 (KHTML, like Gecko) Chrome/1.0.156.0 Safari/528.8',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.0.3705; .NET CLR 2.0.50727; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.517.43 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13',
'Mozilla/5.0 (ipad Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.6 (KHTML, like Gecko) Chrome/7.0.498.0 Safari/534.6',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.43 Safari/530.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.208.0 Safari/532.0',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.19 (KHTML, like Gecko) Chrome/11.0.661.0 Safari/534.19',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-CA) AppleWebKit/534.13 (KHTML like Gecko) Chrome/9.0.597.98 Safari/534.13',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.211.2 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.201.1 Safari/532.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.201.1 Safari/532.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.213.1 Safari/532.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/0.3.154.6 Safari/525.19',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.599.0 Safari/534.13',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.8 (KHTML, like Gecko) Chrome/7.0.521.0 Safari/534.8',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5'
]
def close(self):
try:
self.__cursor_proxy.close()
self.__cnx_proxy.close()
except :
pass
def __init__(self):
self.__cnx_proxy = pymysql.connect(host='114.115.159.144', user='caiji', password='zzsn9988', db='clb_project',
charset='utf8mb4')
self.__cursor_proxy= self.__cnx_proxy.cursor()
pass
# Compute elapsed time as a human-readable string
def getTimeCost(self,start, end):
seconds = int(end - start)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
if (h > 0):
return "%d小时%d分钟%d秒" % (h, m, s)
elif (m > 0):
return "%d分钟%d秒" % (m, s)
elif (seconds > 0):
return "%d秒" % (s)
else:
ms = int((end - start) * 1000)
return "%d毫秒" % (ms)
# Format the current time
# type 1 : 2001-01-01 12:00:00  (%Y-%m-%d %H:%M:%S)
# type 2 : 010101120000  (%y%m%d%H%M%S)
# type 3 : millisecond timestamp, e.g. 1690179526555
def getNowTime(self, type):
now_time = ""
if type == 1:
now_time = time.strftime("%Y-%m-%d %H:%M:%S")
if type == 2:
now_time = time.strftime("%y%m%d%H%M%S")
if type == 3:
now_time = int(time.time() * 1000)
return now_time
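# e.g. getNowTime(1) -> '2023-11-21 09:30:00' style string,
#      getNowTime(2) -> '231121093000' style string,
#      getNowTime(3) -> integer millisecond timestamp such as 1700530200000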
# Log record formatter
def logFormate(self,record, handler):
formate = "[{date}] [{level}] [{filename}] [{func_name}] [{lineno}] {msg}".format(
date=record.time, # log time
level=record.level_name, # log level
filename=os.path.split(record.filename)[-1], # file name
func_name=record.func_name, # function name
lineno=record.lineno, # line number
msg=record.message # log message
)
return formate
# Build a logbook logger (file and/or stderr handlers)
def getLogger(self,fileLogFlag=True, stdOutFlag=True):
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
dirname = os.path.join(dirname, "logs")
filename = filename.replace(".py", "") + ".log"
if not os.path.exists(dirname):
os.mkdir(dirname)
logbook.set_datetime_format('local')
logger = logbook.Logger(filename)
logger.handlers = []
if fileLogFlag: # write logs to a file
logFile = logbook.TimedRotatingFileHandler(os.path.join(dirname, filename), date_format='%Y-%m-%d',
bubble=True, encoding='utf-8')
logFile.formatter = self.logFormate
logger.handlers.append(logFile)
if stdOutFlag: # print logs to the console
logStd = logbook.more.ColorizedStderrHandler(bubble=True)
logStd.formatter = self.logFormate
logger.handlers.append(logStd)
return logger
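# Usage sketch: logs go to ./logs/<script name>.log (daily rotation) and, by
# default, to stderr as well.
# log = BaseCore().getLogger()
# log.info("spider started")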
# Return a random User-Agent string
def getRandomUserAgent(self):
return random.choice(self.__USER_AGENT_LIST)
# Get a random proxy from the pool (note: shadowed by the get_proxy defined below)
def get_proxy(self):
sql = "select proxy from clb_proxy"
self.__cursor_proxy.execute(sql)
proxy_lists = self.__cursor_proxy.fetchall()
ip_list = []
for proxy_ in proxy_lists:
ip_list.append(str(proxy_).replace("('", '').replace("',)", ''))
proxy_list = []
for str_ip in ip_list:
str_ip_list = str_ip.split('-')
proxyMeta = "http://%(host)s:%(port)s" % {
"host": str_ip_list[0],
"port": str_ip_list[1],
}
proxy = {
"HTTP": proxyMeta,
"HTTPS": proxyMeta
}
proxy_list.append(proxy)
return proxy_list[random.randint(0, 3)]
# Duplicate definition: this get_proxy overrides the one above and returns the full proxy list
def get_proxy(self):
ip_list = []
with self.__cursor_proxy as cursor:
sql_str = '''select PROXY from clb_proxy where id={} '''.format(random.randint(1, 12))
print(sql_str)
cursor.execute(sql_str)
rows = cursor.fetchall()
for row in tqdm(rows):
str_ip = row[0]
str_ip_list = str_ip.split('-')
proxyMeta = "http://%(host)s:%(port)s" % {
"host": str_ip_list[0],
"port": str_ip_list[1],
}
proxy = {
"HTTP": proxyMeta,
"HTTPS": proxyMeta
}
ip_list.append(proxy)
return ip_list
def get_proxyIPPort(self):
ip_list = []
with self.__cursor_proxy as cursor:
sql_str = '''select PROXY from clb_proxy where id={} '''.format(random.randint(1, 12))
print(sql_str)
cursor.execute(sql_str)
rows = cursor.fetchall()
for row in tqdm(rows):
str_ip = row[0]
str_ip_list = str_ip.split('-')
proxy = {
"host": str_ip_list[0],
"port": str_ip_list[1],
}
ip_list.append(proxy)
return ip_list
\ No newline at end of file
......@@ -227,12 +227,12 @@ class QQnewsSpider(object):
except Exception as e:
print('时间解析异常!!')
return publishtime
@retry(retry=3,delay=1)
@retry(tries=3,delay=1)
def getrequest(self,session,real_url,keyword):
res=session.get(real_url,timeout=10)
res = session.get(real_url,timeout=10)
res.encoding = res.apparent_encoding
text=res.text
text = res.text
if keyword not in text:
raise ValueError("Invalid value")
return text
......