Python爬取有道翻译中的疑问

Python调用有道翻译 - 简书
Python调用有道翻译
import json
import time
import hashlib
import random


def youdao_web_sign(text, salt):
    """Return the md5 ``sign`` the Youdao web client sends with a request.

    Mirrors the site JS (see the quoted fragment further down):
        sign = md5("fanyideskweb" + text + salt + "ebSeFb%=XZ%T[KZ)c(sy!")

    text -- the string being translated
    salt -- stringified millisecond timestamp (+ random digit), see below
    """
    src = "fanyideskweb" + text + salt + "ebSeFb%=XZ%T[KZ)c(sy!"
    return hashlib.md5(src.encode("utf8")).hexdigest()


if __name__ == '__main__':
    # Third-party; imported lazily so the sign helper stays importable
    # without requests installed.
    import requests

    S = requests.Session()
    target_url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'

    # salt: JS original is
    #   var r = "" + ((new Date).getTime() + parseInt(10 * Math.random(), 10));
    r = str(int(time.time() * 1000) + random.randint(1, 10))
    t = "接口"  # the text to translate
    src = "fanyideskweb" + t + r + "ebSeFb%=XZ%T[KZ)c(sy!"
    print("src=%s" % src)
    # NOTE: hashlib.md5().update(...) returns None, so it cannot be chained
    # with .hexdigest() — compute the digest via the helper instead.
    sign = youdao_web_sign(t, r)
    print("i=%s" % t)
    print("salt=%s" % r)
    print("sign=%s" % sign)

    # Experimentation showed only three request headers are required:
    # Referer, User-Agent and Cookie.
    target_headers = {
        'Referer': 'http://fanyi.youdao.com/',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0. Safari/537.36',
        'Cookie': '___rl__test__cookies=2; fanyi-ad-closed=1; fanyi-ad-id=40789; JSESSIONID=aaaJ0epv-u81l5kbPS OUTFOX_SEARCH_USER_ID=-@10.168.8.61; _ga=GA1.2.2714379; OUTFOX_SEARCH_USER_ID_NCOO=19135',
    }

    Form_Data = {
        'i': t,  # the original dict dropped this key — the query text itself
        'from': 'AUTO',
        'to': 'AUTO',
        'smartresult': 'dict',
        'client': 'fanyideskweb',
        'salt': r,
        'sign': sign,
        'doctype': 'json',
        'version': '2.1',
        'keyfrom': 'fanyi.web',
        'action': 'FY_BY_CLICKBUTTON',  # fixed typo: was 'FY_BY_CLICKBUTTION'
        'typoResult': 'false',
    }

    target_response = S.post(url=target_url, data=Form_Data, headers=target_headers)
    print(target_response.text)
    translateResult = json.loads(target_response.text)
    print("翻译返回是:%s" % translateResult)
    print("翻译返回是:%s" % translateResult['translateResult'][0])
    print("翻译返回是:%s" % translateResult['translateResult'][0][0]['tgt'])
经过试验,必须带三个请求头,其它可以不带:
Referer、User-Agent、Cookie
{'errorCode': 50}
src=fanyideskweb接口5ebSeFb%=XZ%T[KZ)c(sy!
sign=0f4a99fedc56b5b99d4521aea0a85ee9
{"translateResult":[[{"tgt":"interface","src":"接口"}]],"errorCode":0,"type":"zh-CHS2en","smartResult":{"entries":["","port\r\n","connector\r\n","hickey\r\n"],"type":1}}
翻译返回是:{'translateResult': [[{'tgt': 'interface', 'src': '接口'}]], 'errorCode': 0, 'type': 'zh-CHS2en', 'smartResult': {'entries': ['', 'port\r\n', 'connector\r\n', 'hickey\r\n'], 'type': 1}}
翻译返回是:[{'tgt': 'interface', 'src': '接口'}]
翻译返回是:interface
[Finished in 1.0s]
sign 和 salt 核心代码 js
// Core sign/salt generation quoted from Youdao's web client JS.
// NOTE(review): the fragment is incomplete — closing braces/parens were
// lost when the page was scraped; kept verbatim for reference only.
t.asyRequest = function(e) {
// t: the text being translated (caller passes it as e.i)
var t = e.i,
// i ("salt"): millisecond timestamp plus a random digit 0-9
i = "" + ((new Date).getTime() + parseInt(10 * Math.random(), 10)),
// o ("sign"): md5 of client id + text + salt + fixed secret string
o = n.md5("fanyideskweb" + t + i + "ebSeFb%=XZ%T[KZ)c(sy!");
// abort any in-flight request, then POST the form
r && r.abort(), r = n.ajax({
type: "POST",
// contentType value looks truncated by the scrape ("x-www-form-")
contentType: "application/x-www-form- charset=UTF-8",
url: "/bbk/translate_m.do",
client: "fanyideskweb",
tgt: e.tgt,
from: e.from,
doctype: "json",
version: "3.0",
dataType: "json",
// errorCode 0 means success; anything else routes to the error callback
success: function(t) {
t && 0 == t.errorCode ? e.success && e.success(t) : e.error && e.error(t)
error: function(e) {}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ps: reported as tested/working at time of the original post.
# Modernised from the original Python 2 snippet (urllib2 / reload(sys));
# also fixes the name mismatch: it defined ``tarn`` but called ``tran``.
import re
import urllib.parse
import urllib.request

# Youdao openapi endpoint; keyfrom/key left blank as in the original post.
url = "http://fanyi.youdao.com/openapi.do?keyfrom=&key=&type=data&doctype=json&version=1.1&q="


def tran(txt):
    """Fetch *txt*'s translation from the Youdao open API and return it."""
    req = urllib.request.Request(url + urllib.parse.quote(txt))
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0. Safari/535.19')
    req.add_header('Host', 'fanyi.youdao.com')
    req.add_header('Cookie', 'JSESSIONID=abcVYauywUcB-7YG-M0Ct; OUTFOX_SEARCH_USER_ID=*********@IP; SESSION_FROM=Buysz-blog')
    body = urllib.request.urlopen(req).read().decode('utf-8')
    # Crude extraction: strip JSON punctuation from the first comma-separated
    # field, leaving just the translated text.  (Dropped the original
    # pattern's trailing '|', which matched the empty string on every char.)
    return re.sub(r'"|:|\{|\[|\]|translation', '', body.split(',')[0])


if __name__ == '__main__':
    # Guarded so importing this file no longer fires a network request.
    print(tran('我是中国人'))
# Interactive Youdao translator loop (blog banter trimmed from this line).
import urllib.request
import urllib.parse
import json
import hashlib
import re  # was missing in the original, though re.match is used below
from datetime import datetime


def youdao_desktop_sign(query, salt):
    """md5 sign for this endpoint: fanyideskweb + query + salt + secret."""
    raw = 'fanyideskweb' + query + salt + "rY0D^0'nM0}g5Mm1z%1G4"
    return hashlib.md5(raw.encode('utf-8')).hexdigest()


def make_salt():
    """First 13 digits of the current epoch timestamp (ms precision)."""
    m = re.match(r'(\d+)\.(\d+)', str(datetime.now().timestamp()))
    return (m.group(1) + m.group(2))[:13]


def main():
    """Read lines from stdin and print their Youdao translation; 'quit' exits."""
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&sessionFrom='
    heads = {'User-Agent': 'Mozilla/5.0 (M U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'}
    while True:
        d = input('translation:\n')
        if d == 'quit':  # the original branch body was lost; 'quit' ends the loop
            break
        f = make_salt()
        sign = youdao_desktop_sign(d, f)
        data = {  # the original never initialised ``data`` before assigning keys
            'i': d,
            'from': 'AUTO',
            'to': 'AUTO',
            'smartresult': 'dict',
            'client': 'fanyideskweb',
            'salt': f,
            'sign': sign,
            'doctype': 'json',
            'version': '2.1',  # original value was corrupted by pasted HTML markup
            'keyfrom': 'fanyi.web',
            'action': 'FY_BY_CLICKBUTTON',  # fixed typo: was 'FY_BY_CLICKBUTTION'
            'typoResult': 'true',
        }
        payload = urllib.parse.urlencode(data).encode('utf-8')
        # A Request instance lets headers be attached per request
        # (req.add_header() would also work on it).
        req = urllib.request.Request(url=url, data=payload, method='POST', headers=heads)
        response = urllib.request.urlopen(req)
        target = json.loads(response.read().decode('utf-8'))
        print(target['translateResult'][0][0]['tgt'])


if __name__ == '__main__':
    main()
永远不要说太晚了,学习的终极技能就是——活到老,学到老
python爬虫(10)身边的翻译专家——获取有道翻译结果
本文目的:使用python 实现翻译效果
思路:有道翻译可以直接翻译内容,观察它的网页内容以及URL可以发现,它的基本url 和将要翻译的内容组合起来就是最后翻译的页面
比如: 有道中英文翻译的网址是:http://dict.youdao.com/
将要翻译的内容是: I'm a Chinese
点击一下翻译,然后出现的含有翻译结果页面的地址是:
http://dict.youdao.com/w/eng/I'm%20a%20chinese/#keyfrom=dict2.index
虽然这个网址后面跟了“#keyfrom=dict2.index” 但是不影响
http://dict.youdao.com/w/eng/I'm%20a%20chinese
也能看到翻译结果
因此总体思路如下:
1.获取将要翻译的内容
2.将翻译的内容和有道翻译网址组成新的url
3.获取这个url的页面内容
4.根据这个页面内容获取翻译结果
代码如下:
#!/usr/bin/python
# coding: utf-8
# Modernised from the original Python 2 snippet (urllib2 / HTMLParser /
# raw_input); also restores the ``try:`` line lost before the except
# clauses, terminates the banner string, adds the missing ``import re``,
# reconstructs the regex whose angle brackets were mangled to '&', and
# actually prints the scraped result.
import re
import urllib.error
import urllib.request
from html import unescape


class BaiduFanyi:
    """Fetch a Youdao word page and scrape the translation out of it.

    NOTE(review): the name says "Baidu" but every URL here is Youdao's —
    kept as-is so existing references keep working.
    """

    def __init__(self, url):
        # url: full word-lookup page, e.g. http://www.youdao.com/w/eng/<text>
        self.url = url

    def get_html_Pages(self, url):
        """Download *url* and return its entity-unescaped HTML, or None on failure."""
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/ Firefox/16.0'}
        try:
            request = urllib.request.Request(url, headers=headers)
            response = urllib.request.urlopen(request)
            html = response.read().decode('utf-8')
            # Turn entities such as &quot; back into literal characters.
            return unescape(html)
        # Catch network errors so the script does not die outright.
        # HTTPError must come first: it subclasses URLError, so the original
        # order made this clause unreachable.
        except urllib.error.HTTPError as e:
            print("连接失败,错误原因:%s " % e.code)
            return None
        except urllib.error.URLError as e:
            print("连接失败,错误原因", e.reason)
            return None

    def get_finally_result(self, html):
        """Pull the first translation paragraph out of the result page.

        Returns None instead of raising when the page has no match.
        """
        result_pattern = re.compile(
            r'<div class="trans-container".*?<p>.*?<p>(.*?)</p>.*?</div>', re.S)
        result = re.search(result_pattern, html)
        return result.group(1) if result else None

    def run(self):
        """Fetch the page and print the translation (original discarded it)."""
        html = self.get_html_Pages(self.url)
        if html:
            print(self.get_finally_result(html))


if __name__ == '__main__':
    author_content = '''
    *****************************************************
        welcome to spider of baidufanyi
        @author: Jimy_Fengqi
        http://blog.csdn.net/qiqiyingse?viewmode=contents
    *****************************************************
    '''
    print(author_content)
    keywords = input('please input the sentence that need translate:')
    if not keywords:
        keywords = "I'm a Chinese"
    base_url = 'http://www.youdao.com/w/eng/%s' % (keywords)
    print(base_url)
    mybaidufanyi = BaiduFanyi(base_url)
    mybaidufanyi.run()
没有更多推荐了,
加入CSDN,享受更精准的内容推荐,与500万程序员共同成长!Python破解有道翻译爬虫
[问题点数:0分]
Python破解有道翻译爬虫
[问题点数:0分]
不显示删除回复
显示所有回复
显示星级回复
显示得分回复
只显示楼主
匿名用户不能发表回复!

我要回帖

更多关于 python 有道词典 的文章

更多推荐

版权声明:文章内容来源于网络,版权归原作者所有,如有侵权请点击这里与我们联系,我们将及时删除。

点击添加站长微信