当前位置 博文首页 > LY的博客:python自动采集财经信息翻译成英文一并上传到wordpres

    LY的博客:python自动采集财经信息翻译成英文一并上传到wordpres

    作者:[db:作者] 时间:2021-08-02 12:44

    #coded by 伊玛目的门徒
    #coding=utf-8
    from wordpress_xmlrpc import Client, WordPressPost
    from wordpress_xmlrpc.methods.posts import GetPosts, NewPost
    from wordpress_xmlrpc.methods.users import GetUserInfo
    import time
    import requests
    from bs4 import BeautifulSoup
    import re
    import datetime
    from googletrans import Translator
    from concurrent.futures import ThreadPoolExecutor
    import urllib.request
    import urllib.parse
    import json
    
    start = time.clock()  # 计时-开始
    urllist=[]
    titlelist=[]
    
    
    header={'user-agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.XXXX.XXX Safari/537.36'}
    
    
    
    
    def do(i):
        try:
    
            cd=[]
            html=requests.get('http://futures.hexun.com/domestic/index-'+str(i)+'.html',headers=header)
    
            html.encoding='gbk'
    
            Soup = BeautifulSoup(html.text, "lxml")
            #ab=Soup.select('li a[target="_blank"]')
            ab=Soup.select('div.temp01 ul li a[target="_blank"]')
    
            for x in range(len(ab)):
                if (x % 2) == 1:
                    cd.append (ab[x])
    
            #print ('--------------')
    
            pattern = re.compile(r'<a href="(.*?)" target="_blank">',re.S)   # 查找数字
            result1 = pattern.findall(str(cd))
            pattern2 = re.compile(r'target="_blank">(.*?)</a>',re.S)
            result2 = pattern2.findall(str(cd))
            #print (result1)
            urllist.extend(result1)
            #print (result2)
            titlelist.extend(result2)
    
    
            list1.remove(i)
    
    
    
        except:
            pass
    
    
    
    # 多线程
    def multithreading():
        sum=0
    
        while len(list1)>0:
            with ThreadPoolExecutor(max_workers=10) as executor:
                for result in executor.map(do, list1):
                    sum+=1
    
        return sum
    
    
    
    def onceindex():
    
    
        cd=[]
        html=requests.get('http://futures.hexun.com/domestic/index.html',headers=header)
    
        html.encoding='gbk'
    
        Soup = BeautifulSoup(html.text, "lxml")
        #ab=Soup.select('li a[target="_blank"]')
        ab=Soup.select('div.temp01 ul li a[target="_blank"]')
    
        for x in range(len(ab)):
            if (x % 2) == 1:
                cd.append (ab[x])
    
        #print ('--------------')
    
        pattern = re.compile(r'<a href="(.*?)" target="_blank">',re.S)   # 查找数字
        result1 = pattern.findall(str(cd))
        pattern2 = re.compile(r'target="_blank">(.*?)</a>',re.S)
        result2 = pattern2.findall(str(cd))
        #print (result1)
        urllist.extend(result1)
        #print (result2)
        titlelist.extend(result2)
    
    
    def fanyi(a0):
        if len(a0) ==0:
            pass
    
        else:
            url = 'http://fanyi.so.com/index/search'
    
            data = {
            'query':a0,
            'eng':'0'
            }
    
            data = urllib.parse.urlencode(data).encode('utf - 8')
            wy = urllib.request.urlopen(url,data)
            html = wy.read().decode('utf - 8')
            ta = json.loads(html)
            print(ta['data']['fanyi'])
            return ta['data']['fanyi']
    
    
    
    def fanyi0(a0):
            url = 'http://fy.iciba.com/ajax.php?a=fy'
    
            data = {
            'f':'auto',
            't':'auto',
            'w':a0
            }
    
            data = urllib.parse.urlencode(data).encode('utf - 8')
            wy = urllib.request.urlopen(url,data)
            html = wy.read().decode('utf - 8')
            ta = json.loads(html)
            print(ta['content']['out'])
            return ta['content']['out']
    
    
    
    
    
    
    def fanyi_google(a0):
        #必须开全局代理,否则无法连接
    
        fanyi_eng=''
    
    
        #单次字符2000字以下
        c=1500
        try:
            alist=list(a0[i:i+c] for i in range(0,len(a0),c))
            #print(len(alist))
            #print (alist[0])
    
    
            translator = Translator()
    
            for item in alist:
                #print (item)
                if len(item)>0:
                    #print (translator.translate(item,src='zh-cn' ).text)
                    item_eng=translator.translate(item,src='zh-cn' ).text
                    fanyi_eng=fanyi_eng+item_eng
    
            if len(fanyi_eng)>1:
                return fanyi_eng
            else:
                return 'null'
        except:
            return 'null'
    
    
    
    '''
    
    #全采集用此段
    #list1=list(range(1,393,1))
    list1=list(range(300,393,1))
    
    sum=multithreading()
    print ('还剩下{}页'.format(list1))
    
    
    
    end = time.clock()  # 计时-结束
    print (("爬取完成 用时:"))
    print ((end - start))
    
    
    print ('总爬取 %d 页 '%(sum))
    
    while None in titlelist:
        titlelist.remove(None)
    
    while None in urllist:
        urllist.remove(None)
    
    #print (titlelist)
    
    #print (urllist)
    '''
    
    
    
    
    #只采集index页用此段代码
    onceindex()
    #print (urllist)
    
    '''
    #可作为TXT输出
    with open("test.txt","w") as f:
            for thing in urllist:
    
                f.write(thing)
                f.write('\r\n')
    '''
    
    
    
    
    
    
    
    def getcontent(url,j):
        try:
            print (listj)
    
            html=requests.get(url,headers=header)
    
            html.encoding='gbk'
    
            Soup = BeautifulSoup(html.text, "lxml")
    
            con=Soup.select('div.art_contextBox ')
    
            date_n=Soup.select('div.tip.fl span')[0].string
    
    
    
    
    
            cont=''
            for y in con:
                #print (type(str(y)))
                cont=cont+str(y)
    
            #print (cont)
            #print (j)
    
            #print ('****')
            #print (listj)
    
    
            listj.remove(j)
    
            return cont,date_n
    
    
    
    
    
        except:
            pass
    
    
    def wpsend(title,content,date_n):
    
        wp = Client('http://www.6324.xyz/xmlrpc.php', '***', '***')
    
        #print (content)
        post = WordPressPost()
        post.title = title
    
        #date_n=datetime.datetime(date_n)
        date_n = datetime.datetime.strptime(date_n, "%Y-%m-%d %H:%M:%S")
        print (str(date_n))
        post.date = date_n
        post.date_modified = date_n
        #post.date_modified = datetime.datetime.now()
        #post.date =datetime.datetime(2018, 2, 16, 23, 44, 55)
    
        #post.content = " ''' "+ content +" ''' "
        post.content = "  "+ str(content) +"  "
        post.post_status = 'publish'
        post.terms_names = {
           'post_tag': ['操盘策略'],
          'category': [ '期货']
        }
        wp.call(NewPost(post))
        localtime = time.localtime(time.time())
        print ('文档已上传,执行时间 {}'.format(time.strftime("%Y-%m-%d %H:%M:%S",localtime)))
    
        #print (content)
        #上传英文文档
        fanyicontent=''
        fanyicontent=fanyi_google(content)
        post.post_status = 'publish'
        post.title =fanyi_google(title)
        post.content = "  "+ fanyicontent +"  "
        post.date_modified = date_n
        post.terms_names = {
           'post_tag': ['english '],
          'category': [ 'futures']
        }
        post.date = date_n
    
    
        wp.call(NewPost(post))
        localtime = time.localtime(time.time())
        print ('英文文档已上传,执行时间 {}'.format(time.strftime("%Y-%m-%d %H:%M:%S",localtime)))
    
    
    
    
    
    def work(j):
    
    
        url=urllist[j]
        title=titlelist[j]
        combine=getcontent(url,j)
        if combine is None:
            #去除无法处理的任务
            print ('该页为空,去除{} 号任务'.format(j))
            listj.remove(j)
            pass
        else:
            cont=combine[0]
            date_n=combine[1]
            wpsend(title,cont,date_n)
    
            print ('成功完成任务采集任务第 {}号任务'.format(j))
    
    
    
    
    # 多线程
    def multithreading_con():
        sum=0
        global listj
        listj=list(range(len(urllist)))
        #print (type(listj))
        #print (listj)
    
        #断点继续任务,若新任务去掉这一行:
        #listj=listj[8:]
    
        while len(listj)>0:
            with ThreadPoolExecutor(max_workers=2) as executor:
                for result in executor.map(work,listj ):
                    sum+=1
    
        return sum
    
    
    
    multithreading_con()
    
    end = time.clock()  # 计时-结束
    print ("全部上传完成 用时:")
    
    print ((end - start))

    成果可在www.6324.xyz 浏览

    cs