获取豆瓣电影TOP 250并存入文件

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# encoding=utf-8

import requests
from bs4 import BeautifulSoup
import csv
import codecs

DOWNLOAD_URL = 'http://movie.douban.com/top250'


def download_page(url):
    """Fetch *url* and return the raw response body as bytes.

    A desktop-browser User-Agent header is sent explicitly — presumably
    douban blocks the default ``requests`` UA (NOTE(review): confirm).
    """
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
    }
    response = requests.get(url, headers=browser_headers)
    return response.content


def parse_html(html):
    """Parse one Top-250 listing page.

    Parameters:
        html: raw page content (bytes/str) as returned by ``download_page``.

    Returns:
        A 4-tuple ``(names, ratings, quotes, next_url)`` of three parallel
        lists plus the URL of the next listing page, or ``None`` for
        ``next_url`` when there is no further page.
    """
    # lxml parser chosen explicitly, see
    # https://www.crummy.com/software/BeautifulSoup/bs4/doc/index.zh.html#id9
    soup = BeautifulSoup(html, "lxml")

    movie_name_list = []
    movie_rating_num_list = []
    movie_quote_list = []

    movies_list_soup = soup.find('ol', attrs={'class': 'grid_view'})
    # Guard: if the expected container is missing (layout change, anti-bot
    # page), return empty results instead of raising AttributeError.
    if movies_list_soup is None:
        return movie_name_list, movie_rating_num_list, movie_quote_list, None

    for movies_li in movies_list_soup.find_all('li'):
        movie_name = movies_li.find('div', attrs={'class': 'hd'}).find(
            'span', attrs={'class': 'title'}).getText()
        movie_rating = movies_li.find('div', attrs={'class': 'bd'}).find(
            'span', attrs={'class': 'rating_num', 'property': 'v:average'}).getText()
        # Some movies have no one-line quote; treat a missing span as ''.
        movie_quote_obj = movies_li.find('div', attrs={'class': 'bd'}).find(
            'span', attrs={'class': 'inq'})
        movie_quote = '' if movie_quote_obj is None else movie_quote_obj.getText()

        movie_name_list.append(movie_name)
        movie_rating_num_list.append(movie_rating)
        movie_quote_list.append(movie_quote)

    # Guard against the 'next' span itself being absent; when present it
    # only contains an <a> while a following page exists.
    next_span = soup.find('span', attrs={'class': 'next'})
    next_page = next_span.find('a') if next_span is not None else None
    if next_page:
        return (movie_name_list, movie_rating_num_list, movie_quote_list,
                DOWNLOAD_URL + next_page['href'])

    return movie_name_list, movie_rating_num_list, movie_quote_list, None


def main():
    """Crawl every Top-250 listing page and write one CSV row per movie."""
    url = DOWNLOAD_URL
    # utf_8_sig (UTF-8 with BOM) keeps Excel on Windows from showing mojibake;
    # newline='' is what the csv module requires for a text-mode file so row
    # terminators are not doubled on Windows (preferred over codecs.open).
    with open('movies_top_250.csv', 'w', encoding='utf_8_sig', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow((u'电影名', u'评分', u'理由'))
        while url:
            html = download_page(url)
            movies, ratings, quotes, url = parse_html(html)
            # The three lists are parallel (one entry per movie), so zip them
            # into rows instead of indexing with range(len(...)).
            writer.writerows(zip(movies, ratings, quotes))


if __name__ == '__main__':
    main()
Nickname
Email
Website
0/500
  • OωO
  • |´・ω・)ノ
  • ヾ(≧∇≦*)ゝ
  • (☆ω☆)
  • (╯‵□′)╯︵┴─┴
  •  ̄﹃ ̄
  • (/ω\)
  • ∠( ᐛ 」∠)_
  • (๑•̀ㅁ•́ฅ)
  • →_→
  • ୧(๑•̀⌄•́๑)૭
  • ٩(ˊᗜˋ*)و
  • (ノ°ο°)ノ
  • (´இ皿இ`)
  • ⌇●﹏●⌇
  • (ฅ´ω`ฅ)
  • (╯°A°)╯︵○○○
  • φ( ̄∇ ̄o)
  • ヾ(´・ ・`。)ノ"
  • ( ง ᵒ̌皿ᵒ̌)ง⁼³₌₃
  • (ó﹏ò。)
  • Σ(っ °Д °;)っ
  • ( ,,´・ω・)ノ"(´っω・`。)
  • ╮(╯▽╰)╭
  • o(*////▽////*)q
  • >﹏<
  • ( ๑´•ω•) "(ㆆᴗㆆ)
  • 😂
  • 😀
  • 😅
  • 😊
  • 🙂
  • 🙃
  • 😌
  • 😍
  • 😘
  • 😜
  • 😝
  • 😏
  • 😒
  • 🙄
  • 😳
  • 😡
  • 😔
  • 😫
  • 😱
  • 😭
  • 💩
  • 👻
  • 🙌
  • 🖕
  • 👍
  • 👫
  • 👬
  • 👭
  • 🌚
  • 🌝
  • 🙈
  • 💊
  • 😶
  • 🙏
  • 🍦
  • 🍉
  • 😣
  • 颜文字
  • Emoji
  • Bilibili
0 comments
No comment
一个默默无闻的工程师的日常
Built with Hugo
主题 StackJimmy 设计