Python crawler crawling pictures

First of all, I should note that my crawler runs on an Ubuntu 16.04 system and is mainly based on the following blog.
On that basis, I have made some modifications to improve it.

1. You need to know Python's requests library. You can get the original information of the web page by using the requests library, which is the kind of information you see when you open a web page to view the source code. (Google browser is recommended)
On this page, we can find the url of a picture. The next link is a link corresponding to a picture (PS: Obj url is used in this article). If we want to get this information, we can download the pictures. Learning links:

2. So how can we get the URL of each picture? Python's powerful regular expression module can help us find the obj URL. Regular expression learning link:

The code is ready to be released.

# -*-coding:utf-8-*-
import re
import requests
from urllib import error
import os

# Module-level state shared between Find(), dowmloadPicture() and __main__.
num = 0          # count of pictures downloaded so far for the current keyword
numPicture = 0   # target number of pictures per keyword (set from user input in __main__)
file = ''        # output directory for the current keyword (set in __main__)
List = []        # accumulates the lists of picture URLs discovered by Find()

def Find(url):
    """Probe the paginated search results and return the total number of
    picture URLs found.

    `url` is the search URL up to and including the `&pn=` page parameter;
    the page offset (0, 60, 120, ...) is appended for each request.

    NOTE(review): the original post lost the `try:` lines in extraction;
    the try/except/else structure below is reconstructed.
    """
    global List
    print('Please wait while the total number of pictures is detected.....')
    t = 0
    s = 0
    while t < 1000:  # Set the number of search pictures here (pages of 60)
        Url = url + str(t)
        try:
            Result = requests.get(Url, timeout=7)
        except BaseException:
            # Page could not be fetched; skip ahead to the next page.
            t = t + 60
        else:
            result = Result.text
            # First use regular expression to find image url
            pic_url = re.findall('"objURL":"(.*?)",', result, re.S)
            s += len(pic_url)
            if len(pic_url) == 0:
                # No more results on this page; stop probing.
                break
            List.append(pic_url)
            t = t + 60
    return s

def dowmloadPicture(html, keyword):
    global num
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)  # First use regular expression to find image url
    print('Find keywords:' + keyword + 'About to start downloading pictures...')
    for each in pic_url:
        imsearch ='png', each)   # Filter JPG pictures
        if imsearch is not None:
        print('Downloading section' + str(num + 1) + 'Picture address:' + str(each))
            if each is not None:
                pic = requests.get(each, timeout=7)
        except BaseException:
            print('Error, the current picture cannot be downloaded')
            string = file + '/' + keyword + '_' + str(num) + '.jpg'
            fp = open(string, 'wb')
            num += 1

        if num >= numPicture:
            num = 0                    # Zero number of downloaded pictures per category

if __name__ == '__main__':  # Main function entry
    # Ask how many pictures to download per keyword.
    tm = int(input('Please enter the number of downloads for each type of picture '))
    numPicture = tm
    line_list = []
    # Read keywords, one per line; strip() removes the trailing newline/space.
    # (Use a local name `f` so the global `file` used by dowmloadPicture()
    # is not shadowed by the file handle.)
    with open('./name.txt', encoding='utf-8') as f:
        line_list = [k.strip() for k in f.readlines()]

    for word in line_list:
        if len(word) != 0:    # Check each line of name.txt
            # NOTE(review): the search-engine base URL was lost in extraction
            # of the original post — restore it before the keyword. TODO confirm.
            url = '' + word + '&pn='
            tot = Find(url)
            print('After testing%s Class pictures%d Zhang' % (word, tot))
            file = word
            if os.path.exists(file):
                print('The file already exists, please re-enter')
                file = word + 'Folder 2'
            # Create the output directory for this keyword; without it the
            # downloads in dowmloadPicture() would fail to open their files.
            os.mkdir(file)
            t = 0
            tmp = url
            while t < numPicture:
                try:
                    url = tmp + str(t)
                    result = requests.get(url, timeout=10)
                except error.HTTPError:
                    print('network error,Please adjust the network and try again')
                    t = t + 60
                else:
                    dowmloadPicture(result.text, word)
                    t = t + 60
            print('End of current search,Thanks for using')

Keywords: Python network Google encoding

Added by ghe on Thu, 07 Nov 2019 00:52:18 +0200