def main(url, pic, down):
    """Scrape one listing page of anime-pictures.net and download every image.

    Follows each ``/pictures/...`` link on the listing page, opens the post
    page, finds its ``download_icon`` anchor(s) and streams the image to disk.

    Args:
        url:  Listing-page URL (``view_posts/<page>``).
        pic:  Base URL for individual post pages (``view_post/``).
        down: Site root, prepended to the relative download href.

    Side effects:
        Writes image files under ``download_path`` (defined elsewhere in this
        file — TODO(review): confirm it exists and ends with a path separator)
        and prints progress to stdout.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0'}
    r = requests.get(url, headers=headers)
    html = bs4.BeautifulSoup(r.text, 'html.parser')
    for anchor in html.findAll('a'):
        href = anchor.get('href')
        try:
            # Only follow links of the form /pictures/... ; anything else
            # (including anchors with no href at all) is skipped below.
            if href.split('/')[1] != 'pictures':
                continue
            post_url = pic + href.split('/')[-1]
            post_resp = requests.get(post_url, headers=headers, stream=True)
            soup = bs4.BeautifulSoup(post_resp.text, 'html.parser')
            for icon in soup.findAll('a', attrs={'class': 'download_icon'}):
                m = icon.get("href")
                print('\r[*]已找到图片链接{}\n'.format(m))
                # Original code branched on m[-3] == 'p' but both branches were
                # byte-identical, so the conditional was dead — merged into one
                # download path.
                q = requests.get(down + m, headers=headers, stream=True)
                filename = m.split('/')[-1]
                with open(download_path + filename, 'wb') as g:
                    # Stream in 10 kB chunks so large images are never fully
                    # buffered in memory.
                    for chunk in q.iter_content(10000):
                        g.write(chunk)
                print('\r[+]爬取完成,图片名{}'.format(filename), end="")
        except (AttributeError, IndexError, requests.RequestException):
            # Best-effort scrape: a missing href (None.split -> AttributeError),
            # a short path (IndexError) or a network error just skips this link.
            # Narrowed from the original bare `except: pass`, which also
            # swallowed KeyboardInterrupt/SystemExit.
            continue


# Driver: iterate listing pages 0..n-1.
# NOTE(review): `n` is not defined in this chunk — presumably read from user
# input earlier in the file; confirm before running.
for page in range(int(n)):
    url = 'https://anime-pictures.net/pictures/view_posts/{}'.format(page)
    pic = 'https://anime-pictures.net/pictures/view_post/'
    down = 'https://anime-pictures.net'
    main(url, pic, down)