urllib.robotparser — Internet Spider Access Control
Testing Access Permissions
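robotparser provides the RobotFileParser class, which can answer questions about whether or not a particular user agent is allowed to fetch a URL on the web site that publishes a robots.txt file. This example downloads the live robots.txt for pymotw.com and then tests several paths, first as relative paths and then as fully qualified URLs.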
# urllib_robotparser_simple.py
from urllib import parse
from urllib import robotparser

AGENT_NAME = 'PyMOTW'
URL_BASE = 'https://pymotw.com/'

# Download and parse the site's robots.txt.
parser = robotparser.RobotFileParser()
parser.set_url(parse.urljoin(URL_BASE, 'robots.txt'))
parser.read()

PATHS = [
    '/',
    '/PyMOTW/',
    '/admin/',
    '/downloads/PyMOTW-1.92.tar.gz',
]

for path in PATHS:
    # can_fetch() accepts a relative path...
    print('{!r:>6} : {}'.format(
        parser.can_fetch(AGENT_NAME, path), path))
    # ...or a fully qualified URL.
    url = parse.urljoin(URL_BASE, path)
    print('{!r:>6} : {}'.format(
        parser.can_fetch(AGENT_NAME, url), url))
    print()
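When network access is not available, the rules can also be fed to RobotFileParser directly through its parse() method. A minimal sketch, using a hypothetical robots.txt that disallows /admin/ for all user agents:

# urllib_robotparser_parse.py (hypothetical example)
from urllib import robotparser

# Hypothetical robots.txt content; a real file would come from the site.
ROBOTS_TXT = """\
User-agent: *
Disallow: /admin/
"""

parser = robotparser.RobotFileParser()
# parse() takes the file content as a sequence of lines,
# so no download is required.
parser.parse(ROBOTS_TXT.splitlines())

print(parser.can_fetch('PyMOTW', '/admin/'))  # False
print(parser.can_fetch('PyMOTW', '/'))        # True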
Long-lived Spiders
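Long-running spiders may need to download a fresh copy of robots.txt periodically, because its contents can change while the program runs. mtime() reports when the file was last fetched, and modified() resets that timestamp to the current time, so comparing the age of the cached data against a threshold makes it possible to decide when to re-read the file. This example uses a short threshold of one second to keep the demonstration quick (a real spider would wait much longer) and reads from a local copy of the file to avoid repeated downloads.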
# urllib_robotparser_longlived.py
from urllib import robotparser
import time

AGENT_NAME = 'PyMOTW'

parser = robotparser.RobotFileParser()
# Using the local copy
parser.set_url('file:robots.txt')
parser.read()
parser.modified()

PATHS = [
    '/',
    '/PyMOTW/',
    '/admin/',
    '/downloads/PyMOTW-1.92.tar.gz',
]

for path in PATHS:
    age = int(time.time() - parser.mtime())
    print('age:', age, end=' ')
    if age > 1:
        print('rereading robots.txt')
        parser.read()
        parser.modified()
    else:
        print()
    print('{!r:>6} : {}'.format(
        parser.can_fetch(AGENT_NAME, path), path))
    # Simulate a delay in processing
    time.sleep(1)
    print()
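Beyond access rules, RobotFileParser also exposes the politeness hints a site may publish. A minimal sketch; crawl_delay() and request_rate() were added in Python 3.6, and both return None when robots.txt does not declare the corresponding rule for the agent:

# urllib_robotparser_delay.py (hypothetical example)
from urllib import robotparser

parser = robotparser.RobotFileParser()
parser.set_url('https://pymotw.com/robots.txt')
parser.read()

# Seconds to wait between requests, or None if unspecified.
print('crawl delay:', parser.crawl_delay('PyMOTW'))

# A named tuple (requests, seconds), or None if unspecified.
rate = parser.request_rate('PyMOTW')
if rate is None:
    print('no request rate defined')
else:
    print(rate.requests, 'requests per', rate.seconds, 'seconds')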