requests 是一个Python的HTTP库,用于发送HTTP请求,获取响应。
主要用于测试服务端的响应,以及实现爬虫功能。
pip install requests
requests.请求方式(请求URL)
请求之后的响应对象Response
Response.status_code:状态码
Response.headers:响应头
Response.text:正文字符串
Response.encoding: 响应内容的编码
Response.request: 包含了请求数据的对象
Response.request.url: 这个响应对象的请求url
Response.request.method: 响应对象的请求方法
Response.request.headers: 请求头
Response.json(): 能够获取json数据
Response.cookies:存放响应cookies
import requests
def get_baidu():
    """Fetch the Baidu homepage with a GET request and print the response details."""
    home_url = 'https://www.baidu.com/'
    response = requests.get(home_url)
    # HTTP status code of the response
    print(response.status_code)
    # Response headers
    print(response.headers)
    # Response body decoded as text
    print(response.text)
def get_xiaohua():
    """GET the xiaohua.com article page and print status, headers and body."""
    response = requests.get('https://www.xiaohua.com/article/')
    print(response.status_code)
    print(response.headers)
    print(response.text)
def get_oxiaohua():
    """GET ogaoxiao.com, re-decode the body as UTF-8 and locate a known substring."""
    page = requests.get('http://ogaoxiao.com/')
    # Force UTF-8 decoding of the response body
    page.encoding = 'utf-8'
    # Search the body for an expected substring to sanity-check the content
    print(page.text.find('排行榜'))
if __name__ == '__main__':
    # Entry point: run only the ogaoxiao demo
    get_oxiaohua()
import requests
def get_bing_zhoujielun():
    """Search Bing with a pre-encoded query URL and print response/request info."""
    search_url = 'https://cn.bing.com/search?q=%E5%91%A8%E6%9D%B0%E4%BC%A6&form=QBLH&sp=-1&pq=%E5%91%A8%E6%9D%B0%E4%BC%A6&sc=9-3&qs=n&sk=&cvid=B9CB5864C86041E6877E8681F20AB728'
    response = requests.get(search_url)
    print(response.status_code)
    # URL the request was actually sent to
    print(response.request.url)
    # HTTP method that was used for the request
    print(response.request.method)
def get_bing_wanglihong():
    """Search Bing by passing the query as a params dict (requests URL-encodes it)."""
    # NOTE(review): function name says wanglihong but the query is 周杰伦 — confirm intent
    base_url = 'https://cn.bing.com/search'
    query = {
        'q': '周杰伦',
        'pq': '周杰伦',
    }
    # requests appends the encoded dict to the URL as a query string
    response = requests.get(base_url, params=query)
    print(response.status_code)
    print(response.request.url)
def get_xiaobudian_1():
    """Search xiaoso.net with the query already URL-encoded inside the URL string."""
    search_url = 'https://www.xiaoso.net/m/search?wd=%E8%AF%BB%E8%80%85'
    response = requests.get(search_url)
    print(response.status_code)
    print(response.request.url)
    print(response.text)
def get_xiaobudian_2():
    """Search xiaoso.net, letting requests URL-encode the query via params."""
    search_url = 'https://www.xiaoso.net/m/search'
    # Query parameters; requests will percent-encode the Chinese text
    query = {'wd': '读者'}
    response = requests.get(search_url, params=query)
    print(response.status_code)
    print(response.request.url)
    print(response.text)
if __name__ == '__main__':
    # Entry point: only the params-dict search variant is enabled
    # get_bing_zhoujielun()
    # get_bing_wanglihong()
    # get_xiaobudian_1()
    get_xiaobudian_2()
import requests
if __name__ == '__main__':
    # Route traffic through a local Fiddler proxy via the proxies keyword.
    # verify=False skips TLS certificate validation, which an intercepting
    # proxy would otherwise break.
    proxies = {
        'http': 'http://127.0.0.1:8888',
        'https': 'http://127.0.0.1:8888',
    }
    target = 'http://xawn.f3322.net:8060/woniusales/'
    response = requests.get(target, proxies=proxies, verify=False)
    print(response.status_code)
构建一个headers字典,传递给headers参数
import requests
if __name__ == '__main__':
    target = 'http://xawn.f3322.net:8060/woniusales/'
    # Custom request headers are passed through the headers keyword
    custom_headers = {'User-Agent': 'Chrome/100.0.4896.127'}
    response = requests.get(target, headers=custom_headers)
    print(response.status_code)
import requests
def get_woniusales():
    """Log in to woniusales by POSTing form fields through the data keyword."""
    login_url = 'http://xawn.f3322.net:8060/woniusales/user/login'
    # Form fields; requests sends them application/x-www-form-urlencoded
    form = {
        'username': 'admin',
        'password': 'Milor123',
        'verifycode': '0000',
    }
    response = requests.post(login_url, data=form)
    print(response.status_code)
    print(response.text)
def search_stock():
    """POST a paged query to the zero-stock endpoint and print the raw response."""
    query_url = 'http://xawn.f3322.net:8060/woniusales/query/zerostored'
    response = requests.post(query_url, data={'page': 1})
    print(response.status_code)
    print(response.text)
if __name__ == '__main__':
    # Entry point: only the stock query is enabled
    # get_woniusales()
    search_stock()
json:
字符串: "string"
数值: 10
布尔值: true, false
None: null
列表: [1, 2, 3]
对象: {
    "name": "wanglihong",
    "age": 20
}
import requests
import json
def study_json():
    """POST a batch query and contrast raw-text iteration with parsed-JSON iteration."""
    batch_url = 'http://xawn.f3322.net:8060/woniusales/goods/querybatch'
    form = {'batchname': 'GB20222228'}
    response = requests.post(batch_url, data=form)
    print(response.status_code)
    print(response.text)
    print(type(response.text))
    # Parse the JSON body into native Python objects
    parsed = json.loads(response.text)
    print(parsed)
    # Iterating the raw text yields one character at a time
    for item in response.text:
        print(item)
    print('*********************')
    # Iterating the parsed list yields one record at a time
    for item in parsed:
        print(item)
    print('*********************')
    print('*********************')
    print(parsed[0]['costunitprice'])
    print('*********************')
    print('*********************')
    print('*********************')
    # Response.json() parses the body in a single step
    decoded = response.json()
    print(decoded[0]['quantity'])
if __name__ == '__main__':
    # Entry point: run the JSON parsing demo
    study_json()
import requests
def test_boss():
    """Call the evaluateStu endpoint, authenticating with a hard-coded Cookie header."""
    query_url = 'http://192.168.184.128:8080/WoniuBoss/validate/evaluateStu'
    # Cookie string captured from a logged-in session with a packet-capture tool
    auth_headers = {
        'Cookie': 'JSESSIONID=1B5CFE59F0C488D7C05243310AD10E39; workId=WNCQ02'
                  '9; _jfinal_captcha=667284b851ab4791919f2b1217417a23; token='
                  '782DD675859C54149779972C309BFBF5'
    }
    response = requests.post(query_url, headers=auth_headers)
    print(response.status_code)
    print(response.text)
if __name__ == '__main__':
    # Entry point: run the hard-coded-cookie variant
    test_boss()
import requests
def test_boss():
    """Log in first, then reuse the login response's cookies for the query request."""
    login_url = 'http://192.168.184.128:8080/WoniuBoss/log/userLogin'
    credentials = {
        'userName': 'wncq029',
        'userPass': 'woniu123',
        'checkcode': '0000',
    }
    login_response = requests.post(login_url, data=credentials)
    # Session cookies issued by the server on login
    session_cookies = login_response.cookies
    query_url = 'http://192.168.184.128:8080/WoniuBoss/validate/evaluateStu'
    # Forward the login cookies via the cookies keyword instead of
    # hand-copying a Cookie header from a packet capture
    query_response = requests.post(query_url, cookies=session_cookies)
    print(query_response.status_code)
    print(query_response.text)
if __name__ == '__main__':
    # Entry point: run the cookie-forwarding variant
    test_boss()
import requests
def test_boss():
    """Log in and query within one requests.Session so cookies carry over.

    Fix: the original only called session.close() on the success path, so an
    exception during either request leaked the session's connection pool.
    Using the session as a context manager guarantees cleanup.
    """
    login_url = 'http://192.168.184.128:8080/WoniuBoss/log/userLogin'
    login_data = {
        'userName': 'wncq029',
        'userPass': 'woniu123',
        'checkcode': '0000'
    }
    # The session stores cookies from the login response and sends them
    # automatically on subsequent requests
    with requests.Session() as session:
        resp = session.post(login_url, data=login_data)
        print(resp.status_code)
        query_url = 'http://192.168.184.128:8080/WoniuBoss/validate/evaluateStu'
        resp = session.post(query_url)
        print(resp.status_code)
        print(resp.text)
if __name__ == '__main__':
    # Entry point: run the Session-based variant
    test_boss()
发送带文件的请求,是在发送请求时使用files关键字
import requests
def upload_file_1():
    """Upload a batch spreadsheet, sending 'batchname' as a regular form field.

    Fix: the original opened the file inside the files dict and never closed
    it; the with-block releases the handle even if the request raises.
    """
    url = 'http://xawn.f3322.net:8060/woniusales/goods/upload'
    data = {
        'batchname': 'GB20220496'
    }
    # Session cookie captured from an authenticated browser session
    headers = {
        'Cookie': 'JSESSIONID=90150F62F118D6EF3A9CED61AD6B4335; _jfinal_captcha=c09f81ff82ea4807ac952afbce05ec16; username=admin; password=Milor123'
    }
    with open(r'C:\Users\he\Downloads\销售出库单-20171020-Test.xls', 'rb') as batch_file:
        files = {'batchfile': batch_file}
        resp = requests.post(url, data=data, files=files, headers=headers)
    print(resp.text, resp.status_code)
def upload_file_2():
    """Upload a batch spreadsheet, packing 'batchname' into the multipart body.

    A (None, value) tuple in files= makes requests send that entry as a plain
    form field of the multipart request.
    Fix: the original leaked the open file handle; the with-block closes it
    even if the request raises.
    """
    url = 'http://xawn.f3322.net:8060/woniusales/goods/upload'
    # Session cookie captured from an authenticated browser session
    headers = {
        'Cookie': 'JSESSIONID=90150F62F118D6EF3A9CED61AD6B4335; _jfinal_captcha=c09f81ff82ea4807ac952afbce05ec16; username=admin; password=Milor123'
    }
    with open(r'C:\Users\he\Downloads\销售出库单-20171020-Test.xls', 'rb') as batch_file:
        files = {
            'batchname': (None, 'GB20220497'),
            'batchfile': batch_file
        }
        resp = requests.post(url, files=files, headers=headers)
    print(resp.text, resp.status_code)
if __name__ == '__main__':
    # Entry point: only the multipart-field variant is enabled
    # upload_file_1()
    upload_file_2()
allow_redirects: 是否允许自动重定向(默认为True;设为False可直接观察3xx响应)
import requests
if __name__ == '__main__':
    url = 'http://xawn.f3322.net:8060/woniusales'
    # allow_redirects=False DISABLES automatic redirect following, so any
    # 3xx response from the server is returned as-is instead of being followed
    resp = requests.get(url, allow_redirects=False)
    print(resp.status_code)
    print(resp.text)
    # Final URL of the response (unchanged here since redirects are not followed)
    print(resp.url)