搞懂Python 爬虫抓取、分析、存储三要素和具体操作
抓取
分析
存储
import urllib.request
# The target site is served over HTTPS, so the ssl module is required.
import ssl

# Disable certificate verification when fetching (demo only).
ssl._create_default_https_context = ssl._create_unverified_context

# Fetch the page and print the decoded HTML body.
url = 'https://www.lidihuo.com/python/spider-test.html'
with urllib.request.urlopen(url) as response:
    html = response.read().decode('utf-8')
print(html)
<html lang="en">
<head>
<meta charset="UTF-8">
<title>python spider </title>
</head>
<body>
Hello,I am lidihuo
Welcome to Python spider
</body>
</html>
import urllib.request
import urllib.parse
# The site is served over HTTPS, so the ssl module is needed.
import ssl

# Disable certificate verification for this demo (never do this in production).
ssl._create_default_https_context = ssl._create_unverified_context

url = 'https://www.lidihuo.com/python/spider-test.html'
# Example query-string parameters. The original concatenated undefined names
# (key, value1, key2, value2 — a NameError); urlencode() builds the query
# string from concrete values and percent-escapes them correctly.
query_params = {'key1': 'value1', 'key2': 'value2'}
url = url + '?' + urllib.parse.urlencode(query_params)
response = urllib.request.urlopen(url)
# print(response.read().decode('utf-8'))
import requests

# Issue a plain GET request; response.text is the decoded response body.
target = 'https://www.lidihuo.com/python/spider-test.html'
response = requests.get(url=target)
print(response.text)
# GET request with query parameters:
# response = requests.get(url='https://www.lidihuo.com/python/spider-test.html', params={'key1':'value1', 'key2':'value2'})
<html lang="en">
<head>
<meta charset="UTF-8">
<title>python spider </title>
</head>
<body>
Hello,I am lidihuo
Welcome to Python spider
</body>
</html>
# Demo login credentials sent as form data in the POST body.
params = {'username': 'root', 'passwd': 'root'}
# Fixed the malformed URL: "http:xxx.com/login" was missing the "//"
# authority separator, so requests would reject it as an invalid URL.
response = requests.post("http://xxx.com/login", data=params)
# Print every cookie the server set on the login response.
for key, value in response.cookies.items():
    print('key = ', key + ' ||| value :' + value)
import urllib.request
# Fixed: http.cookiejar was imported twice in the original.
import http.cookiejar
# The site is served over HTTPS, so the ssl module is needed.
import ssl

# Disable certificate verification for this demo (never do this in production).
ssl._create_default_https_context = ssl._create_unverified_context

"""
Save the cookies from a visit to a file.
"""
"""
MozillaCookieJar: a cookiejar subclass.
Derived from FileCookieJar; creates a FileCookieJar instance compatible
with the Mozilla-browser cookies.txt format.
"""
cookie = http.cookiejar.MozillaCookieJar('cookie.txt')
# Build a handler that stores response cookies in the jar.
handler = urllib.request.HTTPCookieProcessor(cookie)
# Build an opener that uses that handler.
opener = urllib.request.build_opener(handler)
# Build the request object.
request = urllib.request.Request('https://www.lidihuo.com/python/spider-test.html',headers={"Connection": "keep-alive"})
# Send the request; the cookies arrive together with the response.
response = opener.open(request)
# Persist the cookies, keeping session (discard) and expired entries too.
cookie.save(ignore_discard=True, ignore_expires=True)
"""
请求携带文件中的cookie
"""
import urllib.request
import http.cookiejar
# 网站是https访问的需要引入ssl模块
import ssl
# 导入ssl时关闭证书验证
ssl._create_default_https_context = ssl._create_unverified_context
cookie = http.cookiejar.MozillaCookieJar()
cookie.load('cookie.txt', ignore_discard=True, ignore_expires=True)
handler = urllib.request.HTTPCookieProcessor(cookie)
opener = urllib.request.build_opener(handler)
request = urllib.request.Request('https://www.lidihuo.com/python/spider-test.html')
html = opener.open(request).read().decode('utf-8')
print(html)
<html lang="en">
<head>
<meta charset="UTF-8">
<title>python spider </title>
</head>
<body>
Hello,I am lidihuo
Welcome to Python spider
</body>
</html>
# Custom request headers.
headers = {
    # Fixed: the Host header must be a bare host name; the original included
    # the "https://" scheme, which is not a valid Host value.
    'Host': 'www.lidihuo.com',
    'Referer': 'https://www.lidihuo.com/python/spider-test.html',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}
response = requests.get("http://www.xxxxx.com", headers=headers)
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
Accept-Encoding: gzip, deflate, br
Accept-Language: zh-CN,zh;q=0.9,en;q=0.8
Cache-Control: max-age=0
Connection: keep-alive
Cookie: OUTFOX_SEARCH_USER_ID_NCOO=1091135338.9776888; JSESSIONID=FD8597A7CE469BD49E3BB2696FFA92EA
Host: www.lidihuo.com
If-Modified-Since: Mon, 27 Jul 2020 03:42:05 GMT
If-None-Match: W/"5f1e4d0d-81dc"
Sec-Fetch-Dest: document
Sec-Fetch-Mode: navigate
Sec-Fetch-Site: none
Sec-Fetch-User: ?1
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36
# Map each URL scheme to the proxy that should handle it.
proxies = {
    "http": "http://ip地址",
    "https": "http://ip地址",
}
# Fixed: requests expects the proxies mapping itself. The original passed
# random.choices(proxies), which returns a list of scheme keys (and `random`
# was never imported) — requests cannot use that.
response = requests.get("http://www.xxxxx.com", proxies=proxies)
# Throttle the crawler: pause one second between requests so repeated
# fetches do not hammer the target server.
import time
time.sleep(1)
User-agent: Baiduspider
Disallow: /
User-agent: baiduspider
Disallow: /
import json

# Sample nested data: one record per user name.
dictObj = {
    'aa':{
        'age': 20,
        'city': 'beijing',
    },
    'bb': {
        'age': 24,
        'city': 'shanghai',
    }
}
# ensure_ascii=False keeps non-ASCII text readable in the output file.
jsObj = json.dumps(dictObj, ensure_ascii=False)
# Use a context manager and an explicit encoding: the original left the
# file object unclosed on error and depended on the platform default
# encoding, which can break non-ASCII output.
with open('jsonFile.json', 'w', encoding='utf-8') as fileObject:
    fileObject.write(jsObj)
import csv

# newline='' stops the csv module from emitting blank lines on Windows;
# an explicit encoding makes the output platform-independent (the original
# used the locale default, which can fail on the Chinese text below).
with open('student.csv', 'w', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    # Header row, then one row per student.
    writer.writerow(['姓名', '年龄', '城市'])
    writer.writerows([['小明', 20 , '北京'],['杰克', 22, '上海']])
# Fixed: pymongo was used below but never imported anywhere in the file.
import pymongo

# Connect to the local MongoDB server.
client = pymongo.MongoClient('mongodb://127.0.0.1:27017/')
# The "lidihuo" database.
db = client.lidihuo
# The "student" collection; MongoDB creates it automatically on first insert.
student_db = db.student
student_json = {
    'name': '小明',
    'age': 20,
    'city': '北京'
}
# Fixed: Collection.insert() was deprecated and then removed from pymongo;
# insert_one() is the supported API for inserting a single document.
student_db.insert_one(student_json)