
Commit fc62ca2

author CodeJCSON
committed Nov 28, 2017

Simple Spiders crawler with the Scrapy framework

1 parent b368faf commit fc62ca2

File tree

15 files changed: +3376 −0 lines changed
 

‎.DS_Store

0 Bytes (binary file not shown)

‎teacherInfo/.idea/misc.xml

Lines changed: 4 additions & 0 deletions (generated file, diff not rendered by default)

‎teacherInfo/.idea/modules.xml

Lines changed: 8 additions & 0 deletions (generated file, diff not rendered by default)

‎teacherInfo/.idea/teacherInfo.iml

Lines changed: 11 additions & 0 deletions (generated file, diff not rendered by default)

‎teacherInfo/.idea/workspace.xml

Lines changed: 389 additions & 0 deletions (generated file, diff not rendered by default)

‎teacherInfo/begin.py

Lines changed: 2 additions & 0 deletions
from scrapy import cmdline

# Run the 'myteacher' spider as if `scrapy crawl myteacher` were typed at the shell.
cmdline.execute('scrapy crawl myteacher'.split())
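
begin.py shells the crawl through cmdline.execute. A sketch of an equivalent in-process launcher, assuming it is run from the project root so teacherInfo.settings resolves:

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    # Load the project settings and run the 'myteacher' spider in this process.
    process = CrawlerProcess(get_project_settings())
    process.crawl('myteacher')
    process.start()  # blocks until the crawl finishes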

‎teacherInfo/scrapy.cfg

Lines changed: 11 additions & 0 deletions
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.org/en/latest/deploy.html

[settings]
default = teacherInfo.settings

[deploy]
#url = http://localhost:6800/
project = teacherInfo

‎teacherInfo/teacher.json

Lines changed: 2716 additions & 0 deletions
Large diffs are not rendered by default.

‎teacherInfo/teacherInfo/__init__.py

Whitespace-only changes.

‎teacherInfo/teacherInfo/items.py

Lines changed: 20 additions & 0 deletions
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


# An Item defines the structured data fields that hold the scraped data
class TeacherinfoItem(scrapy.Item):

    # teacher name
    name = scrapy.Field()
    # job title
    position = scrapy.Field()
    # personal profile
    info = scrapy.Field()
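
A scrapy.Item behaves like a dict whose keys are fixed to the declared fields. A quick sketch with hypothetical values:

    from teacherInfo.items import TeacherinfoItem

    item = TeacherinfoItem()
    item['name'] = u'Zhang San'      # hypothetical value; declared fields assign like dict keys
    item['position'] = u'Lecturer'
    print(item['name'])
    # item['age'] = 30  # KeyError: 'age' is not a declared field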
‎teacherInfo/teacherInfo/middlewares.py

Lines changed: 56 additions & 0 deletions
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class TeacherinfoSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

‎teacherInfo/teacherInfo/pipelines.py

Lines changed: 21 additions & 0 deletions
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html

import json
import codecs


class TeacherinfoPipeline(object):
    def __init__(self):
        # Open the output file once, UTF-8 encoded
        self.filename = codecs.open('teacher.json', 'wb', 'utf-8')

    def process_item(self, item, spider):
        print(item)
        # Serialize each item as one JSON object per line
        html = json.dumps(dict(item), ensure_ascii=False)
        self.filename.write(html + '\n')
        return item

    def close_spider(self, spider):
        # Close the output file when the spider finishes
        self.filename.close()
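
Because process_item writes one JSON object per line, teacher.json is JSON Lines rather than a single JSON array. A minimal sketch of loading it back:

    import json

    # Read the file the pipeline produced, one record per non-empty line
    with open('teacher.json', encoding='utf-8') as f:
        teachers = [json.loads(line) for line in f if line.strip()]
    print(len(teachers), 'teachers loaded')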

‎teacherInfo/teacherInfo/settings.py

Lines changed: 90 additions & 0 deletions
# -*- coding: utf-8 -*-

# Scrapy settings for teacherInfo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'teacherInfo'

SPIDER_MODULES = ['teacherInfo.spiders']
NEWSPIDER_MODULE = 'teacherInfo.spiders'
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:56.0)'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'teacherInfo (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'teacherInfo.middlewares.TeacherinfoSpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'teacherInfo.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'teacherInfo.pipelines.TeacherinfoPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
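
The project writes JSON through a custom pipeline, but Scrapy's built-in feed exports can do the same job. A sketch, assuming the Scrapy 1.x setting names that were current for this commit (newer releases replaced them with a FEEDS dict):

    # Alternative to TeacherinfoPipeline: let the framework export items
    #FEED_URI = 'teacher.json'
    #FEED_FORMAT = 'jsonlines'

The same effect is available ad hoc as scrapy crawl myteacher -o teacher.jl.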
‎teacherInfo/teacherInfo/spiders/__init__.py

Lines changed: 4 additions & 0 deletions

# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
‎teacherInfo/teacherInfo/spiders/myteacher.py

Lines changed: 44 additions & 0 deletions

# -*- coding: utf-8 -*-
import scrapy
from teacherInfo.items import TeacherinfoItem

class MyteacherSpider(scrapy.Spider):
    name = 'myteacher'
    allowed_domains = ['itcast.cn']
    # start_urls = ("http://www.itcast.cn/channel/teacher.shtml",)  # a tuple also works
    start_urls = ['http://www.itcast.cn/channel/teacher.shtml#ac',
                  'http://www.itcast.cn/channel/teacher.shtml#acloud',
                  'http://www.itcast.cn/channel/teacher.shtml#adesign',
                  'http://www.itcast.cn/channel/teacher.shtml#ads',
                  'http://www.itcast.cn/channel/teacher.shtml#ajavaee',
                  'http://www.itcast.cn/channel/teacher.shtml#anetmarket',
                  'http://www.itcast.cn/channel/teacher.shtml#aphp',
                  'http://www.itcast.cn/channel/teacher.shtml#apm',
                  'http://www.itcast.cn/channel/teacher.shtml#apython',
                  'http://www.itcast.cn/channel/teacher.shtml#astack',
                  'http://www.itcast.cn/channel/teacher.shtml#atest',
                  'http://www.itcast.cn/channel/teacher.shtml#aui',
                  'http://www.itcast.cn/channel/teacher.shtml#auijp',
                  'http://www.itcast.cn/channel/teacher.shtml#aweb']

    # the region of the site the spider is constrained to
    def parse(self, response):
        # collection that holds the teacher records
        items = []
        print(response.body)
        for each in response.xpath("//div[@class='li_txt']"):
            # wrap the extracted data in a TeacherinfoItem object
            item = TeacherinfoItem()
            # extract() always returns unicode strings
            name = each.xpath("h3/text()").extract()
            position = each.xpath("h4/text()").extract()
            info = each.xpath("p/text()").extract()

            # each xpath query returns a single-element list here
            item['name'] = name[0]
            item['position'] = position[0]
            item['info'] = info[0]

            items.append(item)
            yield item
        # or return the accumulated list at the end instead
        # return items
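
Two caveats worth noting. First, the start_urls differ only in their #fragment; Scrapy's default request fingerprinting strips fragments, so the duplicate filter should collapse them into a single fetch of teacher.shtml. Second, name[0] raises IndexError for any li_txt block missing an h3, h4, or p child. A safer sketch of those assignments, using the selector API's extract_first with a default:

            # alternative to list indexing: never raises IndexError
            item['name'] = each.xpath("h3/text()").extract_first(default='')
            item['position'] = each.xpath("h4/text()").extract_first(default='')
            item['info'] = each.xpath("p/text()").extract_first(default='')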
