Saving my JD crawler code

Brother Qiangdong's JD.com is a great data source. It is nothing like Taobao, which guards its data by poisoning responses and even fingerprinting PhantomJS; by comparison, JD is genuinely crawler-friendly.

That makes scraping the data simple, and the code below is simple too; I am leaving it here as a backup. It uses the go_spider crawler framework, although the framework's pipeline appears to have a bug (it does not receive every item added to a page, so output goes through a channel instead). The entry points are the second-level category pages under https://www.jd.com/allSort.aspx . From inspecting the requests, the API that serves product prices is http://pm.3.cn/prices/pcpmgets?type=1&skuIds=%v&origin=2 , and the program writes its output as a CSV file.
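
For reference, hitting that endpoint with a single SKU id returns a JSON array with one object per SKU. The shape below is my assumption based on the fields the code reads (p, m, op); the SKU id and prices are made-up examples:

http://pm.3.cn/prices/pcpmgets?type=1&skuIds=1234567&origin=2
[{"id":"J_1234567","p":"4999.00","m":"5999.00","op":"5299.00"}]

The code treats p as the current sale price; m and op look like the list and original prices, but that reading is a guess.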

The code follows; it crawls the phone and laptop categories.

// Crawl JD's phone and laptop category listings and dump name/SKU/price data to CSV.
package main

import (
	"encoding/csv"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/goquery"
	"github.com/bitly/go-simplejson"
	"github.com/hu17889/go_spider/core/common/com_interfaces"
	"github.com/hu17889/go_spider/core/common/page"
	"github.com/hu17889/go_spider/core/common/page_items"
	"github.com/hu17889/go_spider/core/pipeline"
	"github.com/hu17889/go_spider/core/spider"
)

const (
	helpUrl     = "https://list.jd.com/"                                     // base URL of the category listing pages
	queryFmtUrl = "http://pm.3.cn/prices/pcpmgets?type=1&skuIds=%v&origin=2" // price API; %v is replaced by the SKU id
)

var fields = []string{"name", "sku", "price", "price_m", "price_op"} // CSV header and JSON keys

type HelperPageProcesser struct {
	param interface{} // carries the shared *ContentConsole down to the content spider
}

func NewHelperPageProcesser() *HelperPageProcesser {
	return &HelperPageProcesser{}
}

type ContentPageProcesser struct {
	pipe *ContentConsole
}

func NewContentPageProcesser() *ContentPageProcesser {
	return &ContentPageProcesser{}
}

type ContentConsole struct {
	chanOutput chan interface{} // records queued for the CSV writer goroutine
	filePath   string
	csvHead    []string
	w          *csv.Writer
}

func NewContentConsole(chanNum int, filename string) *ContentConsole {
	outPath, _ := os.Getwd()
	outPath = outPath + string(os.PathSeparator) + filename
	return &ContentConsole{chanOutput: make(chan interface{}, chanNum), filePath: outPath}
}

func (this *ContentConsole) InitOutput(head []string) {
	f, err := os.Create(this.filePath)
	if err != nil {
		fmt.Println(err) // without a writer, queued records would never be flushed
		return
	}
	f.WriteString("\xEF\xBB\xBF") // Write UTF-8 BOM
	w := csv.NewWriter(f)
	w.Write(head)
	w.Flush()
	this.w = w
	this.csvHead = head

	// Drain the output channel in the background; each queued item becomes one CSV row.
	go func() {
		for {
			var wbuff []string
			item := <-this.chanOutput
			if v, ok := item.(*simplejson.Json); ok {
				for _, heads := range this.csvHead {
					tmpstr, _ := v.Get(heads).String()
					wbuff = append(wbuff, tmpstr)
				}
			}
			this.w.Write(wbuff)
			this.w.Flush()
		}
	}()
}

// The pipeline output has a bug and does not receive every added item, so default
// output is not handled here; records are written via the channel instead.
func (this *ContentConsole) Process(items *page_items.PageItems, t com_interfaces.Task) {
	//println("----------------------------------------------------------------------------------------------")
	//println("Crawled url :\t" + items.GetRequest().GetUrl() + "\n")
	//println("Crawled result : ")
	//	newjs := simplejson.New()
	//	for key, value := range items.GetAll() {
	//		newjs.Set(key, value)
	//		println(key + "\t:\t" + value)
	//	}
	//	this.chanOutput <- newjs

}

// Process reads a category listing page, finds the highest page number among the
// pagination links, and launches a second spider over every page of that category.
func (this *HelperPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	var urls []string
	maxnum := 0
	var hrefurl string
	query.Find("div[class=p-wrap] a").Each(func(i int, s *goquery.Selection) {
		num, err := strconv.Atoi(s.Text())
		if err == nil {
			if hrefurl == "" {
				href, _ := s.Attr("href")
				hrefurl = helpUrl + href
			}
			if num > maxnum {
				maxnum = num
			}
		}
	})
	listurl, _ := url.Parse(hrefurl)
	queryvalues := listurl.Query()

	for i := 1; i <= maxnum; i++ {
		queryvalues["page"] = []string{strconv.Itoa(i)}
		urls = append(urls, helpUrl+strings.TrimLeft(listurl.Path, "/")+"?"+url.Values(queryvalues).Encode())
	}

	pageProcess := NewContentPageProcesser()
	pageProcess.pipe, _ = this.param.(*ContentConsole)
	spider.NewSpider(pageProcess, "ContentTask").
		AddUrls(urls, "html").
		AddPipeline(pageProcess.pipe).
		SetThreadnum(5).
		Run()
}

// Process pulls the SKU id and product name from each item on a listing page,
// fetches its price from the price API, and queues the record for CSV output.
func (this *ContentPageProcesser) Process(p *page.Page) {
	query := p.GetHtmlParser()
	query.Find("li[class=gl-item]").Each(func(i int, s *goquery.Selection) {
		skusel := s.Find("div")
		pname := s.Find("em")
		datasku, _ := skusel.Attr("data-sku")
		queryUrl := fmt.Sprintf(queryFmtUrl, datasku)

		client := &http.Client{}
		req, err := http.NewRequest("GET", queryUrl, nil)
		if err != nil {
			fmt.Println(err)
			return
		}
		resp, err := client.Do(req)
		if err != nil {
			fmt.Println(err)
			return
		}

		defer resp.Body.Close()

		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Println(err)
			return
		}

		jsContent, err := simplejson.NewJson(body)
		if err != nil {
			fmt.Println(err)
			return
		}
		jsInner := jsContent.GetIndex(0) // the API returns a JSON array; take its first element

		// p.AddField would hand the record to the pipeline, but the pipeline bug
		// drops items, so the record is sent through the channel below instead.
		//p.AddField(fields[0], pname.Text())
		//p.AddField(fields[1], datasku)
		//p.AddField(fields[2], jsInner.Get("p").MustString())
		//p.AddField(fields[3], jsInner.Get("m").MustString())
		//p.AddField(fields[4], jsInner.Get("op").MustString())

		newjs := simplejson.New()
		newjs.Set(fields[0], pname.Text())
		newjs.Set(fields[1], datasku)
		newjs.Set(fields[2], jsInner.Get("p").MustString())
		newjs.Set(fields[3], jsInner.Get("m").MustString())
		newjs.Set(fields[4], jsInner.Get("op").MustString())

		// Send on a goroutine so a full output channel never blocks the crawl.
		go func() {
			this.pipe.chanOutput <- newjs
		}()

		fmt.Print(pname.Text())
		fmt.Println(datasku)
		fmt.Println(jsInner.Get("p").MustString())
	})
}

func (this *ContentPageProcesser) Finish() {
}

func (this *HelperPageProcesser) Finish() {
	fmt.Println("end spider")
}

func main() {
	contentPipe := NewContentConsole(100, "info.csv")
	contentPipe.InitOutput(fields)
	helpProcess := NewHelperPageProcesser()
	helpProcess.param = contentPipe
	spider.NewSpider(helpProcess, "TaskName").
		//AddUrl("https://list.jd.com/list.html?cat=1319,1527,1557", "html").
		AddUrls([]string{"https://list.jd.com/list.html?cat=9987,653,655", // phones
			"https://list.jd.com/list.html?cat=670,671,672"}, "html"). // laptops; "html" is the response type ("html" or "json")
		AddPipeline(pipeline.NewPipelineConsole()). // print results on screen
		SetThreadnum(1).                            // crawl with a single goroutine
		Run()
}
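
To try it, fetch the dependencies and run it directly. The commands below are a sketch assuming GOPATH-era tooling and that the file is saved as main.go, and the sample row is illustrative, not real output:

go get github.com/PuerkitoBio/goquery
go get github.com/bitly/go-simplejson
go get github.com/hu17889/go_spider/...
go run main.go

info.csv then starts with the header name,sku,price,price_m,price_op and gains one row per product, e.g. SomePhone 8GB+256GB,1234567,4999.00,5999.00,5299.00.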