Building a simple crawler for Tencent Comics (腾讯动漫) content with Node.js
Author: 紫气东来_姜波
This article walks through a simple Node.js crawler that scrapes content from Tencent Comics. Readers who need something similar can use it as a reference; hopefully it helps.
First, the package.json:
{ "name": "Spider", "version": "1.0.0", "description": "spider ", "main": "index.js", "dependencies": { "async": "^1.5.0", "cheerio": "^0.19.0", "eventproxy": "^0.3.4", "superagent": "^1.4.0" }, "devDependencies": {}, "scripts": { "test": "node index", "start": "node server.js" } }
Next, server.js:
```js
var http = require("http");
var cheerio = require("cheerio");
var fs = require('fs');

// Utility function that downloads a URL and invokes
// callback with the data.
function downloadPage(url, callback) {
    http.get(url, function(res) {
        var data = "";
        res.on('data', function(chunk) {
            data += chunk;
        });
        res.on("end", function() {
            callback(data);
        });
    }).on("error", function() {
        callback(null);
    });
}

function start() {
    var url = 'http://ac.qq.com/Comic/index/type/4/page/';          // comic list pages
    var url2 = 'http://ac.qq.com/ComicView/index/id/549690/cid/1';  // sample detail page (unused)
    var arr = [];
    // Crawl list pages 1 through 12
    for (var i = 1; i < 13; i++) {
        downloadPage(url + i, function(data) {
            if (data) {
                var $ = cheerio.load(data);
                $("div.ret-search-result > ul > li.ret-search-item").each(function(i, e) {
                    var json = {};
                    json.tags = [];
                    json.img = $(e).find('img').attr('data-original');
                    json.link = $(e).find('a.mod-cover-list-thumb').attr('href');
                    json.id = json.link.split('/').reverse()[0];
                    json.title = $(e).find('h3.ret-works-title > a').text();
                    json.author = $(e).find('p.ret-works-author').text();
                    json.popular = $(e).find('p.ret-works-tags > span > em').text();
                    json.description = $(e).find('p.ret-works-decs').text();
                    $(e).find('p.ret-works-tags > a').each(function(i, e) {
                        json.tags.push($(e).text());
                    });
                    downloadImg(json.img);
                    arr.push(json);
                    console.log("done");
                    // console.log(arr);
                    // fs.writeFileSync('./output.json', JSON.stringify(arr));
                });
            }
        });
    }
}

function downloadImg(url) {
    http.get(url, function(res) {
        var imgData = "";
        // The response encoding must be set to binary,
        // otherwise the downloaded images cannot be opened.
        res.setEncoding("binary");
        res.on("data", function(chunk) {
            imgData += chunk;
        });
        res.on("end", function() {
            fs.writeFile("./downImgs/" + Math.floor(Math.random() * 10000000) + '.jpg', imgData, "binary", function(err) {
                if (err) {
                    console.log(err);
                }
                console.log("down success");
            });
        });
    });
}

exports.start = start;
```
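One thing to watch: downloadImg writes into ./downImgs, and fs.writeFile will fail if that directory does not exist. A minimal guard, sketched here as an assumption rather than part of the original code, could sit at the top of start():

```js
// Hypothetical guard (not in the original): make sure the image output
// directory exists before the crawl starts writing files into it.
if (!fs.existsSync('./downImgs')) {
    fs.mkdirSync('./downImgs');
}
```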
Finally, index.js:
```js
var server = require("./server");
server.start();
```
Notes
- Require the necessary modules: http, cheerio and fs.
- The downloadPage function takes a URL and processes the downloaded data in its callback.
- In the start function, define the data source URL; here it is Tencent Comics.
- The for loop walks the list pages and parses each one; the downloadImg call inside saves each cover image to local disk. A sketch of how the collected metadata could be written out once all pages are done follows below.
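A rough sketch of that last step (the variable and event names here are made up for illustration, not taken from the original article): the eventproxy dependency declared in package.json can wait for all 12 list pages and then write the combined result to output.json, which is what the commented-out fs.writeFileSync line in server.js hints at.

```js
// Sketch only: collect per-page results, then flush everything to output.json.
var fs = require('fs');                  // already required in server.js
var EventProxy = require('eventproxy');

var ep = new EventProxy();

// Fires once 'page_done' has been emitted 12 times; "pages" is an array
// holding the value passed to each emit call (one array of items per page).
ep.after('page_done', 12, function (pages) {
    var all = [].concat.apply([], pages);
    fs.writeFileSync('./output.json', JSON.stringify(all));
    console.log('saved ' + all.length + ' comics to output.json');
});

// Inside the downloadPage callback in start(), after the .each() loop
// has pushed this page's items into a per-page array (e.g. pageItems):
// ep.emit('page_done', pageItems);
```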
That is the whole simple Node.js crawler for Tencent Comics content. For more material on crawling Tencent Comics with Node.js, see the other related articles on 脚本之家.