我想获得引荐锚文本链接。
我该如何从引荐(referer)URL 获取指向当前页面的传入链接的锚文本?

感谢您的时间!

from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from wallspider.items import Website


class mydomainSpider(CrawlSpider):
    """Crawl mydomain category pages matching '133162' and emit Website items
    carrying the referer header, URL, title and meta description."""
    name = "mydomain"
    allowed_domains = ["www.mydomain"]
    start_urls = ["http://www.mydomain/cp/133162",]

    rules = (
        Rule(
            SgmlLinkExtractor(
                allow=('133162', ),
                deny=('/ip/', 'search_sort=', 'ic=60_0', 'customer_rating', 'special_offers', ),
            ),
            callback="parse_items",
            follow=True,
        ),
    )

    def parse_items(self, response):
        """Build one Website item per element node of the response page.

        NOTE(review): '//*' selects every element, so the same page-level
        title/description is duplicated once per node — presumably intentional
        in the original question; behavior preserved.
        """
        hxs = HtmlXPathSelector(response)
        referer = response.request.headers.get('Referer')
        collected = []
        for node in hxs.select('//*'):
            entry = Website()
            entry['referer'] = referer
            entry['url'] = response.url
            entry['title'] = node.xpath('/html/head/title/text()').extract()
            entry['description'] = node.select('//meta[@name="Description"]/@content').extract()
            collected.append(entry)
        return collected


更新:以下是根据各位的建议修改后的新代码:

from scrapy.contrib.spiders import CrawlSpider,Rule
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from wallspider.items import Website
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

class anchorspider(CrawlSpider):
    """Follow every <a> on the start page and record, for each target page,
    the incoming anchor text and link title (carried through request meta)."""
    name = "anchor"
    allowed_domains = ["mydomain.com"]
    start_urls = ["http://www.mydomain.com/"]

    extractor = SgmlLinkExtractor()

    rules = (
        Rule(
            SgmlLinkExtractor(
                allow=('133162', ),
                deny=('/ip/', 'search_sort=', 'ic=60_0', 'customer_rating', 'special_offers', ),
            ),
            # FIX: the original callback "parse_items" does not exist on this
            # class; route rule-matched pages through parse_page instead.
            callback="parse_page",
            follow=True,
        ),
    )

    def parse_start_url(self, response):
        # FIX: return the generated requests so Scrapy schedules them; the
        # original built the list and discarded it, crawling nothing.
        return list(self.parse_links(response))

    def parse_links(self, response):
        """Yield one Request per anchor, passing its text and title via meta."""
        try:
            from urllib.parse import urljoin  # Python 3
        except ImportError:
            from urlparse import urljoin  # Python 2 (matches scrapy.contrib era)
        hxs = HtmlXPathSelector(response)
        for link in hxs.select('//a'):
            anchor_text = ''.join(link.select('./text()').extract())
            title = ''.join(link.select('./@title').extract())
            url = ''.join(link.select('./@href').extract())
            if not url:
                # Anchors without an href cannot be turned into requests.
                continue
            # FIX: relative hrefs caused "ValueError: Missing scheme in
            # request url"; resolve against the current page URL.
            absolute_url = urljoin(response.url, url)
            # FIX: build ONE meta dict — the original assigned meta twice,
            # so the second assignment silently dropped the title.
            yield Request(
                absolute_url,
                callback=self.parse_page,
                meta={'title': title, 'anchor_text': anchor_text},
            )

    def parse_page(self, response):
        """Fill a Website item from the response plus the meta carried over.

        Uses .get() for meta keys because rule-generated requests do not
        carry anchor_text/title.
        """
        hxs = HtmlXPathSelector(response)
        item = Website()
        item['anchor_text'] = response.meta.get('anchor_text')
        item['url'] = response.url
        item['title'] = response.meta.get('title')
        item['referer'] = response.request.headers.get('Referer')
        # FIX: the original referenced an undefined name `site` here
        # (NameError); the selector for this response is `hxs`.
        item['description'] = hxs.select('//meta[@name="Description"]/@content').extract()
        return item


我收到以下错误:raise ValueError('Missing scheme in request url: %s' % self._url)(请求网址中缺少方案/scheme)

最佳答案

实际上响应对象中有response.meta.get('link_text')

关于python - Scrapy 获取传入链接的锚文本,我们在Stack Overflow上找到一个类似的问题:https://stackoverflow.com/questions/20482684/

10-12 16:54