Need To Extract Contents Of Subpages Using Scrapy

I'm fairly new to scrapy but have made a few simple scrapers work for me.

I'm trying to go to the next level by getting all the links from one page and scraping the content of the subpages. I've read up a few different examples and Q&As but can't seem to get this code to work for me.

import scrapy

from ..items import remoteworkhub_jobs

class remoteworkhub(scrapy.Spider):
    """Crawl a job-listing page, follow each job link, and yield one item per posting."""

    name = 'remoteworkhub'
    allowed_domains = ['']
    start_urls = ['']

    def parse(self, response):
        """Collect the individual job URLs from the listing page and follow each one.

        Yields a ``scrapy.Request`` per job link, handled by :meth:`parsejobpage`.
        """
        links = response.xpath('//a[@class="jobList-title"]/@href').extract()
        for job_link in links:
            # response.urljoin works for both relative and absolute hrefs,
            # unlike manual string concatenation with a hard-coded base URL.
            yield scrapy.Request(response.urljoin(job_link), callback=self.parsejobpage)

    def parsejobpage(self, response):
        """Extract the fields of a single job posting and yield a populated item."""
        titles = response.xpath('//h1[@class="u-mv--remove u-textH2"]/text()').extract()
        # NOTE(review): absolute /html/body/... paths are brittle and break on any
        # layout change — prefer class-based selectors like the ones above.
        companys = response.xpath('/html/body/div[4]/div/div/div[1]/div[1]/div[1]/div[2]/div[2]/div/div[1]/strong/a/text()').extract()
        categories = response.xpath('/html/body/div[4]/div/div/div[1]/div[1]/div[1]/div[3]/ul/li/a/text()').extract()
        worktype = response.xpath('/html/body/div[4]/div/div/div[1]/div[1]/div[1]/div[5]/div[2]/span/text()').extract()
        job_description = response.xpath('//div[@class="job-body"]//text()').extract()

        item = remoteworkhub_jobs()
        item['title'] = titles
        # TODO(review): populate the remaining fields once the item class declares
        # them, e.g. item['company'] = companys, item['category'] = categories,
        # item['worktype'] = worktype, item['job_description'] = job_description.
        yield item


Check out the following implementation, which should let you parse the job titles and their corresponding company names from that site. The way you have defined your XPaths is error-prone. I've modified them so that they work correctly. Give it a shot:

import scrapy

class remoteworkhub(scrapy.Spider):
    """Spider that follows job links from the listing page and yields title/company pairs."""

    name = 'remoteworkhub'
    start_urls = ['']

    def parse(self, response):
        """Follow every job link found on the listing page."""
        job_links = response.xpath(
            "//*[contains(@class,'job-listing')]//*[@class='jobList-title']/@href"
        ).extract()
        for href in job_links:
            yield scrapy.Request(response.urljoin(href), callback=self.parsejobpage)

    def parsejobpage(self, response):
        """Yield the job title and company name scraped from a job detail page."""
        yield {
            'title': response.xpath("//*[@class='jobDetail-headerIntro']/h1/text()").get(),
            'company': response.xpath("//*[@class='jobDetail-headerIntro']//strong//text()").get(),
        }

This is the kind of output I can see in the console if I use print instead of yield:

{'title': 'Sr Full Stack Developer, Node/React - Remote', 'company': 'Clevertech'}
{'title': 'Subject Matter Expert, Customer Experience - Remote', 'company': 'Qualtrics'}
{'title': 'Employee Experience Enterprise Account Executive - Academic and Government - Remote', 'company': 'Qualtrics'}
{'title': 'Senior Solutions Consultant, Brand Experience - Remote', 'company': 'Qualtrics'}
{'title': 'Data Analyst - Remote', 'company': 'Railsware'}
{'title': 'Recruitment Manager - Remote', 'company': 'Railsware'}