Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Manutenção] Novo raspador para Coruripe-AL #1317

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 56 additions & 0 deletions data_collection/gazette/spiders/al/al_coruripe.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
from datetime import date, datetime

from scrapy import Request, Selector

from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider


class AlCoruripeSpider(BaseGazetteSpider):
    """Scraper for the official gazette of Coruripe-AL.

    The site exposes a search listing of editions. The spider first fetches
    only the "extra" editions to learn their edition numbers, then walks the
    full listing and yields one Gazette item per edition, flagging the ones
    whose number was seen in the extra listing.
    """

    name = "al_coruripe"
    # IBGE territory code for Coruripe-AL. Mandatory: an empty value here
    # produces an empty territory column in the exported CSV.
    TERRITORY_ID = "2702306"
    allowed_domains = ["diario.coruripe.al.gov.br"]
    start_urls = ["https://diario.coruripe.al.gov.br"]
    start_date = date(2021, 12, 3)
    BASE_URL = "https://diario.coruripe.al.gov.br"

    def start_requests(self):
        # Per-instance set of extra-edition numbers (a mutable class
        # attribute would be shared across spider instances); set membership
        # is also O(1) versus O(n) for a list.
        self.extra_edition_numbers = set()
        # Fetch only the "extra" listing first so regular editions can be
        # told apart later. `method` and `callback` defaults are implicit.
        url = f"{self.BASE_URL}/busca?term=&onde=tudo&data=qualquer&jornal=extra"
        yield Request(url)

    def parse(self, response):
        """Record every extra-edition number, then request the full listing."""
        for item in response.css(".accordion-item"):
            # Relative XPath on each item selector — no need to re-wrap the
            # raw HTML in a new Selector.
            edition_number = item.xpath(
                './/button[@class="accordion-button"]/text()'
            ).re_first(r"nº\r\n\s*(\d+)")
            if edition_number:
                self.extra_edition_numbers.add(edition_number)

        url = f"{self.BASE_URL}/busca?term=&onde=tudo&data=qualquer&jornal=tudo"
        yield Request(url, callback=self._get_all_gazette)

    def _get_all_gazette(self, response):
        """Yield a Gazette item for every edition in the full listing."""
        for item in response.css(".accordion-item"):
            date_str = item.xpath(
                './/div[@class="me-auto p-2 bd-highlight"]/text()'
            ).re_first(r"\d{2}/\d{2}/\d{4}")
            # Named `gazette_date` so it does not shadow the imported
            # `datetime.date` class.
            gazette_date = datetime.strptime(date_str, "%d/%m/%Y").date()
            edition_number = item.xpath(
                './/button[@class="accordion-button"]/text()'
            ).re_first(r"nº\r\n\s*(\d+)")
            yield Gazette(
                date=gazette_date,
                edition_number=edition_number,
                is_extra_edition=edition_number in self.extra_edition_numbers,
                file_urls=[item.xpath(".//a/@href").get()],
                power="executive",
                scraped_at=datetime.now(),
                # Single source of truth — keep in sync with TERRITORY_ID.
                territory_id=self.TERRITORY_ID,
            )
Loading