From b66496c6400dc972486c574d2c6e5153b8588dcc Mon Sep 17 00:00:00 2001
From: Dominik
Date: Wed, 31 Mar 2021 23:39:30 +0200
Subject: [PATCH] Final Update -> fixed the "title" variable

---
 README.md    | 4 ++--
 app/aneks.hs | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 5c871a5..619812c 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,11 @@
 # Assignment "robot haskell"
 ## Example output:
-```ShadowItem {url = Just "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", title = "\nNr 51\8211\&52 1988\n Wy\347wietl ca\322y numer (PDF)", itype = "periodical", originalDate = Just "2016", creator = Nothing, format = Just "pdf", lang = Just "pol", finalUrl = "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", description = Nothing}```
+```ShadowItem {url = Just "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", title = "Aneks Nr 51\8211\&52 1988", itype = "periodical", originalDate = Just "2016", creator = Nothing, format = Just "pdf", lang = Just "pol", finalUrl = "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", description = Nothing}```
 ## Extracting .pdf info from https://aneks.kulturaliberalna.pl/archiwum-aneksu/:
 ```haskell
 extractRecords = extractLinksWithText "//a[contains(@title,'Aneks') and contains(text(),'Nr')]"
-    >>> second (arr $ replace "\r\n " " ")
+    >>> second (arr $ replace "\n" "")
     >>> first (extractLinksWithText "//div/a[contains(@href,'.pdf')]")
 -- we fetch the page at the given URL and pull out the links on that page matching the XPath expression
 -- the end result should be triples: ((link, text: "Wyświetl cały numer"), issue number)```
diff --git a/app/aneks.hs b/app/aneks.hs
index f24a270..8c650e0 100644
--- a/app/aneks.hs
+++ b/app/aneks.hs
@@ -13,7 +13,7 @@ import Text.Printf

 extractRecords = extractLinksWithText "//a[contains(@title,'Aneks') and contains(text(),'Nr')]"
-    >>> second (arr $ replace "\r\n " " ")
+    >>> second (arr $ replace "\n" "")
     >>> first (extractLinksWithText "//div/a[contains(@href,'.pdf')]")
 -- we fetch the page at the given URL and pull out the links on that page matching the XPath expression
 -- the end result should be triples: ((link, text: "Wyświetl cały numer"), issue number)
@@ -27,7 +27,7 @@ toShadowItem ((url, blank), yearlyTitle) =
       format = Just "pdf",
       finalUrl = url }
-  where title = yearlyTitle -- ++ " " ++ blank
+  where title = "Aneks " ++ yearlyTitle
        date = getDate url
        getDate url =
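
For reference, here is a minimal standalone sketch of the title cleanup this patch performs: strip the stray newlines from the scraped yearly title and prefix it with "Aneks". It uses `Data.Text` and two hypothetical helpers (`cleanYearlyTitle`, `mkTitle`) rather than the project's own `replace` helper and arrow pipeline, so the names and types here are assumptions, not the patched code itself.

```haskell
{-# LANGUAGE OverloadedStrings #-}
module TitleSketch where

import qualified Data.Text as T

-- Assumed stand-in for the scraped link text, which arrives with stray
-- newlines around the issue label (e.g. "\nNr 51\8211\&52 1988\n").
cleanYearlyTitle :: T.Text -> T.Text
cleanYearlyTitle = T.strip . T.replace "\n" ""

-- Mirrors the intent of the patched `where title = "Aneks " ++ yearlyTitle`.
mkTitle :: T.Text -> T.Text
mkTitle yearlyTitle = "Aneks " <> cleanYearlyTitle yearlyTitle

-- ghci> mkTitle "\nNr 51\8211\&52 1988\n"
-- "Aneks Nr 51\8211\&52 1988"
```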