-- 2022-04-16 21:07:17 +02:00 (VCS timestamp artifact, kept as a comment)
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils ( replace )
import Text.Regex.Posix
import Text.Printf
-- | Arrow that crawls the periodical index page.
--
-- Step 1: extract (URL, yearly title) pairs from the index
--         (links inside the @span4@ headers pointing at @o-nas@ pages).
-- Step 2: for each such URL, fetch the page and extract the links to PDF
--         files on it.
--
-- The result is a stream of triples: ((PDF URL, article title), yearly title).
--
-- NOTE(review): the XPath string literals in the garbled source carried
-- leading/trailing spaces, which would make them invalid expressions —
-- trimmed to the intended form here.
extractRecords = extractLinksWithText "//div[@class='span4']//h2[@itemprop='name']/a[contains(@href,'o-nas')]" -- (URL, title) pairs
                 >>> first (extractLinksWithText "//a[contains(@href,'.pdf')]") -- fetch each page and pull out links matching the XPath
-- | Convert one crawled triple ((URL, article title), yearly title)
-- into a 'ShadowItem' record for the shadow library.
--
-- Double quotes in the article title are replaced with apostrophes
-- (quote characters are unsafe in downstream metadata), and the
-- publication year is parsed out of the yearly title by 'getDate'.
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where
    title = replace "\"" "'" articleTitle
    date = getDate yearlyTitle
-- | Extract a four-digit year (1900–2099) from a yearly title string.
--
-- Returns the first matching year; when no year is present the function
-- falls back to a diagnostic string that embeds the offending title
-- (preserving the original best-effort behaviour — it does not raise).
getDate :: String -> String
getDate yearlyTitle =
  case yearlyTitle =~~ "(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]] of
    Just [[_, year]] -> year
    -- '_' instead of 'otherwise': in a case expression 'otherwise' is just
    -- a wildcard binding, which HLint rightly flags as misleading.
    _ -> "unexpected yearlyTitle: " ++ yearlyTitle
-- | Entry point: crawl the "Echo Chełmka" periodical listing and feed
-- every extracted record through 'toShadowItem'.
--
-- NOTE(review): the URL and name literals in the garbled source carried
-- surrounding spaces (a URL with spaces cannot be fetched) — trimmed here.
main :: IO ()
main = do
  let start = "http://moksir.chelmek.pl/o-nas/echo-chelmka"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Chelmek",
                                      abbrev = "Chelmek",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)