{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
import Data.List
import Data.List.Utils ( replace )
import Text.Regex.Posix
import Text.Printf
-- | Select all nodes matching the given XPath condition and, for each node,
-- yield a pair of (href of the first nested @\<a\>@ element, full text content
-- of the node with all text fragments joined by single spaces).
getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  -- Unused binding kept deliberately: getElemName fails on non-element
  -- nodes, so this acts as a guard that the XPath hit is an element.
  _name <- getElemName -< xpathTrees
  -- Collect every text node under the match and join with spaces.
  txt <- (listA (deep isText >>> getText) >>> arr (intercalate " ")) -< xpathTrees
  -- Take the href attribute of a nested anchor.
  href <- (getXPathTrees "//a" >>> getAttrValue "href") -< xpathTrees
  returnA -< (href, txt)

-- | Download the document at the given URL, extract (link, text) pairs for
-- nodes matching the XPath condition, and resolve each (possibly relative)
-- link against the page URL to obtain an absolute URI.
extractNestedLinksWithText xpathCondition = proc url -> do
  doc <- downloadDocument -< url
  (link, text) <- getLinkAndText xpathCondition -< doc
  -- Resolve relative hrefs against the base page URL.
  uriFixed <- expandURIFixed -< (link, url)
  returnA -< (uriFixed, text)

-- | Crawl from the start page down to individual PDF issues.
-- Produces tuples of the shape
-- (((issue PDF URL, issue title), collection title), category title).
extractRecords = proc startUrl -> do
  -- Alternative (first menu entry only), kept for reference:
  -- (catUrl, catText) <- extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]" -< startUrl
  -- (URL, title) pairs of the category subpages from the mega menu.
  (catUrl, catText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< startUrl
  -- Fetch each category page and follow the menu again to collection pages.
  (collUrl, collText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< catUrl
  -- On each collection page, pick up the per-issue PDF download links.
  (relUrl, relText) <- extractNestedLinksWithText "//big[a[contains(@href,'.pdf') and contains(text(), 'Pobierz PDF')]]" -< collUrl
  returnA -< (((relUrl, relText), collText), catText)

-- | Convert one crawled tuple into the target ShadowItem record.
-- The title is built from the category and collection titles; the date is
-- extracted from the release title.
toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, releaseTitle), collectionTitle), categoryTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where title = categoryTitle ++ (" " ++ collectionTitle)
        date = getDate releaseTitle
        -- Pull a slash-delimited four-digit year (1900-2099) out of the
        -- release title; if none is found, fall back to the raw title.
        getDate yearlyTitle =
          case yearlyTitle =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
            Just [[_, year]] -> year
            -- Alternative: error $ "unexpected yearlyTitle: " ++ yearlyTitle
            _ -> yearlyTitle
-- | Entry point: describe the Zbór Bielawa shadow library and run the
-- crawler from its archive page, converting each crawled tuple to a
-- ShadowItem.
main :: IO ()
main = do
  let start = "http://zborbielawa.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Zbór Bielawa",
                                      abbrev = "ZboBiel",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)