{-# LANGUAGE Arrows, NoMonomorphismRestriction, QuasiQuotes, TemplateHaskell #-}
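
-- Shadow-library scraper for the Zbór Bielawa archive
-- (http://zborbielawa.pl/archiwum/): it walks the mega-menu categories and
-- collection pages and emits one ShadowItem per issue PDF found there.
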
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath

import Data.List
import Data.List.Utils (replace)
import Data.Maybe (fromMaybe)
import Data.Text (pack, unpack)

import Text.Printf

import Control.Lens ((^?))
import Control.Lens.Regex.Text
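
-- For every node matching the XPath condition, collect its text content and
-- the href of each <a> element found inside it, yielding (href, text) pairs.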
getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  txt <- (listA (deep isText >>> getText) >>> arr (intercalate " ")) -< xpathTrees
  href <- (getXPathTrees "//a" >>> getAttrValue "href") -< xpathTrees
  returnA -< (href, txt)
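
-- Download the page at the given URL and return (absolute URL, text) pairs
-- for matches of the XPath condition; relative hrefs are resolved against
-- the page's own URL.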
extractNestedLinksWithText xpathCondition = proc url -> do
  doc <- downloadDocument -< url
  (link, text) <- getLinkAndText xpathCondition -< doc
  uriFixed <- expandURIFixed -< (link, url)
  returnA -< (uriFixed, text)
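
-- The crawl itself: start page -> category menu links -> collection menu
-- links -> issue pages with their 'Pobierz PDF' ("Download PDF") links.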
extractRecords = proc startUrl -> do
  -- (URL, title) pairs for the category subpages
  (catUrl, catText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< startUrl
  -- fetch each category subpage and the further subpages from its menu
  (collUrl, collText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< catUrl
  -- fetch the page with a given collection's issues and the links to their PDFs
  (relUrl, relText) <- extractNestedLinksWithText "//big[a[contains(@href,'.pdf') and contains(text(), 'Pobierz PDF')]]" -< collUrl
  -- the final output: tuples (((issue PDF URL, issue title), collection title), category title)
  returnA -< (((relUrl, relText), collText), catText)

-- ... and here we convert those tuples into the target ShadowItem structure
toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, releaseTitle), collectionTitle), categoryTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where title = categoryTitle ++ " " ++ collectionTitle
        -- fall back to the raw release title when no year can be found in it
        date = fromMaybe releaseTitle (getDate releaseTitle)

-- Pull a four-digit year (19xx or 20xx) out of a release title, if present.
getDate :: String -> Maybe String
getDate txt = unpack <$> (pack txt ^? [regex|19[0-9][0-9]|20[0-9][0-9]|] . match)
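
-- For example (a hypothetical input, assuming the site's usual title shape):
--   toShadowItem ((("http://zborbielawa.pl/…/wydanie.pdf", "Nasz Zbór 2005"),
--                  "Nasz Zbór"), "Czasopisma")
-- yields an item with title "Czasopisma Nasz Zbór" and originalDate Just "2005".
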
main = do
  let start = "http://zborbielawa.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Zbór Bielawa",
                                      abbrev = "ZboBiel",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)