-- 2021-04-07 02:12:02 +02:00 (VCS timestamp artefact, kept as a comment)
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils ( replace )
import Text.Regex.Posix
import Text.Printf
-- 2021-04-07 02:53:08 +02:00 (VCS timestamp artefact, kept as a comment)
import Debug.Trace
-- 2021-04-07 02:12:02 +02:00 (VCS timestamp artefact, kept as a comment)
-- | Given an XPath condition, pair each incoming URL with its downloaded
-- document, then from every node matching the condition extract the nested
-- anchor's @href@ together with the node's concatenated text content.
-- The relative href is finally resolved against the original URL.
-- Combinator-style twin of 'extractNestedLinksWithText2'.
extractNestedLinksWithText xpathCondition = ( downloadDocument &&& this )
  >>> first ( getXPathTrees xpathCondition
              >>> (
                    ( getXPathTrees " //a " >>> getAttrValue " href " )
                    &&&
                    ( listA ( deep isText >>> getText )
                      >>> arr ( intercalate " " ) )
                  ) )
  -- Re-associate so the (href, original URL) pair sits in the first slot
  -- for expandURIFixed: ((a, b), c) -> ((a, c), b)
  >>> arr rotateSecTh
  >>> first expandURIFixed
-- 2021-04-07 02:37:50 +02:00 (VCS timestamp artefact, kept as a comment)
-- | From a parsed document, select the subtree matching @xpathCondition@ and
-- yield a pair of (href of the nested anchor, concatenated text content).
getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  href <- ( getXPathTrees " //a " >>> getAttrValue " href " ) -< xpathTrees
  txt <- ( listA ( deep isText >>> getText ) >>> arr ( intercalate " " ) ) -< xpathTrees
  -- NOTE(review): traceShowId is a debugging leftover (prints each pair to
  -- stderr); kept to preserve behavior, drop once the pipeline is verified.
  returnA -< traceShowId ( href , txt )
-- 2021-04-07 02:37:50 +02:00 (VCS timestamp artefact, kept as a comment)
-- 2021-04-07 02:41:21 +02:00 (VCS timestamp artefact, kept as a comment)
-- | Proc-notation variant of 'extractNestedLinksWithText': fetch the page at
-- the incoming URL, pull one (relative link, text) pair out of it via
-- 'getLinkAndText', and resolve the link against the page's own URL.
extractNestedLinksWithText2 xpathCondition = proc url -> do
  page <- downloadDocument -< url
  ( relativeHref , label ) <- getLinkAndText xpathCondition -< page
  absoluteUrl <- expandURIFixed -< ( relativeHref , url )
  returnA -< ( absoluteUrl , label )
-- 2021-04-07 02:37:50 +02:00 (VCS timestamp artefact, kept as a comment)
-- 2021-04-07 02:12:02 +02:00 (VCS timestamp artefact, kept as a comment)
-- | Point-free crawl pipeline. Tuple nesting is built up by the 'first'
-- combinators, so the statement order below is load-bearing.
extractRecords = extractLinksWithText " (//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1] " -- (subpage URL, subpage title) pairs
  >>> first ( extractLinksWithText " (//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1] " ) -- fetch the subpage and the next level of subpages from the menu
  >>> first ( first ( extractNestedLinksWithText " //big/a[contains(@href,'.pdf')][img] " ) ) -- fetch the page at the URL and extract links from it matching the XPath expression
  -- ultimately yields tuples (((URL, issue title), subpage-2 title), subpage-1 title)
-- 2021-04-07 02:20:30 +02:00 (VCS timestamp artefact, kept as a comment)
-- | Proc-notation twin of 'extractRecords': walks the archive menu two levels
-- deep, then extracts the PDF links from each leaf page.
extractRecords2 = proc x -> do
  -- (subpage URL, subpage title) pairs
  ( a , b ) <- extractLinksWithText " (//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1] " -< x
  -- fetch the subpage and the next level of subpages from the menu
  ( a' , b' ) <- extractLinksWithText " (//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1] " -< a
  -- fetch the page at the URL and extract links from it matching the XPath expression
  a'' <- extractNestedLinksWithText " //big/a[contains(@href,'.pdf')][img] " -< a'
  returnA -< ( ( a'' , b' ) , b )
-- ultimately yields tuples (((URL, issue title), subpage-2 title), subpage-1 title)
-- 2021-04-07 02:12:02 +02:00 (VCS timestamp artefact, kept as a comment)
-- | Convert a crawled (((URL, release title), collection title), category
-- title) tuple into the target ShadowItem structure.
toShadowItem :: ( ( ( String , String ) , String ) , String ) -> ShadowItem
toShadowItem ( ( ( url , releaseTitle ) , collectionTitle ) , categoryTitle ) =
  ( defaultShadowItem url title ) {
    originalDate = Just date ,
    itype = " periodical " ,
    format = Just " pdf " ,
    finalUrl = url
  }
  where title = categoryTitle ++ ( " " ++ collectionTitle )
        date = getDate $ releaseTitle
        -- Pull a slash-delimited four-digit year (19xx/20xx) out of the
        -- release title; fall back to the raw title when nothing matches.
        -- NOTE(review): the literal spaces inside the regex look like a
        -- formatting artefact -- confirm the pattern against real titles.
        getDate yearlyTitle =
          case yearlyTitle =~~ " /(19[0-9][0-9]|20[0-9][0-9])/ " :: Maybe [ [ String ] ] of
            Just [ [ _ , year ] ] -> year
            -- _ -> error $ "unexpected yearlyTitle: " ++ yearlyTitle
            -- Was `otherwise ->`: as a pattern that merely BINDS the name
            -- `otherwise` (shadowing Prelude.otherwise) -- it does not test
            -- anything, so `_` states the intent honestly.
            _ -> yearlyTitle
-- | Entry point: crawl the Zbór Bielawa archive starting page and feed each
-- extracted record, converted to a ShadowItem, to the harvesting driver.
main = do
  let start = " http://zborbielawa.pl/archiwum/ "
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing ,
                                      lname = " Zbór Bielawa " ,
                                      abbrev = " ZboBiel " ,
                                      lLevel = 0 ,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start ( extractRecords2 >>> arr toShadowItem )