-- twilight-library/app/ZborBielawa.hs

{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl

import Data.List
import Data.List.Utils (replace)

import Text.Regex.Posix
import Text.Printf
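
-- Note: downloadDocument, expandURIFixed, rotateSecTh, extractLinksWithText,
-- extractItemsStartingFromUrl, defaultShadowItem, ShadowItem and ShadowLibrary
-- are not HXT names; they are assumed to come from the project's
-- ShadowLibrary.Core module imported above.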

extractNestedLinksWithText xpathCondition =
  (downloadDocument &&& this)
  >>> first (getXPathTrees xpathCondition
             >>> ((getXPathTrees "//a" >>> getAttrValue "href")
                  &&& (listA (deep isText >>> getText)
                       >>> arr (intercalate " "))))
  >>> arr rotateSecTh -- ((a, b), c) -> ((a, c), b)
  >>> first expandURIFixed
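
-- rotateSecTh comes from ShadowLibrary.Core; a plausible definition
-- (an assumption for illustration, not the library source) would be:
--   rotateSecTh ((a, b), c) = ((a, c), b)
-- It keeps the relative href paired with the page URL we started from,
-- so that expandURIFixed can resolve it to an absolute address.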

getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  href <- (getXPathTrees "//a" >>> getAttrValue "href") -< xpathTrees
  txt <- (listA (deep isText >>> getText) >>> arr (intercalate " ")) -< xpathTrees
  returnA -< (href, txt)
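
-- Hypothetical example: for a matching subtree such as
--   <big><a href="pdf/2005/nr1.pdf"><img src="cover.jpg"/></a>Nr 1/2005</big>
-- getLinkAndText would yield ("pdf/2005/nr1.pdf", "Nr 1/2005");
-- note that the href is still relative at this point.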

-- Arrow-notation (proc) equivalent of extractNestedLinksWithText above.
extractNestedLinksWithText2 xpathCondition = proc url -> do
  doc <- downloadDocument -< url
  (href, txt) <- getLinkAndText xpathCondition -< doc
  uriFixed <- expandURIFixed -< (href, url) -- resolve the relative href against the start URL
  returnA -< (uriFixed, txt)

extractRecords = extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]" -- (subpage URL, subpage title) pairs
  >>> first (extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]") -- download the subpage and the further subpages from its menu
  >>> first (first (extractNestedLinksWithText "//big/a[contains(@href,'.pdf')][img]")) -- download the page at each URL and extract the links matching the XPath expression
-- in the end we get tuples of the form (((URL, issue title), subpage title 2), subpage title 1)
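
-- A hypothetical record produced by extractRecords (illustrative values only):
--   ((("http://zborbielawa.pl/pdf/2005/nr1.pdf", "Nr 1/2005"), "Rok 2005"), "Archiwum")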

extractRecords2 = proc x -> do
  (a, b) <- extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]" -< x -- (subpage URL, subpage title) pairs
  (a', b') <- extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]" -< a -- download the subpage and the further subpages from its menu
  a'' <- extractNestedLinksWithText "//big/a[contains(@href,'.pdf')][img]" -< a' -- download the page at each URL and extract the links matching the XPath expression
  returnA -< ((a'', b'), b)
-- in the end we get tuples of the form (((URL, issue title), subpage title 2), subpage title 1)
-- ... and here we convert those tuples into the target ShadowItem structure

toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, releaseTitle), collectionTitle), categoryTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where
    title = categoryTitle ++ " " ++ collectionTitle
    date = getDate releaseTitle
    getDate yearlyTitle =
      case yearlyTitle =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
        Just [[_, year]] -> year
        -- _ -> error $ "unexpected yearlyTitle: " ++ yearlyTitle
        _ -> yearlyTitle
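
-- Hypothetical behaviour of getDate (values not taken from real site data):
--   getDate "archiwum/2005/nr1.pdf"  == "2005"         -- a /year/ segment is extracted
--   getDate "Nr 1 (2005)"            == "Nr 1 (2005)"  -- no /year/ match, input returned as-is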

main = do
  let start = "http://zborbielawa.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Zbór Bielawa",
                                      abbrev = "ZboBiel",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
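
-- Usage note (assumptions about the surrounding project, not verified):
-- load this module in GHCi and evaluate `main`, or build and run the
-- executable; it starts from http://zborbielawa.pl/archiwum/ and passes each
-- record extracted by extractRecords, converted via toShadowItem, to
-- extractItemsStartingFromUrl from ShadowLibrary.Core.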