-- forked from filipg/twilight-library
{-# LANGUAGE Arrows, NoMonomorphismRestriction, TemplateHaskell, QuasiQuotes #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath

import Data.List
import Data.List.Utils (replace)

-- import Text.Regex.Posix
import Text.Printf

import Control.Lens.Regex.Text
import Control.Lens ((^?))
import qualified Data.Text as T
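
-- From the subtree(s) selected by xpathCondition, return the href of the <a> links
-- they contain together with the text found around them (text nodes that are siblings
-- of an <a>), joined with spaces. The otherwise unused getElemName step presumably
-- just makes sure the matched node is an element.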
getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  name <- getElemName -< xpathTrees
  txt <- (getXPathTrees "//a/../text()" >>> listA (deep isText >>> getText) >>> arr (intercalate " ")) -< xpathTrees
  href <- (getXPathTrees "//a" >>> getAttrValue "href") -< xpathTrees
  returnA -< (href, txt)
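
-- Download the page at the given URL, pull out a link (and its surrounding text)
-- matching xpathCondition, and resolve the possibly relative href against the page URL.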
extractNestedLinksWithText xpathCondition = proc url -> do
  doc <- downloadDocument -< url
  (link, text) <- getLinkAndText xpathCondition -< doc
  uriFixed <- expandURIFixed -< (link, url)
  returnA -< (uriFixed, text)
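
-- Walk the site: from the start page take the category links from the mega-menu,
-- from each category page take the collection links, and from each collection page
-- take the link to a PDF issue. The "[1]"/"[2]" indexed variants restrict the XPath
-- to a single match, apparently to limit the crawl while testing.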
extractRecords = proc startUrl -> do
  -- (catUrl, catText) <- extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[2]" -< startUrl -- (URL, title) pairs for the subpages
  (catUrl, catText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< startUrl -- (URL, title) pairs for the subpages
  (collUrl, collText) <- (extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]") -< catUrl -- fetch the category subpage and the further subpages from its menu
  -- (collUrl, collText) <- (extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']") -< catUrl -- fetch the category subpage and the further subpages from its menu
  (relUrl, relText) <- (extractNestedLinksWithText "(//big[a[contains(@href,'.pdf')]])[1]") -< collUrl -- fetch the page with the issues of a given collection and the links to the PDFs
  -- (relUrl, relText) <- (extractNestedLinksWithText "//big[a[contains(@href,'.pdf')]]") -< collUrl -- fetch the page with the issues of a given collection and the links to the PDFs
  returnA -< (((relUrl, relText), collText), catText) -- the final output is tuples of (((issue PDF URL, issue title), collection title), category title)

-- ... and here we convert those tuples into the target ShadowItem structure.
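-- The item title combines the category and collection titles; the year, if any,
-- is recovered from the release title (see getDate below).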
toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, releaseTitle), collectionTitle), categoryTitle) =
  (defaultShadowItem url title) {
    originalDate = T.unpack <$> date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where title = categoryTitle ++ " " ++ collectionTitle
        date = getDate $ T.pack releaseTitle
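
-- Pull the first four-digit year (1900-2099) out of a title, if there is one,
-- using the regex traversal from Control.Lens.Regex.Text.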
getDate txt = txt ^? [regex|19[0-9][0-9]|20[0-9][0-9]|] . match
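
-- Entry point: describe the library being scraped and run the whole pipeline,
-- starting from the archive page.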
main = do
  let start = "http://zborbielawa.pl/archiwum/"
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Zbór Bielawa",
                                     abbrev="ZboBiel",
                                     lLevel=0,
                                     webpage=start}
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)