{-# LANGUAGE Arrows, NoMonomorphismRestriction, TemplateHaskell #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
import Data.List
import Data.List.Utils (replace)
-- import Text.Regex.Posix
import Text.Printf
import Control.Lens.Regex.Text
import Control.Lens.Regex
import Control.Lens

-- | For every node matching the given XPath condition, yield a pair of
-- (href of the first @//a@ descendant, whitespace-joined text content).
getLinkAndText xpathCondition = proc doc -> do
  xpathTrees <- getXPathTrees xpathCondition -< doc
  -- NOTE(review): 'name' is never used below, but 'getElemName' fails on
  -- non-element nodes and therefore filters the stream — do not remove
  -- without confirming the matched nodes are always elements.
  name <- getElemName -< xpathTrees
  txt <- (listA (deep isText >>> getText) >>> arr (intercalate " ")) -< xpathTrees
  href <- (getXPathTrees "//a" >>> getAttrValue "href") -< xpathTrees
  returnA -< (href, txt)

-- | Download the page at the given URL, extract (link, text) pairs with
-- 'getLinkAndText', and resolve each link against the page's URL.
extractNestedLinksWithText xpathCondition = proc url -> do
  doc <- downloadDocument -< url
  (link, text) <- getLinkAndText xpathCondition -< doc
  uriFixed <- expandURIFixed -< (link, url)
  returnA -< (uriFixed, text)

-- | Crawl from the start URL: category menu -> collection pages -> PDF links.
extractRecords = proc startUrl -> do
  -- (catUrl, catText) <- extractLinksWithText "(//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link'])[1]" -< startUrl -- URL/title pairs of subpages
  (catUrl, catText) <- extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']" -< startUrl -- URL/title pairs of subpages
  (collUrl, collText) <- (extractLinksWithText "//aside[@class='widget widget_maxmegamenu']//a[@class='mega-menu-link']") -< catUrl -- fetch each category page and the further subpages from its menu
  (relUrl, relText) <- (extractNestedLinksWithText "//big[a[contains(@href,'.pdf') and contains(text(), 'Pobierz PDF')]]") -< collUrl -- fetch each collection's issues page and the links to the PDFs
  returnA -< (((relUrl, relText), collText), catText) -- final output: tuples (((issue PDF URL, issue title), collection title), category title)

-- | Convert one crawled tuple into the target 'ShadowItem' structure.
toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, releaseTitle), collectionTitle), categoryTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where
    title = categoryTitle ++ (" " ++ collectionTitle)
    -- NOTE(review): getDate already returns a Maybe (via ^?), so
    -- 'Just date' wraps it a second time — confirm the type that
    -- ShadowItem's originalDate field actually expects.
    date = getDate $ releaseTitle
    -- Pull the first 4-digit year (19xx or 20xx) out of the release title.
    getDate txt = txt ^? [regex|19[0-9][0-9]|20[0-9][0-9]|] . match

-- | Entry point: describe the library being shadowed and start the crawl.
main = do
  let start = "http://zborbielawa.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Zbór Bielawa",
                                      abbrev = "ZboBiel",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)