{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)

import Text.Regex.Posix
import Text.Printf
import Data.Char (toLower)

-- | Minimal "default value" class used to collapse 'Maybe's:
-- 'nada' is the empty/neutral value of a type (e.g. @[]@ for lists).
class Nothingish a where
  nada :: a

instance Nothingish [a] where
  nada = []

-- | Unwrap a 'Maybe', falling back to the type's 'nada'
-- (so @eliminate Nothing == \"\"@ at type 'String').
eliminate :: (Nothingish a) => Maybe a -> a
eliminate (Just a) = a
eliminate Nothing = nada

-- | Lower-case every character of a string.
toLowerString :: [Char] -> [Char]
toLowerString str = [ toLower x | x <- str]

-- | 'Maybe String' to 'String', mapping 'Nothing' to the empty string.
-- (Rewritten from an @n == Nothing@ guard to plain pattern matching,
-- which drops the needless 'Eq' use.)
mToString :: Maybe String -> String
mToString Nothing  = ""
mToString (Just s) = s

-- | Find the first alphabetic run in the input (expected to be a month
-- name), translate it to its number via 'baseMonthNameToNumber' from the
-- library, and return it prefixed with \"-\"; returns \"\" on no match.
--
-- FIX: the original class @[A-za-z]@ also matched the ASCII characters
-- between 'Z' and 'a' (@[ \\ ] ^ _ `@); @[A-Za-z]@ is the intended
-- alphabetic class.
extractMonth :: String -> String
extractMonth n = case n =~~ ("[A-Za-z]+" :: String) of
  Just month -> "-" ++ eliminate (baseMonthNameToNumber (toLowerString month))
  Nothing -> ""

-- | Extract a four-digit year (16xx-20xx) from a string, or \"\".
-- NOTE(review): currently unused — 'changeDate' uses the library's
-- 'extractYear' instead; kept for backward compatibility.
mExtractYear :: String -> String
mExtractYear n = case n =~~ ("(1[6789]|20)[0-9][0-9]" :: String) of
  Just year -> year
  Nothing -> ""

-- | Build a \"YYYY[-MM]\" date string: year from the library's
-- 'extractYear', month (if any) appended by 'extractMonth'.
changeDate :: String -> Maybe String
changeDate a = Just (eliminate (extractYear a) ++ extractMonth a)

-- | Download the page behind the current URL and, inside the nodes selected
-- by @xpathCondition@, pair the article text (all text nodes under
-- @div.text@, joined with spaces) with the @href@ of the PDF link.
extractLinksWithArticleTitle xpathCondition =
  (downloadDocument &&& this)
  >>> first (getXPathTrees xpathCondition
             >>> ( (getXPathTrees "//div[@class='text']"
                    >>> (listA (deep isText >>> getText) >>> arr (intercalate " ")))
                   &&&
                   (getXPathTrees "//div[@class='files__item']/a[contains(@href,'.pdf')]"
                    >>> (getAttrValue "href")) ))

--extractRecords = extractLinksWithText "(//a[@class='magazine-list__year-item'])[last()]" -- (URL, title) pairs
extractRecords = extractLinksWithText "//a[@class='magazine-list__year-item']" -- (URL, title) pairs
  >>> second (arr $ replace "\r\n " "") -- clean the second element of the pair (the title) of unneeded whitespace
  >>> second (arr $ replace " " "") -- clean the second element of the pair (the title) of unneeded whitespace
--  >>> first (arr ((++"tr") . init)) -- rewrite the first element of the pair (the URL)
  >>> first (extractLinksWithText "//div[@class='magazine-list__item']/a") -- fetch the page behind the URL and extract the links matching the XPath expression
  >>> first (second (arr $ replace "\r\n " ""))
  >>> first (first (
--        extractLinksWithArticleTitle "//div[@class='files__item']/a[contains(@href,'.pdf')]"
        (extractLinksWithArticleTitle "//div[@class='magazine-single__content-title article text']")
        >>> first (
              first (arr $ replace " " "")
              >>> first (arr $ replace "\r\n " "")
            )
--        >>> first (arr $ replace "\r\n" "")
--        >>> first (arr $ replace "//" "/")
        )
--      >>> second (arr $ changeDate) -- turn the month name into its numeric value
      >>> second (arr $ replace " " "")
      >>> second (arr $ replace " " "")
      ) -- fetch the page behind the URL and extract the links matching the XPath expression
-- the net result is nested tuples ((URL, article title), yearly title)

-- | Convert the nested tuples produced by 'extractRecords' into the target
-- 'ShadowItem' structure.
toShadowItem :: ((((String, String), String), String), String) -> ShadowItem
toShadowItem ((((chapterTitle, url), finalUrl), articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = changeDate articleTitle,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = finalUrl }
  where title = "Elektronika Praktyczna " ++ articleTitle ++ " - " ++ chapterTitle
        -- NOTE(review): removed the dead 'date' and 'getDate' where-bindings;
        -- neither was referenced, and 'getDate' was additionally partial
        -- (non-exhaustive case falling through to 'error').

main = do
  let start = "https://ep.com.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing
                                    , lname = "Elektronika praktyczna"
                                    , abbrev = "EP"
                                    , lLevel = 0
                                    , webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)