diff --git a/app/czasopisma.hs b/app/czasopisma.hs
new file mode 100644
index 0000000..1250135
--- /dev/null
+++ b/app/czasopisma.hs
@@ -0,0 +1,44 @@
+
+{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
+import ShadowLibrary.Core
+
+import Text.XML.HXT.Core
+import Text.XML.HXT.XPath
+-- import Text.XML.HXT.Curl
+import Data.List
+import Data.List.Utils (replace)
+
+import Text.Regex.Posix
+import Text.Printf
+
+
+extractRecords = extractLinksWithText "" -- address-title pairs
+  >>> first (extractLinksWithText "//a[@target='_blank'][contains(@href,'.pdf')]") -- fetch the page at that URL and extract the links on it that match the XPath expression
+  -- the final result is triples ((URL, article title), yearly volume title)
+
+
+toShadowItem :: ((String, String), String) -> ShadowItem
+toShadowItem ((url, articleTitle), yearlyTitle) =
+  (defaultShadowItem url title) {
+    originalDate = Just date,
+    itype = "periodical",
+    format = Just "pdf",
+    finalUrl = url
+    }
+  where title = replace "\"" "'" (replace "\t\t\t\t\t" "" (replace "\n" "" yearlyTitle)) ++ "articleTitle: " ++ articleTitle
+        date = getDate url
+
+getDate url =
+  case url =~~ "_(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]] of
+    Just [[_, year]] -> year
+    otherwise -> error $ "unexpected url: " ++ url
+
+
+main = do
+  let start = "https://www.czasopisma.centralnabibliotekapttk.pl"
+  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
+                                     lname="Chelmek",
+                                     abbrev="Chelmek",
+                                     lLevel=0,
+                                     webpage=start}
+  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
diff --git a/app/kulturaparyska.hs b/app/kulturaparyska.hs
deleted file mode 100644
index 2d308b4..0000000
--- a/app/kulturaparyska.hs
+++ /dev/null
@@ -1,96 +0,0 @@
-
-{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
-import ShadowLibrary.Core
-
-import Text.XML.HXT.Core
-import Text.XML.HXT.XPath
--- import Text.XML.HXT.Curl
-import Data.List
-import Data.List.Utils (replace)
-
-import Text.Regex.Posix
-import Text.Printf
-
-
-
-extractRecords = extractLinksWithText "//a[@class='year-anchor ']" -- address-title pairs
-  >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair, i.e. the title, of unnecessary whitespace
-  -- >>> first (arr ((++"f") . init)) -- modify the first element of the pair, i.e. the URL
-  >>> first (extractLinksWithText "//a[@class='pdf-exist'][contains(@href,'.pdf')]") -- fetch the page at that URL and extract the links on it that match the XPath expression
-  -- the final result is triples ((URL, article title), yearly volume title)
-
--- extractRecords2 = extractLinksWithText "//a[@class='year-anchor']" -- address-title pairs
---   >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair, i.e. the title, of unnecessary whitespace
---   -- >>> first (arr ((++"f") . init)) -- modify the first element of the pair, i.e. the URL
---   >>> first (extractLinksWithText "//a[ends-with(@href,'.pdf')]") -- fetch the page at that URL and extract the links on it that match the XPath expression
---   -- the final result is triples ((URL, article title), yearly volume title)
-
--- ... and here we convert these triples into the target ShadowItem structure
-toShadowItem :: ((String, String), String) -> ShadowItem
-toShadowItem ((url, articleTitle), yearlyTitle) =
-  (defaultShadowItem url title) {
-    originalDate = Just date,
-    itype = "periodical",
-    format = Just "pdf",
-    finalUrl = url
-    }
-  where title = "Kultura Paryska " ++ yearlyTitle ++ " " ++ (replace "\r\n" "" (replace "\r\n " "" articleTitle))
-        date = yearlyTitle
-
-getDate url =
-  case url =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
-    Just [[_, year]] -> year
-    otherwise -> error $ "unexpected url: " ++ url
-
-
-main = do
-  let start = "https://kulturaparyska.com/pl/publication/4/year/1946"
-  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
-                                     lname="Kultura Paryska",
-                                     abbrev="kultParys",
-                                     lLevel=0,
-                                     webpage=start}
-  putStrLn "Program started"
-  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
-
-  -- First time using haskell and for loop in haskell is hard
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/1/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/2/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/3/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/5/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/6/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/7/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/8/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/9/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/10/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
-
-  putStrLn "New link"
-  let start2 = "https://kulturaparyska.com/pl/publication/11/year/1946"
-  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
\ No newline at end of file
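
The new czasopisma.hs derives the publication date by pulling a "_19xx"/"_20xx" fragment out of each PDF URL. A minimal sketch, not part of the commit, of that year-extraction idea; the URL below is made up for illustration only, since the actual PDF paths are discovered at crawl time and do not appear in this diff:

import Text.Regex.Posix ((=~~))

yearOf :: String -> Maybe String
yearOf url =
  case url =~~ "_(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]] of
    Just [[_, year]] -> Just year   -- exactly one match: keep the captured year
    _                -> Nothing     -- no usable match; getDate in czasopisma.hs calls error here instead

-- yearOf "https://www.czasopisma.centralnabibliotekapttk.pl/example_1987.pdf"
--   == Just "1987"   (hypothetical URL, for illustration only)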
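The main in the deleted kulturaparyska.hs repeats the same putStrLn / let start2 / extractItemsStartingFromUrl block once per publication id (4 first, then 1-3 and 5-11), and its own comment notes that writing a loop was the sticking point. A minimal sketch of how that repetition could be folded into one forM_ pass, assuming the file's existing imports and its extractRecords / toShadowItem definitions stay in scope; Control.Monad's forM_ is the only extra import:

import Control.Monad (forM_)   -- assumed addition to the file's existing imports

main :: IO ()
main = do
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Kultura Paryska",
                                     abbrev="kultParys",
                                     lLevel=0,
                                     webpage="https://kulturaparyska.com/pl/publication/4/year/1946"}
  putStrLn "Program started"
  -- one pass per publication id instead of eleven hand-copied blocks
  forM_ [1 .. 11 :: Int] $ \pubId -> do
    putStrLn "New link"
    let start = "https://kulturaparyska.com/pl/publication/" ++ show pubId ++ "/year/1946"
    extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)

This visits the same eleven publication pages as the original, just in numeric order.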