{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils ( replace )
import Text.Regex.Posix
import Text.Printf
-- | Extract scraping records from an issue-archive page.
--
-- Step 1: collect (URL, link text) pairs for each yearly issue
-- (the @a.title@ anchors).
-- Step 2: clean the issue title of the whitespace runs the page markup
-- leaves in the link text.
-- Step 3: fetch the page behind each issue URL and collect the PDF
-- gallery links (@a.obj_galley_link.pdf@) found there.
--
-- The net result is triples: ((PDF URL, PDF link text), yearly title).
extractRecords = extractLinksWithText "//a[@class='title']" -- (URL, title) pairs
  >>> second (arr $ replace "\n\t\t\t\t\t" " ") -- strip surplus whitespace from the title
  >>> first (extractLinksWithText "//a[@class='obj_galley_link pdf']") -- fetch each issue page and pull the PDF links
-- The pipeline above ultimately yields triples
-- ((article URL, article title), yearly title); here each triple is
-- converted into the target ShadowItem structure.

-- | Build a 'ShadowItem' from one scraped record.
--
-- The final title is the cleaned yearly title followed by the cleaned
-- article title; the publication date is the first 19xx/20xx year found
-- in the yearly title.
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where
    -- Glue the two titles together, collapsing the whitespace runs that
    -- HTML extraction leaves behind.
    title = replace "\n\t\t\t" " " yearlyTitle
            ++ " "
            ++ replace "\n\n\t" " " (replace "\n\n\t\t\n\t" " " articleTitle)
    date = getDate yearlyTitle
    -- First captured year of any match; fails loudly when the yearly
    -- title carries no recognizable year, so bad input is caught early.
    getDate yearlyTitle =
      case yearlyTitle =~~ "(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]] of
        Just ((_:year:_):_) -> year
        _ -> error $ "unexpected yearlyTitle: " ++ yearlyTitle
-- | Crawl the three archive pages of the "Etyka" journal and emit a
-- ShadowItem for every PDF found.
main = do
  let start = "https://etyka.uw.edu.pl/index.php/etyka/issue/archive"
  -- NOTE(review): lname/abbrev ("Tom"/"AlmMusz") look copied from another
  -- shadow-library definition; confirm they should not name this journal
  -- (e.g. "Etyka") instead.
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Tom",
                                      abbrev = "AlmMusz",
                                      lLevel = 0,
                                      webpage = start }
  -- The archive is paginated: /archive, /archive/2, /archive/3.
  mapM_ (\url -> extractItemsStartingFromUrl shadowLibrary url
                   (extractRecords >>> arr toShadowItem))
        [start, start ++ "/2", start ++ "/3"]