{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)
import Text.Regex.Posix
import Text.Printf
import Data.Char (toLower)

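-- Shadow-library scraper for the "Elektronika Praktyczna" magazine archive at
-- https://ep.com.pl/archiwum/ : it walks the archive pages down to the individual
-- PDF links; toShadowItem below turns the extracted records into ShadowItem entries.
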
-- A type class for types that have a natural "empty" value.
class Nothingish a where
  nada :: a

instance Nothingish [a] where
  nada = []

-- Unwrap a Maybe, falling back to the type's empty value when it is Nothing.
eliminate :: (Nothingish a) => Maybe a -> a
eliminate (Just a) = a
eliminate Nothing = nada
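-- For the list instance this behaves like Data.Maybe.fromMaybe []:
-- e.g. eliminate (Just "04") == "04" and eliminate (Nothing :: Maybe String) == "".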

toLowerString :: [Char] -> [Char]
toLowerString str = [toLower x | x <- str]

mToString :: Maybe String -> String
mToString n
  | n == Nothing = ""
  | otherwise = eliminate n

-- Pull the first alphabetic word (a month name) out of a string and map it
-- to a month number via baseMonthNameToNumber.
extractMonth :: String -> String
extractMonth n =
  case n =~~ ("[A-Za-z]+" :: String) of
    Just month -> mToString (baseMonthNameToNumber (toLowerString month))
    Nothing -> ""

-- Pull a four-digit year (1600-2099) out of a string.
mExtractYear :: String -> String
mExtractYear n =
  case n =~~ ("(1[6789]|20)[0-9][0-9]" :: String) of
    Just year -> year
    Nothing -> ""

-- Rewrite a free-form date string as "year - month number".
changeDate :: String -> String
changeDate a = mExtractYear a ++ " - " ++ extractMonth a
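-- Illustrative only (assumes baseMonthNameToNumber maps lowercase Polish month
-- names to two-digit numbers, which is how it is used above): an input such as
-- "Marzec 2021" should come out roughly as "2021 - 03".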

-- Extraction pipeline: start from the last year link in the archive,
-- then follow it down to the individual PDF links.
extractRecords = extractLinksWithText "(//a[@class='magazine-list__year-item'])[last()]" -- address-title pairs
--extractRecords = extractLinksWithText "//a[@class='magazine-list__year-item']" -- address-title pairs
    >>> second (arr $ replace "\r\n" " ") -- clean the second element of the pair, i.e. the title, of unnecessary whitespace
    >>> second (arr $ replace "  " " ") -- clean the second element of the pair, i.e. the title, of unnecessary whitespace
-- >>> first (arr ((++"tr") . init)) -- modify the first element of the pair, i.e. the URL
    >>> first (extractLinksWithText "//div[@class='magazine-list__item']/a") -- fetch the page at that URL and extract the links on it that match the XPath expression
    >>> first (second (arr $ replace "\r\n" " "))
    >>> first (first (
            extractLinksWithText "//div[@class='files__item']/a[contains(@href,'.pdf')]"
            >>> second (arr $ replace "\r\n" " ")
-- >>> first (arr $ replace "//" "/")
            )
          >>> second (arr $ changeDate)
          ) -- fetch the page at that URL and extract the links on it that match the XPath expression

-- in the end we get nested tuples of the form (((PDF URL, chapter title), article title), yearly title)
-- ... and here those tuples are converted into the target ShadowItem structure
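-- A purely illustrative example of the shape of one such record (values are made up):
--   ((("https://ep.com.pl/.../ep-2021-03.pdf", "PDF"), "2021 - 03"), "2021")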

toShadowItem :: (((String, String), String), String) -> ShadowItem
toShadowItem (((url, chapterTitle), articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
    }
  where title = "Elektronika Praktyczna " ++ (replace "  " " " articleTitle)
        date = yearlyTitle
        getDate yearlyTitle =
          case yearlyTitle =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
            Just [[_, year]] -> year
            _ -> error $ "unexpected yearlyTitle: " ++ yearlyTitle
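-- Note: getDate (pulling the year out of a URL-like path) is defined but currently
-- unused; the date is taken directly from yearlyTitle above.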

main = do
  let start = "https://ep.com.pl/archiwum/"
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing,
                                      lname = "Elektronika praktyczna",
                                      abbrev = "EP",
                                      lLevel = 0,
                                      webpage = start }
  extractItemsStartingFromUrl shadowLibrary start (extractRecords) -- >>> arr toShadowItem)
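  -- A sketch of the run with the ShadowItem conversion enabled, following the
  -- commented-out composition above (assuming extractItemsStartingFromUrl accepts
  -- the composed arrow just as it accepts extractRecords alone):
  --
  --   extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)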