forked from filipg/twilight-library
Add crawler for czasopisma...
This commit is contained in:
parent 0231c45a80
commit 76146d4e09
44  app/czasopisma.hs  Normal file
@@ -0,0 +1,44 @@
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)

import Text.Regex.Posix
import Text.Printf

extractRecords = extractLinksWithText "" -- address-title pairs
  >>> first (extractLinksWithText "//a[@target='_blank'][contains(@href,'.pdf')]") -- fetch the page at that URL and pull out the links on it that match the XPath expression
  -- the final result is triples of the form ((URL, article title), yearly volume title)

toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
    }
  where title = replace "\"" "'" (replace "\t\t\t\t\t" "" (replace "\n" "" yearlyTitle)) ++ "articleTitle: " ++ articleTitle
        date = getDate url

getDate url =
  case url =~~ "_(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]] of
    Just [[_, year]] -> year
    _ -> error $ "unexpected url: " ++ url

main = do
  let start = "https://www.czasopisma.centralnabibliotekapttk.pl"
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Chelmek",
                                     abbrev="Chelmek",
                                     lLevel=0,
                                     webpage=start}
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
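
A quick sanity check of the year extraction in getDate above: with Text.Regex.Posix, matching via =~~ at result type Maybe [[String]] returns each match as the whole matched text followed by its capture groups, which is exactly the shape the Just [[_, year]] pattern expects. A minimal GHCi sketch (the URL below is a made-up example, not taken from the crawled site):

ghci> import Text.Regex.Posix
ghci> "https://example.org/rocznik_1972.pdf" =~~ "_(19[0-9][0-9]|20[0-9][0-9])" :: Maybe [[String]]
Just [["_1972","1972"]]

Note that the Just [[_, year]] pattern only succeeds when the URL yields exactly one such match; any other shape falls through to the error branch.
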
@@ -1,96 +0,0 @@
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)

import Text.Regex.Posix
import Text.Printf

extractRecords = extractLinksWithText "//a[@class='year-anchor ']" -- address-title pairs
  >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair (the title) of unwanted whitespace
  -- >>> first (arr ((++"f") . init)) -- modify the first element of the pair, i.e. the URL
  >>> first (extractLinksWithText "//a[@class='pdf-exist'][contains(@href,'.pdf')]") -- fetch the page at that URL and pull out the links on it that match the XPath expression
  -- the final result is triples of the form ((URL, article title), yearly volume title)

-- extractRecords2 = extractLinksWithText "//a[@class='year-anchor']" -- address-title pairs
--   >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair (the title) of unwanted whitespace
--   -- >>> first (arr ((++"f") . init)) -- modify the first element of the pair, i.e. the URL
--   >>> first (extractLinksWithText "//a[ends-with(@href,'.pdf')]") -- fetch the page at that URL and pull out the links on it that match the XPath expression
--   -- the final result is triples of the form ((URL, article title), yearly volume title)

-- ... and here those triples are converted into the target ShadowItem structure
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
    }
  where title = "Kultura Paryska " ++ yearlyTitle ++ " " ++ (replace "\r\n" "" (replace "\r\n " "" articleTitle))
        date = yearlyTitle

getDate url =
  case url =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
    Just [[_, year]] -> year
    _ -> error $ "unexpected url: " ++ url

main = do
  let start = "https://kulturaparyska.com/pl/publication/4/year/1946"
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Kultura Paryska",
                                     abbrev="kultParys",
                                     lLevel=0,
                                     webpage=start}
  putStrLn "Program started"
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)

  -- First time using Haskell, and writing a for loop in Haskell is hard
  -- (a mapM_-based alternative is sketched after this diff)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/1/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/2/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/3/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/5/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/6/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/7/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/8/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/9/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/10/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)

  putStrLn "New link"
  let start2 = "https://kulturaparyska.com/pl/publication/11/year/1946"
  extractItemsStartingFromUrl shadowLibrary start2 (extractRecords >>> arr toShadowItem)
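
On the repeated blocks in the removed main above (see the comment about for loops): the eleven copy-pasted sections differ only in the publication ID, so they can be collapsed into a single mapM_ over that list. A minimal sketch, assuming the same imports and project-local ShadowLibrary.Core definitions (extractItemsStartingFromUrl, extractLinksWithText, etc.) as in the removed file, so it is not standalone:

-- Sketch only: loops over publication IDs instead of copy-pasting the block.
-- Assumes extractRecords and toShadowItem are defined as in the removed file.
main :: IO ()
main = do
  -- same library metadata as the removed file
  let shadowLibrary = ShadowLibrary { logoUrl = Nothing
                                    , lname   = "Kultura Paryska"
                                    , abbrev  = "kultParys"
                                    , lLevel  = 0
                                    , webpage = "https://kulturaparyska.com/pl/publication/4/year/1946" }
  putStrLn "Program started"
  -- publication IDs 1..11, exactly the set crawled by the copy-pasted blocks
  mapM_ (crawl shadowLibrary) ([1..11] :: [Int])
  where
    crawl lib n = do
      putStrLn "New link"
      let url = "https://kulturaparyska.com/pl/publication/" ++ show n ++ "/year/1946"
      extractItemsStartingFromUrl lib url (extractRecords >>> arr toShadowItem)

mapM_ runs the crawls sequentially in list order, so the observable behavior should match the hand-unrolled version.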