init-project

Jakub Adamski 2022-03-23 12:43:37 +01:00
parent 9282b3b89a
commit e8a02c5f07
3 changed files with 66 additions and 8 deletions

@@ -33,7 +33,7 @@ import Data.Tree.NTree.TypeDefs
 import Data.Maybe
 import Control.Monad.Trans
 import Text.XML.HXT.XPath
--- import Text.XML.HXT.Curl
+import Text.XML.HXT.Curl
 import Text.XML.HXT.HTTP
 import Text.Regex.TDFA
@@ -42,7 +42,7 @@ import Data.List (isInfixOf, intercalate)
 import Data.List.Utils (replace)
--- import Network.Curl.Opts
+--import Network.Curl.Opts
 
 polishTimeZone = TimeZone {
   timeZoneMinutes = 120,
@@ -64,8 +64,8 @@ downloadDocument = readFromDocument [withParseHTML yes,
     withEncodingErrors no,
     withPreserveComment yes,
     withStrictInput yes,
-    withHTTP []
-    -- withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
+    -- withHTTP []
+    withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
     ]
 
 downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
@@ -73,13 +73,13 @@ downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
     withEncodingErrors no,
     withPreserveComment yes,
     withInputEncoding enc,
-    withHTTP []]
-    -- withCurl []]
+    -- withHTTP []]
+    withCurl []]
 
 downloadXmlDocument = readFromDocument [withWarnings no,
     withEncodingErrors no,
-    withHTTP []]
-    -- withCurl [] ]
+    -- withHTTP []]
+    withCurl [] ]
 
 data ShadowLibrary = ShadowLibrary { logoUrl :: Maybe String,
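
The hunks above swap the reader backend from withHTTP to the curl backend supplied by hxt-curl. For orientation only, here is a minimal sketch of how such a curl-backed HXT reader is typically driven from an arrow pipeline; the fetchLinks helper, its option list, and the main wrapper are illustrative assumptions rather than code from this commit.

-- Illustrative sketch only (not part of this commit); it reuses the same
-- curl user-agent option as the configuration above.
import Text.XML.HXT.Core
import Text.XML.HXT.XPath (getXPathTrees)
import Text.XML.HXT.Curl (withCurl)

-- Fetch a page via curl, parse it leniently as HTML, and return every link's href.
fetchLinks :: String -> IO [String]
fetchLinks url = runX $
  readDocument [ withParseHTML yes
               , withWarnings no
               , withCurl [("curl--user-agent", "AMU Digital Libraries Indexing Agent")]
               ] url
  >>> getXPathTrees "//a"
  >>> getAttrValue "href"

main :: IO ()
main = fetchLinks "https://pbsociety.org.pl/repository/" >>= mapM_ putStrLn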

app/pbsociety.hs (new file, 46 lines)

@@ -0,0 +1,46 @@
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}

import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl

import Data.List
import Data.List.Utils (replace)
import Text.Regex.Posix
import Text.Printf

extractRecords = extractLinksWithText "//a" -- address-title pairs
-- >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair (the title) of unneeded whitespace
-- >>> first (arr ((++"tr") . init)) -- modify the first element of the pair, i.e. the URL
-- >>> first (extractLinksWithText "//li/a[contains(@href,'.pdf')]") -- fetch the page at that URL and extract the links on it that match the XPath expression
-- the end result is triples: ((URL, article title), yearbook title)

-- ... and here those triples are converted into the target ShadowItem structure
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
    }
  where title = "Almanach Muszyny " ++ yearlyTitle ++ " " ++ (replace "\r\n" "" (replace "\r\n " "" articleTitle))
        date = getDate url

getDate url =
  case url =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
    Just [[_, year]] -> year
    otherwise -> error $ "unexpected url: " ++ url

main = do
  let start = "https://pbsociety.org.pl/repository/"
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Polskie Towarzystwo Botaniczne",
                                     abbrev="PBSociety",
                                     lLevel=0,
                                     webpage=start}
  extractItemsStartingFromUrl shadowLibrary start extractRecords
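
As a usage note for the mapping above: a hedged sketch of what toShadowItem would yield for one hypothetical input triple, assuming it were evaluated inside this module; the URL and titles are invented for illustration.

-- Illustration only (hypothetical values, not output from a real crawl):
exampleItem :: ShadowItem
exampleItem = toShadowItem (("https://pbsociety.org.pl/repository/2015/example.pdf",
                             "Example article"),
                            "2015")
-- getDate picks the first /YYYY/ path segment, so originalDate = Just "2015";
-- itype = "periodical", format = Just "pdf", and finalUrl is the input URL.
-- Note that the title is still prefixed with "Almanach Muszyny", apparently
-- carried over from the almanachmuszyny scraper this file mirrors, and that
-- main wires extractRecords directly into extractItemsStartingFromUrl without
-- applying toShadowItem yet.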

@@ -19,6 +19,7 @@ library
   build-depends:       base >= 4.7 && < 5
                      , HTTP
                      , hxt
+                     , hxt-curl
                      , hxt-http
                      , hxt-xpath
                      , MissingH
@@ -59,6 +60,17 @@ executable almanachmuszyny
                      , shadow-library
   default-language:    Haskell2010
+executable pbsociety
+  hs-source-dirs:      app
+  main-is:             pbsociety.hs
+  ghc-options:         -threaded -rtsopts -with-rtsopts=-N
+  build-depends:       base
+                     , hxt
+                     , hxt-xpath
+                     , MissingH
+                     , regex-posix
+                     , shadow-library
+  default-language:    Haskell2010
 
 source-repository head
   type: git