forked from filipg/twilight-library
Compare commits (6 commits): 78e11f7e70, cca2ef59d3, aa45331cf7, 6153264fcb, 53fff21f4d, 107793cbf1
@@ -33,7 +33,7 @@ import Data.Tree.NTree.TypeDefs
import Data.Maybe
import Control.Monad.Trans
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Text.XML.HXT.Curl
import Text.XML.HXT.HTTP

import Text.Regex.TDFA

@@ -64,8 +64,8 @@ downloadDocument = readFromDocument [withParseHTML yes,
                                     withEncodingErrors no,
                                     withPreserveComment yes,
                                     withStrictInput yes,
                                     withHTTP []
                                     -- withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
                                     -- withHTTP []
                                     withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
                                    ]

downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
@@ -73,13 +73,13 @@ downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
                                                     withEncodingErrors no,
                                                     withPreserveComment yes,
                                                     withInputEncoding enc,
                                                     withHTTP []]
                                                     -- withCurl []]
                                                     -- withHTTP []]
                                                     withCurl []]

downloadXmlDocument = readFromDocument [withWarnings no,
                                        withEncodingErrors no,
                                        withHTTP []]
                                        -- withCurl [] ]
                                        -- withHTTP []]
                                        withCurl [] ]


data ShadowLibrary = ShadowLibrary { logoUrl :: Maybe String,
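The hunks above toggle the download backend of downloadDocument and friends between hxt-http's withHTTP option and hxt-curl's withCurl option (withHTTP is built on the HTTP package and cannot fetch https:// URLs, which withCurl can). For context, a minimal standalone sketch of how a reader configured this way is typically run; it is not part of the diff, and the URL and the fetchLinks helper are made up for illustration:

{-# LANGUAGE Arrows #-}
import Text.XML.HXT.Core
import Text.XML.HXT.Curl

-- Sketch only: a reader configured like downloadDocument above,
-- run against a single (hypothetical) page to list its links.
fetchLinks :: String -> IO [String]
fetchLinks url = runX $
  constA url
  >>> readFromDocument [ withParseHTML yes
                       , withWarnings no
                       , withEncodingErrors no
                       , withCurl [("curl--user-agent", "AMU Digital Libraries Indexing Agent")]
                       ]
  >>> deep (hasName "a")      -- every <a> element in the parsed tree
  >>> getAttrValue "href"     -- its href attribute as a String

main :: IO ()
main = fetchLinks "https://example.org/" >>= mapM_ putStrLn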
@@ -11,14 +11,11 @@ import Data.List.Utils (replace)
import Text.Regex.Posix
import Text.Printf

extractRecords = extractLinksWithText "//a[@class='roczniki']"
  >>> second (arr $ replace "\r\n " "")
  >>> first (arr ((++"tr") . init))
  >>> first (extractLinksWithText "//li/a[contains(@href,'.pdf')]")

extractRecords = extractLinksWithText "//a[@class='roczniki']" -- address-title pairs
  >>> second (arr $ replace "\r\n " " ") -- clean the second element of the pair (the title) of unnecessary whitespace
  >>> first (arr ((++"tr") . init)) -- modify the first element of the pair, i.e. the URL
  >>> first (extractLinksWithText "//li/a[contains(@href,'.pdf')]") -- fetch the page at that URL and extract the links on it that match the XPath expression
-- in the end this yields triples ((URL, article title), yearly volume title)

-- ... and here those triples are converted into the target ShadowItem structure
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, articleTitle), yearlyTitle) =
  (defaultShadowItem url title) {
@@ -31,7 +28,7 @@ toShadowItem ((url, articleTitle), yearlyTitle) =
        date = getDate url

getDate url =
  case url =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
  case url =~~ "/(20[0-9][0-9])/" :: Maybe [[String]] of
    Just [[_, year]] -> year
    otherwise -> error $ "unexpected url: " ++ url
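The annotated variant of extractRecords above leans on Control.Arrow's pair combinators: second rewrites the title half of each (URL, title) pair, first rewrites or expands the URL half. A tiny standalone sketch of that plumbing on the plain function arrow; tidy is a made-up helper and the sample strings are invented:

import Control.Arrow (first, second, (>>>))

-- first/second apply a function to one half of a pair and leave the other alone;
-- chaining them with >>> mirrors the shape of the extractRecords pipeline above.
tidy :: (String, String) -> (String, String)
tidy = second (filter (`notElem` "\r\n"))   -- clean the title
   >>> first (++ "tr")                      -- extend the URL

main :: IO ()
main = print (tidy ("almanach/1999/s", "Rocznik 1999\r\n"))
-- prints ("almanach/1999/str","Rocznik 1999")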
app/inspektoratpracy.hs (new file, 37 lines)
@@ -0,0 +1,37 @@
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core

import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)

import Text.Regex.Posix
import Text.Printf

extractRecords = extractLinksWithText "//td/a[contains(@href,'.pdf')]"

toShadowItem :: (String, String) -> ShadowItem
toShadowItem (url, monthlytitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
    }
  where title = "IP" ++ date ++ " " ++ (replace "\r" "" (replace "\n" "" (replace "\t" "" monthlytitle)))
        date = getDate $ replace "%20" " " url

getDate :: String -> String
getDate url = date where
  date = url Text.Regex.Posix.=~ "(202[0-2]|20[0-1][0-9])" :: String

main = do
  let start = "https://www.pip.gov.pl/pl/inspektor-pracy/66546,archiwum-inspektora-pracy-.html"
  let shadowLibrary = ShadowLibrary {logoUrl=Nothing,
                                     lname="Inspektor Pracy",
                                     abbrev="InspPrac",
                                     lLevel=0,
                                     webpage=start}
  extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
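getDate here uses Text.Regex.Posix's (=~) instantiated at a String result, which returns the first matched substring (or "" when nothing matches), whereas almanachmuszyny.hs uses (=~~) at Maybe [[String]] to also get capture groups. A small standalone sketch of both styles; the file names below are invented:

import Text.Regex.Posix ((=~), (=~~))

main :: IO ()
main = do
  -- String result: only the matched year is returned ("2021" here). Note the
  -- replace "%20" " " applied above before calling getDate: a raw "%202021"
  -- contains the substring "2020", which would otherwise match first.
  print (("Inspektor Pracy 2021 03.pdf" =~ "(202[0-2]|20[0-1][0-9])") :: String)
  -- Maybe [[String]] result: one inner list per match, full match first,
  -- then the capture groups, as pattern-matched in almanachmuszyny.hs.
  print (("/almanach/1999/art.pdf" =~~ "/(19[0-9][0-9]|20[0-9][0-9])/") :: Maybe [[String]])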
@@ -20,6 +20,7 @@ library
    , HTTP
    , hxt
    , hxt-http
    , hxt-curl
    , hxt-xpath
    , MissingH
    , monad-logger
@@ -51,6 +52,19 @@ executable almanachmuszyny
  hs-source-dirs: app
  main-is: almanachmuszyny.hs
  ghc-options: -threaded -rtsopts -with-rtsopts=-N
  build-depends: base
    , hxt
    , hxt-xpath
    , hxt-curl
    , MissingH
    , regex-posix
    , shadow-library
  default-language: Haskell2010

executable inspektoratpracy
  hs-source-dirs: app
  main-is: inspektoratpracy.hs
  ghc-options: -threaded -rtsopts -with-rtsopts=-N
  build-depends: base
    , hxt
    , hxt-xpath
stack.yaml.lock (new file, 19 lines)
@@ -0,0 +1,19 @@
# This file was autogenerated by Stack.
# You should not edit this file by hand.
# For more information, please see the documentation at:
#   https://docs.haskellstack.org/en/stable/lock_files

packages:
- completed:
    hackage: hxt-xpath-9.1.2.2@sha256:9cd590ae93a04573db8f90fa4094625ebd97dded45da7667c577ce6b38a42900,1999
    pantry-tree:
      size: 2225
      sha256: aee2f75974e868ff429b8ff349a29667536c60397098f5dfedc968d1951511bb
  original:
    hackage: hxt-xpath-9.1.2.2
snapshots:
- completed:
    size: 507596
    url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/11/9.yaml
    sha256: 42f472dbf06482da1b3319241f3e3b3593a45bd7d4f537d2789f21386b9b2ad3
  original: lts-11.9