Compare commits

...

8 Commits

Author SHA1 Message Date
Dominik
b66496c640 Final Update
-> fixed the "title" variable
2021-03-31 23:39:30 +02:00
Dominik
1d4cb55557 Updated "README" 2021-03-30 22:45:49 +02:00
05c993ec51 added Readme 2021-03-30 22:36:00 +02:00
Dominik
03ffdeb1f7 Version 1.0 2021-03-30 22:32:28 +02:00
Dominik
d8e664eba4 Final before fix 2021-03-27 22:36:30 +01:00
Dominik
6ee937aa12 Merge branch 'withcurl' of git://gonito.net/twilight-library 2021-03-27 22:26:55 +01:00
Dominik
5cf2c07416 My own .hs file (aneks.hs) 2021-03-27 22:26:33 +01:00
107793cbf1 Change to Curl 2019-03-17 21:52:13 +01:00
6 changed files with 100 additions and 8 deletions

11
README.md Normal file
View File

@ -0,0 +1,11 @@
# Zadanie "robot haskell"
## Przykładowy output:
```ShadowItem {url = Just "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", title = "Aneks Nr 51\8211\&52 1988", itype = "periodical", originalDate = Just "2016", creator = Nothing, format = Just "pdf", lang = Just "pol", finalUrl = "https://aneks.kulturaliberalna.pl/wp-content/uploads/2016/02/51%C3%94%C3%87%C3%B452-With-Watermark.pdf", description = Nothing}```
## Ekstrakcja info o .pdf ze strony https://aneks.kulturaliberalna.pl/archiwum-aneksu/:
```haskell
extractRecords = extractLinksWithText "//a[contains(@title,'Aneks') and contains(text(),'Nr')]"
>>> second (arr $ replace "\n" "")
>>> first (extractLinksWithText "//div/a[contains(@href,'.pdf')]") -- pobieramy stronę z adresu URL i wyciągamy linki z tej strony pasujące do wyrażenia XPathowego
-- ostatecznie wyjdą trójki? ((Link, tekst: "Wyświetl cały numer"), Numer Magazynu)```

View File

@ -33,7 +33,7 @@ import Data.Tree.NTree.TypeDefs
import Data.Maybe
import Control.Monad.Trans
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
import Text.XML.HXT.Curl
import Text.XML.HXT.HTTP
import Text.Regex.TDFA
@ -64,8 +64,8 @@ downloadDocument = readFromDocument [withParseHTML yes,
withEncodingErrors no,
withPreserveComment yes,
withStrictInput yes,
withHTTP []
-- withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
-- withHTTP []
withCurl [("curl--user-agent","AMU Digital Libraries Indexing Agent")]
]
downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
@ -73,13 +73,13 @@ downloadDocumentWithEncoding enc = readFromDocument [withParseHTML yes,
withEncodingErrors no,
withPreserveComment yes,
withInputEncoding enc,
withHTTP []]
-- withCurl []]
-- withHTTP []]
withCurl []]
downloadXmlDocument = readFromDocument [withWarnings no,
withEncodingErrors no,
withHTTP []]
-- withCurl [] ]
-- withHTTP []]
withCurl [] ]
data ShadowLibrary = ShadowLibrary { logoUrl :: Maybe String,

View File

@ -4,7 +4,7 @@ import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
-- import Text.XML.HXT.Curl
--import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)

46
app/aneks.hs Normal file
View File

@ -0,0 +1,46 @@
{-# LANGUAGE Arrows, NoMonomorphismRestriction #-}
import ShadowLibrary.Core
import Text.XML.HXT.Core
import Text.XML.HXT.XPath
import Text.XML.HXT.Curl
import Data.List
import Data.List.Utils (replace)
import Text.Regex.Posix
import Text.Printf
-- | Select the per-issue links from the Aneks archive page and, for each one,
-- follow the link and collect the .pdf links found on the target page.
-- (extractLinksWithText is a project combinator — presumably (URL, link text)
-- pairs; confirm against ShadowLibrary.Core.)
extractRecords = extractLinksWithText "//a[contains(@title,'Aneks') and contains(text(),'Nr')]"
  >>> second (arr $ replace "\n" "")  -- strip newlines from the issue-title text
  >>> first (extractLinksWithText "//div/a[contains(@href,'.pdf')]") -- download the page at the URL and extract the links on it that match the XPath expression
-- in the end this should yield triples? ((Link, text: "Wyświetl cały numer"), magazine issue number)
-- ... and here those triples are converted into the target ShadowItem structure.

-- | Build a 'ShadowItem' from one extracted record:
-- @((pdf URL, link text), yearly issue title)@.
-- The link text is ignored; the title is prefixed with \"Aneks \" and the
-- publication year is parsed out of the URL path.
toShadowItem :: ((String, String), String) -> ShadowItem
toShadowItem ((url, _linkText), yearlyTitle) =
  (defaultShadowItem url title) {
    originalDate = Just date,
    itype = "periodical",
    format = Just "pdf",
    finalUrl = url
  }
  where
    title = "Aneks " ++ yearlyTitle
    date = getDate url

    -- Extract a four-digit year (1900–2099) appearing as a path segment,
    -- e.g. ".../2016/..." -> "2016".  Dies with an error on any URL that
    -- does not contain exactly one such segment (unchanged behaviour).
    getDate :: String -> String
    getDate u =
      case u =~~ "/(19[0-9][0-9]|20[0-9][0-9])/" :: Maybe [[String]] of
        Just [[_, year]] -> year
        -- NOTE: the original used the pattern name `otherwise`, which is a
        -- fresh variable shadowing Prelude.otherwise, not a guard — `_` is
        -- the intended catch-all.
        _ -> error $ "unexpected url: " ++ u
-- | Entry point: crawl the Aneks archive starting page and feed every
-- extracted record, converted to a 'ShadowItem', to the shadow-library
-- harvesting driver.
main :: IO ()
main = extractItemsStartingFromUrl shadowLibrary start (extractRecords >>> arr toShadowItem)
  where
    start = "https://aneks.kulturaliberalna.pl/archiwum-aneksu/"
    shadowLibrary = ShadowLibrary
      { logoUrl = Nothing
      , lname   = "Archiwum Aneksu"
      , abbrev  = "ArchAnek"
      , lLevel  = 0
      , webpage = start
      }

View File

@ -20,6 +20,7 @@ library
, HTTP
, hxt
, hxt-http
, hxt-curl
, hxt-xpath
, MissingH
, monad-logger
@ -54,12 +55,27 @@ executable almanachmuszyny
build-depends: base
, hxt
, hxt-xpath
, hxt-curl
, MissingH
, regex-posix
, shadow-library
default-language: Haskell2010
executable aneks
hs-source-dirs: app
main-is: aneks.hs
ghc-options: -threaded -rtsopts -with-rtsopts=-N
build-depends: base
, hxt
, hxt-xpath
, hxt-curl
, MissingH
, regex-posix
, shadow-library
default-language: Haskell2010
source-repository head
type: git
location: https://github.com/name/project

19
stack.yaml.lock Normal file
View File

@ -0,0 +1,19 @@
# This file was autogenerated by Stack.
# You should not edit this file by hand.
# For more information, please see the documentation at:
# https://docs.haskellstack.org/en/stable/lock_files
packages:
- completed:
hackage: hxt-xpath-9.1.2.2@sha256:9cd590ae93a04573db8f90fa4094625ebd97dded45da7667c577ce6b38a42900,1999
pantry-tree:
size: 2225
sha256: aee2f75974e868ff429b8ff349a29667536c60397098f5dfedc968d1951511bb
original:
hackage: hxt-xpath-9.1.2.2
snapshots:
- completed:
size: 507596
url: https://raw.githubusercontent.com/commercialhaskell/stackage-snapshots/master/lts/11/9.yaml
sha256: 42f472dbf06482da1b3319241f3e3b3593a45bd7d4f537d2789f21386b9b2ad3
original: lts-11.9