mirror of https://gitlab.com/news-flash/article_scraper.git (synced 2025-07-08 00:19:59 +02:00)

commit 9c35fb9fa8 (parent 5f82872d1f): Async

4 changed files with 128 additions and 139 deletions
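The commit ports the crate from the blocking reqwest 0.9 API to the async reqwest 0.10 alphas running on tokio, so every HTTP call site changes from send()? to send().await? and the public entry points become async fn. A minimal sketch of that pattern in isolation (the fetch helper below is illustrative, not part of the crate):

    // Before (reqwest 0.9, blocking):
    // fn fetch(client: &reqwest::Client, url: &str) -> Result<String, reqwest::Error> {
    //     let mut response = client.get(url).send()?;
    //     response.text()
    // }

    // After (reqwest 0.10 alpha, async, driven by a tokio runtime):
    async fn fetch(client: &reqwest::Client, url: &str) -> Result<String, reqwest::Error> {
        let response = client.get(url).send().await?; // yields to the runtime instead of blocking the thread
        response.text().await
    }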
Cargo.toml
@@ -7,12 +7,13 @@ edition = "2018"
 [dependencies]
 failure = "0.1"
 libxml = { git = "https://github.com/KWARC/rust-libxml.git" }
-reqwest = "0.9"
-url = "1.7"
+reqwest = { version = "0.10.0-alpha.2", features = ["json"] }
+tokio = { version = "=0.2.0-alpha.6" }
+url = "2.1"
 regex = "1.3"
 encoding_rs = "0.8"
 chrono = "0.4"
 htmlescape = "0.3"
-base64 = "0.10"
+base64 = "0.11"
 image = "0.22"
 log = "0.4"
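With the dependency switch above, nothing in the crate blocks on its own anymore; callers have to supply an executor. A minimal sketch of driving the now-async parse from a binary, assuming the crate is consumed as article_scraper and that the #[tokio::main] attribute is available in the tokio version pinned above (the config path and URL are placeholders):

    use std::path::PathBuf;

    #[tokio::main]
    async fn main() {
        // placeholders: point these at a real grabber-config directory and article URL
        let scraper = article_scraper::ArticleScraper::new(PathBuf::from("./resources/tests/golem")).unwrap();
        let url = url::Url::parse("https://example.com/some-article").unwrap();

        // parse() is async after this commit, so it has to be awaited
        let article = scraper.parse(url, false).await.unwrap();
        println!("{:?}", article.title);
    }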
@@ -16,7 +16,7 @@ use self::error::{ImageDownloadError, ImageDownloadErrorKind};
 use base64;
 use std;
 use image;
-use super::ScraperErrorKind;
+use crate::ArticleScraper;
 
 mod error;
 
@@ -34,7 +34,7 @@ impl ImageDownloader {
 }
 }
 
-pub fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
+pub async fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
 let parser = Parser::default_html();
 let doc = parser.parse_string(html).map_err(|_| {
 error!("Failed to parse HTML string");
@@ -46,7 +46,7 @@ impl ImageDownloader {
 ImageDownloadErrorKind::HtmlParse
 })?;
 
-self.download_images_from_context(&xpath_ctx)?;
+self.download_images_from_context(&xpath_ctx).await?;
 
 let options = SaveOptions {
 format: false,
@@ -61,19 +61,20 @@ impl ImageDownloader {
 Ok(doc.to_string_with_options(options))
 }
 
-pub fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
+pub async fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
 let xpath = "//img";
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = ArticleScraper::evaluate_xpath(context, xpath, false)
+.context(ImageDownloadErrorKind::HtmlParse)?;
 for mut node in node_vec {
 if let Some(url) = node.get_property("src") {
 if !url.starts_with("data:") {
 if let Ok(url) = url::Url::parse(&url) {
-let parent_url = match self.check_image_parent(&node, &url) {
+let parent_url = match self.check_image_parent(&node, &url).await {
 Ok(url) => Some(url),
 Err(_) => None,
 };
 
-if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url) {
+if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url).await {
 if let Err(_) = node.set_property("src", &small_image) {
 return Err(ImageDownloadErrorKind::HtmlParse)?;
 }
@@ -91,9 +92,9 @@ impl ImageDownloader {
 Ok(())
 }
 
-fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
+async fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
 
-let mut response = self.client.get(image_url.clone()).send().map_err(|err| {
+let response = self.client.get(image_url.clone()).send().await.map_err(|err| {
 error!("GET {} failed - {}", image_url.as_str(), err.description());
 err
 }).context(ImageDownloadErrorKind::Http)?;
@@ -103,23 +104,27 @@ impl ImageDownloader {
 .context(ImageDownloadErrorKind::ContentType)?;
 let mut content_type_big : Option<String> = None;
 
-let mut small_image : Vec<u8> = Vec::new();
+let mut small_image = response
+.bytes()
+.await
+.context(ImageDownloadErrorKind::IO)?
+.as_ref()
+.to_vec();
 
 let mut big_image : Option<Vec<u8>> = None;
 
-response.copy_to(&mut small_image)
-.context(ImageDownloadErrorKind::IO)?;
 
 if let Some(parent_url) = parent_url {
-let mut response_big = self.client.get(parent_url.clone()).send()
+let response_big = self.client.get(parent_url.clone()).send().await
 .context(ImageDownloadErrorKind::Http)?;
-content_type_big = Some(ImageDownloader::check_image_content_type(&response)?
+content_type_big = Some(ImageDownloader::check_image_content_type(&response_big)?
 .to_str()
 .context(ImageDownloadErrorKind::ContentType)?
 .to_owned());
-let mut big_buffer : Vec<u8> = Vec::new();
-response_big.copy_to(&mut big_buffer)
-.context(ImageDownloadErrorKind::IO)?;
-big_image = Some(big_buffer);
+big_image = Some(response_big
+.bytes()
+.await
+.context(ImageDownloadErrorKind::IO)?
+.to_vec());
 }
 
 if content_type_small != "image/svg+xml" && content_type_small != "image/gif" {
@@ -215,14 +220,14 @@ impl ImageDownloader {
 }
 }
 
-fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
+async fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
 if let Some(parent) = node.get_parent() {
 if parent.get_name() == "a" {
 if let Some(url) = parent.get_property("href") {
 let parent_url = url::Url::parse(&url).context(ImageDownloadErrorKind::ParentDownload)?;
-let parent_response = self.client.head(parent_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+let parent_response = self.client.head(parent_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
 let _ = ImageDownloader::check_image_content_type(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
-let child_response = self.client.get(child_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+let child_response = self.client.get(child_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
 let parent_length = Self::get_content_lenght(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
 let child_length = Self::get_content_lenght(&child_response).context(ImageDownloadErrorKind::ParentDownload)?;
 
@@ -260,12 +265,13 @@ mod tests {
 use std::fs;
 use std::io::Write;
 
-#[test]
-pub fn close_tags() {
+#[tokio::test]
+async fn close_tags() {
 let image_dowloader = ImageDownloader::new((2048, 2048));
 let hdyleaflet = fs::read_to_string(r"./resources/tests/planetGnome/fedora31.html")
 .expect("Failed to read HTML");
 let result = image_dowloader.download_images_from_string(&hdyleaflet)
+.await
 .expect("Failed to downalod images");
 let mut file = fs::File::create(r"./resources/tests/planetGnome/fedora31_images_downloaded.html")
 .expect("Failed to create output file");
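The image-downloader changes above follow from the async Response in reqwest 0.10 no longer offering copy_to: the body now arrives through the async bytes() method, as the hunks show. A minimal sketch of that download pattern in isolation (the function name and error handling are illustrative, not from the crate):

    async fn fetch_image_bytes(client: &reqwest::Client, url: url::Url) -> Result<Vec<u8>, reqwest::Error> {
        let response = client.get(url).send().await?;
        // bytes() resolves to a Bytes buffer once the whole body has been received
        Ok(response.bytes().await?.to_vec())
    }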
src/lib.rs (155 changed lines)
@@ -1,5 +1,3 @@
-#[macro_use]
-mod macros;
 mod config;
 mod error;
 mod article;
@@ -60,10 +58,10 @@ impl ArticleScraper {
 })
 }
 
-pub fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
+pub async fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
 
 info!("Scraping article: {}", url.as_str());
-let response = self.client.head(url.clone()).send()
+let response = self.client.head(url.clone()).send().await
 .map_err(|err| {
 error!("Failed head request to: {} - {}", url.as_str(), err.description());
 err
@@ -105,7 +103,7 @@ impl ArticleScraper {
 
 ArticleScraper::generate_head(&mut root, &document)?;
 
-self.parse_first_page(&mut article, &url, &mut root, config)?;
+self.parse_pages(&mut article, &url, &mut root, config).await?;
 
 let context = Context::new(&document).map_err(|()| {
 error!("Failed to create xpath context for extracted article");
@@ -123,7 +121,7 @@ impl ArticleScraper {
 }
 
 if download_images {
-if let Err(error) = self.image_downloader.download_images_from_context(&context) {
+if let Err(error) = self.image_downloader.download_images_from_context(&context).await {
 error!("Downloading images failed: {}", error);
 }
 }
@@ -145,20 +143,20 @@ impl ArticleScraper {
 Ok(article)
 }
 
-fn parse_first_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+async fn parse_pages(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
 
-let mut html = ArticleScraper::download(&url, &self.client)?;
-parse_html!(html, config, xpath_ctx);
+let html = ArticleScraper::download(&url, &self.client).await?;
+let mut document = Self::parse_html(html, config)?;
+let mut xpath_ctx = Self::get_xpath_ctx(&document)?;
 
 // check for single page link
-let mut xpath_ctx = xpath_ctx;
 if let Some(xpath_single_page_link) = config.single_page_link.clone() {
 debug!("Single page link xpath specified in config {}", xpath_single_page_link);
 if let Ok(result) = xpath_ctx.findvalue(&xpath_single_page_link, None) {
 // parse again with single page url
 debug!("Single page link found {}", result);
 let single_page_url = url::Url::parse(&result).context(ScraperErrorKind::Url)?;
-return self.parse_single_page(article, &single_page_url, root, config);
+return self.parse_single_page(article, &single_page_url, root, config).await;
 }
 }
 
@@ -166,23 +164,67 @@ impl ArticleScraper {
 ArticleScraper::strip_junk(&xpath_ctx, config, &url);
 ArticleScraper::extract_body(&xpath_ctx, root, config)?;
 
-self.check_for_next_page(&xpath_ctx, config, root)
+loop {
+if let Some(url) = self.check_for_next_page(&xpath_ctx, config) {
+let html = ArticleScraper::download(&url, &self.client).await?;
+document = Self::parse_html(html, config)?;
+xpath_ctx = Self::get_xpath_ctx(&document)?;
+ArticleScraper::strip_junk(&xpath_ctx, config, &url);
+ArticleScraper::extract_body(&xpath_ctx, root, config)?;
+} else {
+break;
+}
+}
+
+Ok(())
 }
 
-fn parse_next_page(&self, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-
-let mut html = ArticleScraper::download(&url, &self.client)?;
-parse_html!(html, config, xpath_ctx);
-ArticleScraper::strip_junk(&xpath_ctx, config, &url);
-ArticleScraper::extract_body(&xpath_ctx, root, config)?;
-
-self.check_for_next_page(&xpath_ctx, config, root)
-}
-
-fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-
-let mut html = ArticleScraper::download(&url, &self.client)?;
-parse_html!(html, config, xpath_ctx);
+fn parse_html(html: String, config: &GrabberConfig) -> Result<Document, ScraperError> {
+// replace matches in raw html
+let mut html = html;
+for replace in &config.replace {
+html = html.replace(&replace.to_replace, &replace.replace_with);
+}
+
+// parse html
+let parser = Parser::default_html();
+Ok(parser.parse_string(html.as_str()).map_err(|err| {
+error!("Parsing HTML failed for downloaded HTML {:?}", err);
+ScraperErrorKind::Xml
+})?)
+}
+
+fn get_xpath_ctx(doc: &Document) -> Result<Context, ScraperError> {
+Ok(Context::new(&doc).map_err(|()| {
+error!("Creating xpath context failed for downloaded HTML");
+ScraperErrorKind::Xml
+})?)
+}
+
+pub fn evaluate_xpath(xpath_ctx: &Context, xpath: &str, thorw_if_empty: bool) -> Result<Vec<Node>, ScraperError> {
+let res = xpath_ctx.evaluate(xpath).map_err(|()| {
+error!("Evaluation of xpath {} yielded no results", xpath);
+ScraperErrorKind::Xml
+})?;
+
+let node_vec = res.get_nodes_as_vec();
+
+if node_vec.len() == 0 {
+error!("Evaluation of xpath {} yielded no results", xpath);
+if thorw_if_empty {
+return Err(ScraperErrorKind::Xml)?
+}
+}
+
+Ok(node_vec)
+}
+
+async fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+
+let html = ArticleScraper::download(&url, &self.client).await?;
+let document = Self::parse_html(html, config)?;
+let xpath_ctx = Self::get_xpath_ctx(&document)?;
 ArticleScraper::extract_metadata(&xpath_ctx, config, article);
 ArticleScraper::strip_junk(&xpath_ctx, config, &url);
 ArticleScraper::extract_body(&xpath_ctx, root, config)?;
@@ -190,9 +232,9 @@ impl ArticleScraper {
 Ok(())
 }
 
-fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
+async fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
 
-let mut response = client.get(url.as_str()).send()
+let response = client.get(url.as_str()).send().await
 .map_err(|err| {
 error!("Downloading HTML failed: GET {} - {}", url.as_str(), err.description());
 err
@@ -200,13 +242,14 @@ impl ArticleScraper {
 .context(ScraperErrorKind::Http)?;
 
 if response.status().is_success() {
-let text = response.text().context(ScraperErrorKind::Http)?;
+let headers = response.headers().clone();
+let text = response.text().await.context(ScraperErrorKind::Http)?;
 {
 if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_html(&text)) {
 return Ok(decoded_html)
 }
 
-if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(response.headers())) {
+if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(&headers)) {
 return Ok(decoded_html)
 }
 }
@@ -315,9 +358,7 @@ impl ArticleScraper {
 }
 
 fn extract_value(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-evaluate_xpath!(context, xpath, node_vec);
-xpath_result_empty!(node_vec, xpath);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 if let Some(val) = node_vec.get(0) {
 return Ok(val.get_content())
 }
@@ -326,9 +367,7 @@ impl ArticleScraper {
 }
 
 fn extract_value_merge(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-evaluate_xpath!(context, xpath, node_vec);
-xpath_result_empty!(node_vec, xpath);
+let node_vec = Self::evaluate_xpath(context, xpath, true)?;
 let mut val = String::new();
 for node in node_vec {
 val.push_str(&node.get_content());
@@ -338,14 +377,13 @@ impl ArticleScraper {
 }
 
 fn strip_node(context: &Context, xpath: &String) -> Result<(), ScraperError> {
 
 let mut ancestor = xpath.clone();
 if ancestor.starts_with("//") {
 ancestor = ancestor.chars().skip(2).collect();
 }
 
 let query = &format!("{}[not(ancestor::{})]", xpath, ancestor);
-evaluate_xpath!(context, query, node_vec);
+let node_vec = Self::evaluate_xpath(context, query, false)?;
 for mut node in node_vec {
 node.unlink();
 }
@@ -353,9 +391,8 @@ impl ArticleScraper {
 }
 
 fn strip_id_or_class(context: &Context, id_or_class: &String) -> Result<(), ScraperError> {
 
 let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 node.unlink();
 }
@@ -365,7 +402,7 @@ impl ArticleScraper {
 fn fix_lazy_images(context: &Context, class: &str, property_url: &str) -> Result<(), ScraperError> {
 
 let xpath = &format!("//img[contains(@class, '{}')]", class);
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if let Some(correct_url) = node.get_property(property_url) {
 if let Err(_) = node.set_property("src", &correct_url) {
@@ -379,7 +416,7 @@ impl ArticleScraper {
 fn fix_iframe_size(context: &Context, site_name: &str) -> Result<(), ScraperError> {
 
 let xpath = &format!("//iframe[contains(@src, '{}')]", site_name);
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if let Some(mut parent) = node.get_parent() {
 if let Ok(mut video_wrapper) = parent.new_child(None, "div") {
@@ -414,7 +451,7 @@ impl ArticleScraper {
 };
 
 let xpath = &format!("//{}[@{}]", xpath_tag, attribute);
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if let Err(_) = node.remove_property(attribute) {
 return Err(ScraperErrorKind::Xml)?
@@ -431,7 +468,7 @@ impl ArticleScraper {
 };
 
 let xpath = &format!("//{}", xpath_tag);
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if let Err(_) = node.set_attribute(attribute, value) {
 return Err(ScraperErrorKind::Xml)?
@@ -441,9 +478,7 @@ impl ArticleScraper {
 }
 
 fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
-evaluate_xpath!(context, xpath, node_vec);
-xpath_result_empty!(node_vec, xpath);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for node in node_vec {
 if let Some(value) = node.get_attribute(attribute) {
 return Ok(value)
@@ -454,8 +489,7 @@ impl ArticleScraper {
 }
 
 fn repair_urls(context: &Context, xpath: &str, attribute: &str, article_url: &url::Url) -> Result<(), ScraperError> {
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if let Some(val) = node.get_attribute(attribute) {
 if let Err(url::ParseError::RelativeUrlWithoutBase) = url::Url::parse(&val) {
@@ -596,8 +630,7 @@ impl ArticleScraper {
 
 let mut found_something = false;
 {
-evaluate_xpath!(context, xpath, node_vec);
-xpath_result_empty!(node_vec, xpath);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if node.get_property("style").is_some() {
 if let Err(_) = node.remove_property("style") {
@@ -619,18 +652,18 @@ impl ArticleScraper {
 Ok(found_something)
 }
 
-fn check_for_next_page(&self, context: &Context, config: &GrabberConfig, root: &mut Node) -> Result<(), ScraperError> {
+fn check_for_next_page(&self, context: &Context, config: &GrabberConfig) -> Option<url::Url> {
 
 if let Some(next_page_xpath) = config.next_page_link.clone() {
 if let Ok(next_page_string) = ArticleScraper::get_attribute(&context, &next_page_xpath, "href") {
 if let Ok(next_page_url) = url::Url::parse(&next_page_string) {
-return self.parse_next_page(&next_page_url, root, config)
+return Some(next_page_url)
 }
 }
 }
 
 // last page reached
-Ok(())
+None
 }
 
 fn generate_head(root: &mut Node, document: &Document) -> Result<(), ScraperError> {
@@ -654,7 +687,7 @@ impl ArticleScraper {
 // this prevents libxml from self closing non void elements such as iframe
 
 let xpath = "//*[not(node())]";
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 for mut node in node_vec {
 if node.get_name() == "meta" {
 continue
@@ -669,7 +702,7 @@ impl ArticleScraper {
 fn eliminate_noscrip_tag(context: &Context) -> Result<(), ScraperError> {
 
 let xpath = "//noscript";
-evaluate_xpath!(context, xpath, node_vec);
+let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 
 for mut node in node_vec {
 if let Some(mut parent) = node.get_parent() {
@@ -692,28 +725,28 @@ impl ArticleScraper {
 mod tests {
 use crate::*;
 
-#[test]
-pub fn golem() {
+#[tokio::test]
+async fn golem() {
 let config_path = PathBuf::from(r"./resources/tests/golem");
 let out_path = PathBuf::from(r"./test_output");
 let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();
 
 let grabber = ArticleScraper::new(config_path).unwrap();
-let article = grabber.parse(url, true).unwrap();
+let article = grabber.parse(url, true).await.unwrap();
 article.save_html(&out_path).unwrap();
 
 assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
 assert_eq!(article.author, Some(String::from("Hauke Gierow")));
 }
 
-#[test]
-pub fn phoronix() {
+#[tokio::test]
+async fn phoronix() {
 let config_path = PathBuf::from(r"./resources/tests/phoronix");
 let out_path = PathBuf::from(r"./test_output");
 let url = url::Url::parse("http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1").unwrap();
 
 let grabber = ArticleScraper::new(config_path).unwrap();
-let article = grabber.parse(url, true).unwrap();
+let article = grabber.parse(url, true).await.unwrap();
 article.save_html(&out_path).unwrap();
 
 assert_eq!(article.title, Some(String::from("Amazon EC2 Cloud Benchmarks Against Bare Metal Systems")));
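The pagination rework in src/lib.rs is more than mechanical: the old recursive parse_next_page cannot simply become an async fn, because an async function that awaits itself produces a self-referential, infinitely sized future unless it is boxed. The commit sidesteps that by having check_for_next_page return Option<url::Url> and walking the pages iteratively inside parse_pages. A self-contained sketch of that iterative shape (the page source below is a stand-in for the real download and parse steps):

    // Stand-in for ArticleScraper::download + check_for_next_page: returns the page
    // body and, possibly, the URL of the next page.
    async fn fetch_page(url: &str) -> (String, Option<String>) {
        (format!("<article from {}>", url), None)
    }

    async fn collect_article(start_url: &str) -> String {
        let (mut body, mut next) = fetch_page(start_url).await;
        // iterate instead of recursing: no self-referential future, no boxing needed
        while let Some(url) = next {
            let (page, next_url) = fetch_page(&url).await;
            body.push_str(&page);
            next = next_url;
        }
        body
    }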
(deleted file: the crate's macros module)
@@ -1,51 +0,0 @@
-macro_rules! parse_html {
-(
-$html: ident,
-$config: ident,
-$xpath_ctx: ident
-) => {
-// replace matches in raw html
-for replace in &$config.replace {
-$html = $html.replace(&replace.to_replace, &replace.replace_with);
-}
-
-// parse html
-let parser = Parser::default_html();
-let doc = parser.parse_string($html.as_str()).map_err(|err| {
-error!("Parsing HTML failed for downloaded HTML {:?}", err);
-ScraperErrorKind::Xml
-})?;
-
-let $xpath_ctx = Context::new(&doc).map_err(|()| {
-error!("Creating xpath context failed for downloaded HTML");
-ScraperErrorKind::Xml
-})?;
-};
-}
-
-macro_rules! evaluate_xpath {
-(
-$context: ident,
-$xpath: ident,
-$node_vec: ident
-) => {
-let res = $context.evaluate($xpath).map_err(|()| {
-error!("Evaluation of xpath {} yielded no results", $xpath);
-ScraperErrorKind::Xml
-})?;
-
-let $node_vec = res.get_nodes_as_vec();
-};
-}
-
-macro_rules! xpath_result_empty {
-(
-$node_vec: ident,
-$xpath: ident
-) => {
-if $node_vec.len() == 0 {
-error!("Evaluation of xpath {} yielded no results", $xpath);
-return Err(ScraperErrorKind::Xml)?
-}
-};
-}