Mirror of https://gitlab.com/news-flash/article_scraper.git
Commit 2c6bfed550 (parent 4b8af0d709)
Jan Lukas Gernert, 2019-11-18 05:53:34 +01:00
4 changed files with 80 additions and 105 deletions

View file

@@ -7,7 +7,7 @@ edition = "2018"
 [dependencies]
 failure = "0.1"
 libxml = { git = "https://github.com/KWARC/rust-libxml.git" }
-reqwest = { version = "0.10.0-alpha.1", features = ["json"] }
+reqwest = { version = "0.10.0-alpha.2", features = ["json"] }
 tokio = { version = "=0.2.0-alpha.6" }
 url = "2.1"
 regex = "1.3"

View file

@@ -16,7 +16,7 @@ use self::error::{ImageDownloadError, ImageDownloadErrorKind};
 use base64;
 use std;
 use image;
-use super::ScraperErrorKind;
+use crate::ArticleScraper;
 
 mod error;
@@ -63,7 +63,8 @@ impl ImageDownloader {
     pub async fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
         let xpath = "//img";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = ArticleScraper::evaluate_xpath(context, xpath, false)
+            .context(ImageDownloadErrorKind::HtmlParse)?;
 
         for mut node in node_vec {
             if let Some(url) = node.get_property("src") {
                 if !url.starts_with("data:") {

View file

@@ -1,5 +1,3 @@
-#[macro_use]
-mod macros;
 mod config;
 mod error;
 mod article;
@@ -147,11 +145,10 @@ impl ArticleScraper {
     async fn parse_pages(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-        let mut html = ArticleScraper::download(&url, &self.client).await?;
-        parse_html!(html, config, xpath_ctx);
+        let html = ArticleScraper::download(&url, &self.client).await?;
+        let mut xpath_ctx = Self::parse_html(html, config)?;
 
         // check for single page link
-        let mut xpath_ctx = xpath_ctx;
         if let Some(xpath_single_page_link) = config.single_page_link.clone() {
             debug!("Single page link xpath specified in config {}", xpath_single_page_link);
             if let Ok(result) = xpath_ctx.findvalue(&xpath_single_page_link, None) {
@@ -167,16 +164,12 @@ impl ArticleScraper {
         ArticleScraper::extract_body(&xpath_ctx, root, config)?;
 
         loop {
-            println!("loop");
             if let Some(url) = self.check_for_next_page(&xpath_ctx, config) {
-                println!("url {}", url);
-                let mut html = ArticleScraper::download(&url, &self.client).await?;
-                parse_html!(html, config, new_xpath_ctx);
-                ArticleScraper::strip_junk(&new_xpath_ctx, config, &url);
-                ArticleScraper::extract_body(&new_xpath_ctx, root, config)?;
-                xpath_ctx = new_xpath_ctx;
+                let html = ArticleScraper::download(&url, &self.client).await?;
+                xpath_ctx = Self::parse_html(html, config)?;
+                ArticleScraper::strip_junk(&xpath_ctx, config, &url);
+                ArticleScraper::extract_body(&xpath_ctx, root, config)?;
             } else {
-                println!("break");
                 break;
             }
         }
@@ -184,10 +177,51 @@ impl ArticleScraper {
         Ok(())
     }
 
+    fn parse_html(html: String, config: &GrabberConfig) -> Result<Context, ScraperError> {
+        // replace matches in raw html
+        let mut html = html;
+        for replace in &config.replace {
+            html = html.replace(&replace.to_replace, &replace.replace_with);
+        }
+
+        // parse html
+        let parser = Parser::default_html();
+        let doc = parser.parse_string(html.as_str()).map_err(|err| {
+            error!("Parsing HTML failed for downloaded HTML {:?}", err);
+            ScraperErrorKind::Xml
+        })?;
+
+        Ok(Context::new(&doc).map_err(|()| {
+            error!("Creating xpath context failed for downloaded HTML");
+            ScraperErrorKind::Xml
+        })?)
+    }
+
+    pub fn evaluate_xpath(xpath_ctx: &Context, xpath: &str, throw_if_empty: bool) -> Result<Vec<Node>, ScraperError> {
+        let res = xpath_ctx.evaluate(xpath).map_err(|()| {
+            error!("Evaluation of xpath {} yielded no results", xpath);
+            println!("Evaluation of xpath {} yielded no results", xpath);
+            ScraperErrorKind::Xml
+        })?;
+
+        let node_vec = res.get_nodes_as_vec();
+
+        if node_vec.len() == 0 {
+            error!("Evaluation of xpath {} yielded no results", xpath);
+            println!("Evaluation of xpath {} yielded no results", xpath);
+            if throw_if_empty {
+                return Err(ScraperErrorKind::Xml)?
+            }
+        }
+
+        Ok(node_vec)
+    }
+
     async fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-        let mut html = ArticleScraper::download(&url, &self.client).await?;
-        parse_html!(html, config, xpath_ctx);
+        let html = ArticleScraper::download(&url, &self.client).await?;
+        let xpath_ctx = Self::parse_html(html, config)?;
         ArticleScraper::extract_metadata(&xpath_ctx, config, article);
         ArticleScraper::strip_junk(&xpath_ctx, config, &url);
         ArticleScraper::extract_body(&xpath_ctx, root, config)?;
@@ -321,9 +355,7 @@ impl ArticleScraper {
     }
 
     fn extract_value(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-        evaluate_xpath!(context, xpath, node_vec);
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         if let Some(val) = node_vec.get(0) {
             return Ok(val.get_content())
         }
@@ -332,9 +364,7 @@ impl ArticleScraper {
     }
 
     fn extract_value_merge(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-        evaluate_xpath!(context, xpath, node_vec);
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, true)?;
 
         let mut val = String::new();
         for node in node_vec {
             val.push_str(&node.get_content());
@@ -344,14 +374,14 @@ impl ArticleScraper {
     }
 
     fn strip_node(context: &Context, xpath: &String) -> Result<(), ScraperError> {
         let mut ancestor = xpath.clone();
         if ancestor.starts_with("//") {
             ancestor = ancestor.chars().skip(2).collect();
         }
 
         let query = &format!("{}[not(ancestor::{})]", xpath, ancestor);
-        evaluate_xpath!(context, query, node_vec);
+        //println!("{}", query);
+        let node_vec = Self::evaluate_xpath(context, query, false)?;
         for mut node in node_vec {
             node.unlink();
         }
@@ -359,11 +389,11 @@ impl ArticleScraper {
     }
 
     fn strip_id_or_class(context: &Context, id_or_class: &String) -> Result<(), ScraperError> {
         let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
 
-        evaluate_xpath!(context, xpath, node_vec);
+        //println!("{}", xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
-            //node.unlink();
+            node.unlink();
         }
         Ok(())
     }
@@ -371,7 +401,7 @@ impl ArticleScraper {
 
     fn fix_lazy_images(context: &Context, class: &str, property_url: &str) -> Result<(), ScraperError> {
         let xpath = &format!("//img[contains(@class, '{}')]", class);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(correct_url) = node.get_property(property_url) {
                 if let Err(_) = node.set_property("src", &correct_url) {
@@ -385,7 +415,7 @@ impl ArticleScraper {
 
     fn fix_iframe_size(context: &Context, site_name: &str) -> Result<(), ScraperError> {
         let xpath = &format!("//iframe[contains(@src, '{}')]", site_name);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(mut parent) = node.get_parent() {
                 if let Ok(mut video_wrapper) = parent.new_child(None, "div") {
@@ -420,7 +450,7 @@ impl ArticleScraper {
         };
 
         let xpath = &format!("//{}[@{}]", xpath_tag, attribute);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Err(_) = node.remove_property(attribute) {
                 return Err(ScraperErrorKind::Xml)?
@@ -437,7 +467,7 @@ impl ArticleScraper {
         };
 
         let xpath = &format!("//{}", xpath_tag);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Err(_) = node.set_attribute(attribute, value) {
                 return Err(ScraperErrorKind::Xml)?
@@ -447,10 +477,7 @@ impl ArticleScraper {
     }
 
     fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
-        println!("get attribute {}", attribute);
-        evaluate_xpath!(context, xpath, node_vec);
-        println!("found {}", node_vec.len());
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for node in node_vec {
             if let Some(value) = node.get_attribute(attribute) {
                 return Ok(value)
@@ -461,8 +488,7 @@ impl ArticleScraper {
     }
 
     fn repair_urls(context: &Context, xpath: &str, attribute: &str, article_url: &url::Url) -> Result<(), ScraperError> {
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(val) = node.get_attribute(attribute) {
                 if let Err(url::ParseError::RelativeUrlWithoutBase) = url::Url::parse(&val) {
@@ -603,8 +629,7 @@ impl ArticleScraper {
         let mut found_something = false;
         {
-            evaluate_xpath!(context, xpath, node_vec);
-            xpath_result_empty!(node_vec, xpath);
+            let node_vec = Self::evaluate_xpath(context, xpath, false)?;
             for mut node in node_vec {
                 if node.get_property("style").is_some() {
                     if let Err(_) = node.remove_property("style") {
@@ -662,7 +687,7 @@ impl ArticleScraper {
 
         // this prevents libxml from self closing non void elements such as iframe
         let xpath = "//*[not(node())]";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if node.get_name() == "meta" {
                 continue
@@ -677,7 +702,7 @@ impl ArticleScraper {
 
     fn eliminate_noscrip_tag(context: &Context) -> Result<(), ScraperError> {
         let xpath = "//noscript";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 
         for mut node in node_vec {
             if let Some(mut parent) = node.get_parent() {
@@ -700,19 +725,19 @@ impl ArticleScraper {
 mod tests {
     use crate::*;
 
-    #[tokio::test]
-    async fn golem() {
-        let config_path = PathBuf::from(r"./resources/tests/golem");
-        let out_path = PathBuf::from(r"./test_output");
-        let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();
-
-        let grabber = ArticleScraper::new(config_path).unwrap();
-        let article = grabber.parse(url, true).await.unwrap();
-        article.save_html(&out_path).unwrap();
-
-        assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
-        assert_eq!(article.author, Some(String::from("Hauke Gierow")));
-    }
+    // #[tokio::test]
+    // async fn golem() {
+    //     let config_path = PathBuf::from(r"./resources/tests/golem");
+    //     let out_path = PathBuf::from(r"./test_output");
+    //     let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();
 
+    //     let grabber = ArticleScraper::new(config_path).unwrap();
+    //     let article = grabber.parse(url, true).await.unwrap();
+    //     article.save_html(&out_path).unwrap();
 
+    //     assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
+    //     assert_eq!(article.author, Some(String::from("Hauke Gierow")));
+    // }
 
     #[tokio::test]
     async fn phoronix() {

View file

@@ -1,51 +0,0 @@
-macro_rules! parse_html {
-    (
-        $html: ident,
-        $config: ident,
-        $xpath_ctx: ident
-    ) => {
-        // replace matches in raw html
-        for replace in &$config.replace {
-            $html = $html.replace(&replace.to_replace, &replace.replace_with);
-        }
-
-        // parse html
-        let parser = Parser::default_html();
-        let doc = parser.parse_string($html.as_str()).map_err(|err| {
-            error!("Parsing HTML failed for downloaded HTML {:?}", err);
-            ScraperErrorKind::Xml
-        })?;
-
-        let $xpath_ctx = Context::new(&doc).map_err(|()| {
-            error!("Creating xpath context failed for downloaded HTML");
-            ScraperErrorKind::Xml
-        })?;
-    };
-}
-
-macro_rules! evaluate_xpath {
-    (
-        $context: ident,
-        $xpath: ident,
-        $node_vec: ident
-    ) => {
-        let res = $context.evaluate($xpath).map_err(|()| {
-            error!("Evaluation of xpath {} yielded no results", $xpath);
-            ScraperErrorKind::Xml
-        })?;
-
-        let $node_vec = res.get_nodes_as_vec();
-    };
-}
-
-macro_rules! xpath_result_empty {
-    (
-        $node_vec: ident,
-        $xpath: ident
-    ) => {
-        if $node_vec.len() == 0 {
-            error!("Evaluation of xpath {} yielded no results", $xpath);
-            return Err(ScraperErrorKind::Xml)?
-        }
-    };
-}
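
For context, the refactoring applied throughout this commit (the early-returning evaluate_xpath! and xpath_result_empty! macros replaced by a single Result-returning helper used with the ? operator) can be sketched in isolation as follows. This is a minimal sketch with simplified stand-in types, not the crate's real libxml Context/Node or failure-based ScraperError types:

// Simplified stand-ins (assumed for illustration, not the crate's real types).
#[derive(Debug)]
struct ScraperError;

struct Node(String);       // stand-in for libxml::tree::Node
struct Context(Vec<Node>); // stand-in for libxml::xpath::Context

impl Context {
    // Fake evaluation: return references to "nodes" whose content contains the query.
    fn evaluate(&self, xpath: &str) -> Result<Vec<&Node>, ()> {
        Ok(self.0.iter().filter(|n| n.0.contains(xpath)).collect())
    }
}

// The helper that replaces the macros: failures bubble up as a Result,
// and an empty node set is only an error when the caller asks for that.
fn evaluate_xpath<'a>(ctx: &'a Context, xpath: &str, throw_if_empty: bool)
    -> Result<Vec<&'a Node>, ScraperError>
{
    let node_vec = ctx.evaluate(xpath).map_err(|()| ScraperError)?;
    if node_vec.is_empty() && throw_if_empty {
        return Err(ScraperError);
    }
    Ok(node_vec)
}

// A call site in the style of extract_value(): where the macros expanded to
// hidden `return` statements, the function version makes control flow explicit.
fn extract_value(ctx: &Context, xpath: &str) -> Result<String, ScraperError> {
    let node_vec = evaluate_xpath(ctx, xpath, false)?;
    if let Some(node) = node_vec.get(0) {
        return Ok(node.0.clone());
    }
    Err(ScraperError)
}

fn main() {
    let ctx = Context(vec![Node("img src".into()), Node("p text".into())]);
    println!("{:?}", extract_value(&ctx, "img"));   // Ok("img src")
    println!("{:?}", extract_value(&ctx, "video")); // Err(ScraperError)
}

Because the helper is an ordinary associated function rather than a macro, it no longer needs #[macro_use] and can be called from other modules, which is what the image downloader now does via ArticleScraper::evaluate_xpath.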