From 9c35fb9fa8a14e4b75eb0d2d6364f1b75c55f4f6 Mon Sep 17 00:00:00 2001
From: Jan Lukas Gernert
Date: Mon, 16 Dec 2019 11:36:59 +0000
Subject: [PATCH] Async

Make the scraper async: use the async reqwest 0.10 client on a tokio 0.2
runtime, turn the download and parsing entry points into async fns, replace
the xpath helper macros with regular functions, and fetch follow-up pages in
a loop instead of recursing.
---
 Cargo.toml        |   7 ++-
 src/images/mod.rs |  54 +++++++++-------
 src/lib.rs        | 155 ++++++++++++++++++++++++++++------------------
 src/macros.rs     |  51 ---------------
 4 files changed, 128 insertions(+), 139 deletions(-)
 delete mode 100644 src/macros.rs

diff --git a/Cargo.toml b/Cargo.toml
index f732ba8..b423f30 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,12 +7,13 @@ edition = "2018"
 [dependencies]
 failure = "0.1"
 libxml = { git = "https://github.com/KWARC/rust-libxml.git" }
-reqwest = "0.9"
-url = "1.7"
+reqwest = { version = "0.10.0-alpha.2", features = ["json"] }
+tokio = { version = "=0.2.0-alpha.6" }
+url = "2.1"
 regex = "1.3"
 encoding_rs = "0.8"
 chrono = "0.4"
 htmlescape = "0.3"
-base64 = "0.10"
+base64 = "0.11"
 image = "0.22"
 log = "0.4"
\ No newline at end of file
diff --git a/src/images/mod.rs b/src/images/mod.rs
index 65a5734..e2d2b54 100644
--- a/src/images/mod.rs
+++ b/src/images/mod.rs
@@ -16,7 +16,7 @@ use self::error::{ImageDownloadError, ImageDownloadErrorKind};
 use base64;
 use std;
 use image;
-use super::ScraperErrorKind;
+use crate::ArticleScraper;
 
 mod error;
 
@@ -34,7 +34,7 @@ impl ImageDownloader {
         }
     }
 
-    pub fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
+    pub async fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
         let parser = Parser::default_html();
         let doc = parser.parse_string(html).map_err(|_| {
             error!("Failed to parse HTML string");
@@ -46,7 +46,7 @@ impl ImageDownloader {
             ImageDownloadErrorKind::HtmlParse
         })?;
 
-        self.download_images_from_context(&xpath_ctx)?;
+        self.download_images_from_context(&xpath_ctx).await?;
 
         let options = SaveOptions {
             format: false,
@@ -61,19 +61,20 @@ impl ImageDownloader {
         Ok(doc.to_string_with_options(options))
     }
 
-    pub fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
+    pub async fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
         let xpath = "//img";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = ArticleScraper::evaluate_xpath(context, xpath, false)
+            .context(ImageDownloadErrorKind::HtmlParse)?;
         for mut node in node_vec {
             if let Some(url) = node.get_property("src") {
                 if !url.starts_with("data:") {
                     if let Ok(url) = url::Url::parse(&url) {
-                        let parent_url = match self.check_image_parent(&node, &url) {
+                        let parent_url = match self.check_image_parent(&node, &url).await {
                             Ok(url) => Some(url),
                             Err(_) => None,
                         };
 
-                        if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url) {
+                        if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url).await {
                             if let Err(_) = node.set_property("src", &small_image) {
                                 return Err(ImageDownloadErrorKind::HtmlParse)?;
                             }
@@ -91,9 +92,9 @@ impl ImageDownloader {
         Ok(())
     }
 
-    fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
+    async fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
 
-        let mut response = self.client.get(image_url.clone()).send().map_err(|err| {
+        let response = self.client.get(image_url.clone()).send().await.map_err(|err| {
             error!("GET {} failed - {}", image_url.as_str(), err.description());
             err
         }).context(ImageDownloadErrorKind::Http)?;
@@ -103,23 +104,27 @@ impl ImageDownloader {
             .context(ImageDownloadErrorKind::ContentType)?;
 
         let mut content_type_big : Option<String> = None;
 
-        let mut small_image : Vec<u8> = Vec::new();
+        let mut small_image = response
+            .bytes()
+            .await
+            .context(ImageDownloadErrorKind::IO)?
+            .as_ref()
+            .to_vec();
+
         let mut big_image : Option<Vec<u8>> = None;
-        response.copy_to(&mut small_image)
-            .context(ImageDownloadErrorKind::IO)?;
-
         if let Some(parent_url) = parent_url {
-            let mut response_big = self.client.get(parent_url.clone()).send()
+            let response_big = self.client.get(parent_url.clone()).send().await
                 .context(ImageDownloadErrorKind::Http)?;
-            content_type_big = Some(ImageDownloader::check_image_content_type(&response)?
+            content_type_big = Some(ImageDownloader::check_image_content_type(&response_big)?
                 .to_str()
                 .context(ImageDownloadErrorKind::ContentType)?
                 .to_owned());
-            let mut big_buffer : Vec<u8> = Vec::new();
-            response_big.copy_to(&mut big_buffer)
-                .context(ImageDownloadErrorKind::IO)?;
-            big_image = Some(big_buffer);
+            big_image = Some(response_big
+                .bytes()
+                .await
+                .context(ImageDownloadErrorKind::IO)?
+                .to_vec());
         }
 
         if content_type_small != "image/svg+xml" && content_type_small != "image/gif" {
@@ -215,14 +220,14 @@ impl ImageDownloader {
         }
     }
 
-    fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
+    async fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
         if let Some(parent) = node.get_parent() {
             if parent.get_name() == "a" {
                 if let Some(url) = parent.get_property("href") {
                     let parent_url = url::Url::parse(&url).context(ImageDownloadErrorKind::ParentDownload)?;
-                    let parent_response = self.client.head(parent_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+                    let parent_response = self.client.head(parent_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
                     let _ = ImageDownloader::check_image_content_type(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
-                    let child_response = self.client.get(child_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+                    let child_response = self.client.get(child_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
 
                     let parent_length = Self::get_content_lenght(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
                     let child_length = Self::get_content_lenght(&child_response).context(ImageDownloadErrorKind::ParentDownload)?;
@@ -260,12 +265,13 @@ mod tests {
     use std::fs;
     use std::io::Write;
 
-    #[test]
-    pub fn close_tags() {
+    #[tokio::test]
+    async fn close_tags() {
         let image_dowloader = ImageDownloader::new((2048, 2048));
         let hdyleaflet = fs::read_to_string(r"./resources/tests/planetGnome/fedora31.html")
             .expect("Failed to read HTML");
         let result = image_dowloader.download_images_from_string(&hdyleaflet)
+            .await
             .expect("Failed to downalod images");
         let mut file = fs::File::create(r"./resources/tests/planetGnome/fedora31_images_downloaded.html")
             .expect("Failed to create output file");
diff --git a/src/lib.rs b/src/lib.rs
index 963a69e..a331c25 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,5 +1,3 @@
-#[macro_use]
-mod macros;
 mod config;
 mod error;
 mod article;
@@ -60,10 +58,10 @@ impl ArticleScraper {
         })
     }
 
-    pub fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
+    pub async fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
         info!("Scraping article: {}", url.as_str());
 
-        let response = self.client.head(url.clone()).send()
+        let response = self.client.head(url.clone()).send().await
             .map_err(|err| {
                 error!("Failed head request to: {} - {}", url.as_str(), err.description());
                 err
             })
@@ -105,7 +103,7 @@ impl ArticleScraper {
 
         ArticleScraper::generate_head(&mut root, &document)?;
 
-        self.parse_first_page(&mut article, &url, &mut root, config)?;
+        self.parse_pages(&mut article, &url, &mut root, config).await?;
 
         let context = Context::new(&document).map_err(|()| {
             error!("Failed to create xpath context for extracted article");
@@ -123,7 +121,7 @@ impl ArticleScraper {
         }
 
         if download_images {
-            if let Err(error) = self.image_downloader.download_images_from_context(&context) {
+            if let Err(error) = self.image_downloader.download_images_from_context(&context).await {
                 error!("Downloading images failed: {}", error);
             }
         }
@@ -145,20 +143,20 @@ impl ArticleScraper {
         Ok(article)
     }
 
-    fn parse_first_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+    async fn parse_pages(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
 
-        let mut html = ArticleScraper::download(&url, &self.client)?;
-        parse_html!(html, config, xpath_ctx);
+        let html = ArticleScraper::download(&url, &self.client).await?;
+        let mut document = Self::parse_html(html, config)?;
+        let mut xpath_ctx = Self::get_xpath_ctx(&document)?;
 
         // check for single page link
-        let mut xpath_ctx = xpath_ctx;
         if let Some(xpath_single_page_link) = config.single_page_link.clone() {
             debug!("Single page link xpath specified in config {}", xpath_single_page_link);
             if let Ok(result) = xpath_ctx.findvalue(&xpath_single_page_link, None) {
                 // parse again with single page url
                 debug!("Single page link found {}", result);
                 let single_page_url = url::Url::parse(&result).context(ScraperErrorKind::Url)?;
-                return self.parse_single_page(article, &single_page_url, root, config);
+                return self.parse_single_page(article, &single_page_url, root, config).await;
             }
         }
 
@@ -166,23 +164,67 @@ impl ArticleScraper {
         ArticleScraper::strip_junk(&xpath_ctx, config, &url);
         ArticleScraper::extract_body(&xpath_ctx, root, config)?;
 
-        self.check_for_next_page(&xpath_ctx, config, root)
+        loop {
+            if let Some(url) = self.check_for_next_page(&xpath_ctx, config) {
+                let html = ArticleScraper::download(&url, &self.client).await?;
+                document = Self::parse_html(html, config)?;
+                xpath_ctx = Self::get_xpath_ctx(&document)?;
+                ArticleScraper::strip_junk(&xpath_ctx, config, &url);
+                ArticleScraper::extract_body(&xpath_ctx, root, config)?;
+            } else {
+                break;
+            }
+        }
+
+        Ok(())
     }
 
-    fn parse_next_page(&self, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-
-        let mut html = ArticleScraper::download(&url, &self.client)?;
-        parse_html!(html, config, xpath_ctx);
-        ArticleScraper::strip_junk(&xpath_ctx, config, &url);
-        ArticleScraper::extract_body(&xpath_ctx, root, config)?;
-
-        self.check_for_next_page(&xpath_ctx, config, root)
-    }
-
-    fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+    fn parse_html(html: String, config: &GrabberConfig) -> Result<Document, ScraperError> {
+        // replace matches in raw html
 
-        let mut html = ArticleScraper::download(&url, &self.client)?;
-        parse_html!(html, config, xpath_ctx);
+        let mut html = html;
+        for replace in &config.replace {
+            html = html.replace(&replace.to_replace, &replace.replace_with);
+        }
+
+        // parse html
+        let parser = Parser::default_html();
+        Ok(parser.parse_string(html.as_str()).map_err(|err| {
+            error!("Parsing HTML failed for downloaded HTML {:?}", err);
+            ScraperErrorKind::Xml
+        })?)
+    }
+
+    fn get_xpath_ctx(doc: &Document) -> Result<Context, ScraperError> {
+        Ok(Context::new(&doc).map_err(|()| {
+            error!("Creating xpath context failed for downloaded HTML");
+            ScraperErrorKind::Xml
+        })?)
+    }
+
+    pub fn evaluate_xpath(xpath_ctx: &Context, xpath: &str, throw_if_empty: bool) -> Result<Vec<Node>, ScraperError> {
+        let res = xpath_ctx.evaluate(xpath).map_err(|()| {
+            error!("Failed to evaluate xpath {}", xpath);
+            ScraperErrorKind::Xml
+        })?;
+
+        let node_vec = res.get_nodes_as_vec();
+
+        if node_vec.len() == 0 {
+            error!("Evaluation of xpath {} yielded no results", xpath);
+            if throw_if_empty {
+                return Err(ScraperErrorKind::Xml)?
+            }
+        }
+
+        Ok(node_vec)
+    }
+
+    async fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+
+        let html = ArticleScraper::download(&url, &self.client).await?;
+        let document = Self::parse_html(html, config)?;
+        let xpath_ctx = Self::get_xpath_ctx(&document)?;
         ArticleScraper::extract_metadata(&xpath_ctx, config, article);
         ArticleScraper::strip_junk(&xpath_ctx, config, &url);
         ArticleScraper::extract_body(&xpath_ctx, root, config)?;
@@ -190,9 +232,9 @@ impl ArticleScraper {
         Ok(())
     }
 
-    fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
+    async fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
 
-        let mut response = client.get(url.as_str()).send()
+        let response = client.get(url.as_str()).send().await
             .map_err(|err| {
                 error!("Downloading HTML failed: GET {} - {}", url.as_str(), err.description());
                 err
             })
             .context(ScraperErrorKind::Http)?;
 
         if response.status().is_success() {
-            let text = response.text().context(ScraperErrorKind::Http)?;
+            let headers = response.headers().clone();
+            let text = response.text().await.context(ScraperErrorKind::Http)?;
             {
                 if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_html(&text)) {
                     return Ok(decoded_html)
                 }
 
-                if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(response.headers())) {
+                if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(&headers)) {
                     return Ok(decoded_html)
                 }
             }
@@ -315,9 +358,7 @@ impl ArticleScraper {
     }
 
     fn extract_value(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-
-        evaluate_xpath!(context, xpath, node_vec);
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         if let Some(val) = node_vec.get(0) {
             return Ok(val.get_content())
         }
@@ -326,9 +367,7 @@ impl ArticleScraper {
     }
 
     fn extract_value_merge(context: &Context, xpath: &str) -> Result<String, ScraperError> {
-
-        evaluate_xpath!(context, xpath, node_vec);
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, true)?;
         let mut val = String::new();
         for node in node_vec {
             val.push_str(&node.get_content());
@@ -338,14 +377,13 @@ impl ArticleScraper {
     }
 
     fn strip_node(context: &Context, xpath: &String) -> Result<(), ScraperError> {
-
         let mut ancestor = xpath.clone();
         if ancestor.starts_with("//") {
             ancestor = ancestor.chars().skip(2).collect();
         }
 
         let query = &format!("{}[not(ancestor::{})]", xpath, ancestor);
-        evaluate_xpath!(context, query, node_vec);
+        let node_vec = Self::evaluate_xpath(context, query, false)?;
         for mut node in node_vec {
             node.unlink();
         }
@@ -353,9 +391,8 @@ impl ArticleScraper {
     }
 
     fn strip_id_or_class(context: &Context, id_or_class: &String) -> Result<(), ScraperError> {
-
         let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             node.unlink();
         }
@@ -365,7 +402,7 @@ impl ArticleScraper {
 
     fn fix_lazy_images(context: &Context, class: &str, property_url: &str) -> Result<(), ScraperError> {
         let xpath = &format!("//img[contains(@class, '{}')]", class);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(correct_url) = node.get_property(property_url) {
                 if let Err(_) = node.set_property("src", &correct_url) {
@@ -379,7 +416,7 @@ impl ArticleScraper {
 
     fn fix_iframe_size(context: &Context, site_name: &str) -> Result<(), ScraperError> {
         let xpath = &format!("//iframe[contains(@src, '{}')]", site_name);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(mut parent) = node.get_parent() {
                 if let Ok(mut video_wrapper) = parent.new_child(None, "div") {
@@ -414,7 +451,7 @@ impl ArticleScraper {
         };
 
         let xpath = &format!("//{}[@{}]", xpath_tag, attribute);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Err(_) = node.remove_property(attribute) {
                 return Err(ScraperErrorKind::Xml)?
@@ -431,7 +468,7 @@ impl ArticleScraper {
         };
 
         let xpath = &format!("//{}", xpath_tag);
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Err(_) = node.set_attribute(attribute, value) {
                 return Err(ScraperErrorKind::Xml)?
@@ -441,9 +478,7 @@ impl ArticleScraper {
     }
 
     fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
-
-        evaluate_xpath!(context, xpath, node_vec);
-        xpath_result_empty!(node_vec, xpath);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for node in node_vec {
             if let Some(value) = node.get_attribute(attribute) {
                 return Ok(value)
@@ -454,8 +489,7 @@ impl ArticleScraper {
     }
 
     fn repair_urls(context: &Context, xpath: &str, attribute: &str, article_url: &url::Url) -> Result<(), ScraperError> {
-
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if let Some(val) = node.get_attribute(attribute) {
                 if let Err(url::ParseError::RelativeUrlWithoutBase) = url::Url::parse(&val) {
@@ -596,8 +630,7 @@ impl ArticleScraper {
         let mut found_something = false;
         {
-            evaluate_xpath!(context, xpath, node_vec);
-            xpath_result_empty!(node_vec, xpath);
+            let node_vec = Self::evaluate_xpath(context, xpath, false)?;
             for mut node in node_vec {
                 if node.get_property("style").is_some() {
                     if let Err(_) = node.remove_property("style") {
@@ -619,18 +652,18 @@ impl ArticleScraper {
         Ok(found_something)
     }
 
-    fn check_for_next_page(&self, context: &Context, config: &GrabberConfig, root: &mut Node) -> Result<(), ScraperError> {
+    fn check_for_next_page(&self, context: &Context, config: &GrabberConfig) -> Option<url::Url> {
 
         if let Some(next_page_xpath) = config.next_page_link.clone() {
             if let Ok(next_page_string) = ArticleScraper::get_attribute(&context, &next_page_xpath, "href") {
                 if let Ok(next_page_url) = url::Url::parse(&next_page_string) {
-                    return self.parse_next_page(&next_page_url, root, config)
+                    return Some(next_page_url)
                 }
             }
         }
 
         // last page reached
-        Ok(())
+        None
     }
 
     fn generate_head(root: &mut Node, document: &Document) -> Result<(), ScraperError> {
@@ -654,7 +687,7 @@ impl ArticleScraper {
 
         // this prevents libxml from self closing non void elements such as iframe
         let xpath = "//*[not(node())]";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
         for mut node in node_vec {
             if node.get_name() == "meta" {
                 continue
@@ -669,7 +702,7 @@ impl ArticleScraper {
 
     fn eliminate_noscrip_tag(context: &Context) -> Result<(), ScraperError> {
         let xpath = "//noscript";
-        evaluate_xpath!(context, xpath, node_vec);
+        let node_vec = Self::evaluate_xpath(context, xpath, false)?;
 
         for mut node in node_vec {
             if let Some(mut parent) = node.get_parent() {
@@ -692,28 +725,28 @@ mod tests {
     use crate::*;
 
-    #[test]
-    pub fn golem() {
+    #[tokio::test]
+    async fn golem() {
         let config_path = PathBuf::from(r"./resources/tests/golem");
         let out_path = PathBuf::from(r"./test_output");
         let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();
 
         let grabber = ArticleScraper::new(config_path).unwrap();
-        let article = grabber.parse(url, true).unwrap();
+        let article = grabber.parse(url, true).await.unwrap();
         article.save_html(&out_path).unwrap();
         assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
         assert_eq!(article.author, Some(String::from("Hauke Gierow")));
     }
 
-    #[test]
-    pub fn phoronix() {
+    #[tokio::test]
+    async fn phoronix() {
         let config_path = PathBuf::from(r"./resources/tests/phoronix");
         let out_path = PathBuf::from(r"./test_output");
         let url = url::Url::parse("http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1").unwrap();
 
         let grabber = ArticleScraper::new(config_path).unwrap();
-        let article = grabber.parse(url, true).unwrap();
+        let article = grabber.parse(url, true).await.unwrap();
         article.save_html(&out_path).unwrap();
         assert_eq!(article.title, Some(String::from("Amazon EC2 Cloud Benchmarks Against Bare Metal Systems")));
diff --git a/src/macros.rs b/src/macros.rs
deleted file mode 100644
index c51fe16..0000000
--- a/src/macros.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-macro_rules! parse_html {
-    (
-        $html: ident,
-        $config: ident,
-        $xpath_ctx: ident
-    ) => {
-        // replace matches in raw html
-        for replace in &$config.replace {
-            $html = $html.replace(&replace.to_replace, &replace.replace_with);
-        }
-
-        // parse html
-        let parser = Parser::default_html();
-        let doc = parser.parse_string($html.as_str()).map_err(|err| {
-            error!("Parsing HTML failed for downloaded HTML {:?}", err);
-            ScraperErrorKind::Xml
-        })?;
-
-        let $xpath_ctx = Context::new(&doc).map_err(|()| {
-            error!("Creating xpath context failed for downloaded HTML");
-            ScraperErrorKind::Xml
-        })?;
-    };
-}
-
-macro_rules! evaluate_xpath {
-    (
-        $context: ident,
-        $xpath: ident,
-        $node_vec: ident
-    ) => {
-        let res = $context.evaluate($xpath).map_err(|()| {
-            error!("Evaluation of xpath {} yielded no results", $xpath);
-            ScraperErrorKind::Xml
-        })?;
-
-        let $node_vec = res.get_nodes_as_vec();
-    };
-}
-
-macro_rules! xpath_result_empty {
-    (
-        $node_vec: ident,
-        $xpath: ident
-    ) => {
-        if $node_vec.len() == 0 {
-            error!("Evaluation of xpath {} yielded no results", $xpath);
-            return Err(ScraperErrorKind::Xml)?
-        }
-    };
-}
\ No newline at end of file
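Usage sketch (illustrative only, not part of the patch): with parse() now being async, downstream callers have to drive it on a tokio runtime and .await the result. The example assumes the crate is consumed under the name article_scraper and that the pinned tokio 0.2 alpha provides the #[tokio::main] attribute, analogous to the #[tokio::test] attribute used in the tests above; the config path and URL are simply the ones from the golem test.

use article_scraper::ArticleScraper;
use std::path::PathBuf;

#[tokio::main]
async fn main() {
    // Directory with the site-specific grabber configs (here: the test fixtures).
    let grabber = ArticleScraper::new(PathBuf::from(r"./resources/tests/golem")).unwrap();
    let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();

    // parse() is now an async fn: it downloads the page(s), extracts the article
    // and, with `true`, also downloads and inlines the images.
    let article = grabber.parse(url, true).await.unwrap();
    println!("{:?}", article.title);
}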