mirror of https://gitlab.com/news-flash/article_scraper.git

wip: async

commit 4b8af0d709 (parent 5f82872d1f)
Author: Jan Lukas Gernert
Date:   2019-11-10 14:43:59 +01:00

3 changed files with 74 additions and 60 deletions

Changed file: Cargo.toml

@@ -7,12 +7,13 @@ edition = "2018"
 [dependencies]
 failure = "0.1"
 libxml = { git = "https://github.com/KWARC/rust-libxml.git" }
-reqwest = "0.9"
-url = "1.7"
+reqwest = { version = "0.10.0-alpha.1", features = ["json"] }
+tokio = { version = "=0.2.0-alpha.6" }
+url = "2.1"
 regex = "1.3"
 encoding_rs = "0.8"
 chrono = "0.4"
 htmlescape = "0.3"
-base64 = "0.10"
+base64 = "0.11"
 image = "0.22"
 log = "0.4"

Changed file: image downloader module (impl ImageDownloader)

@@ -34,7 +34,7 @@ impl ImageDownloader {
         }
     }

-    pub fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
+    pub async fn download_images_from_string(&self, html: &str) -> Result<String, ImageDownloadError> {
         let parser = Parser::default_html();
         let doc = parser.parse_string(html).map_err(|_| {
             error!("Failed to parse HTML string");
@@ -46,7 +46,7 @@ impl ImageDownloader {
             ImageDownloadErrorKind::HtmlParse
         })?;

-        self.download_images_from_context(&xpath_ctx)?;
+        self.download_images_from_context(&xpath_ctx).await?;

         let options = SaveOptions {
             format: false,
@@ -61,19 +61,19 @@ impl ImageDownloader {
         Ok(doc.to_string_with_options(options))
     }

-    pub fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
+    pub async fn download_images_from_context(&self, context: &Context) -> Result<(), ImageDownloadError> {
         let xpath = "//img";
         evaluate_xpath!(context, xpath, node_vec);
         for mut node in node_vec {
             if let Some(url) = node.get_property("src") {
                 if !url.starts_with("data:") {
                     if let Ok(url) = url::Url::parse(&url) {
-                        let parent_url = match self.check_image_parent(&node, &url) {
+                        let parent_url = match self.check_image_parent(&node, &url).await {
                             Ok(url) => Some(url),
                             Err(_) => None,
                         };
-                        if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url) {
+                        if let Ok((small_image, big_image)) = self.save_image(&url, &parent_url).await {
                             if let Err(_) = node.set_property("src", &small_image) {
                                 return Err(ImageDownloadErrorKind::HtmlParse)?;
                             }
@@ -91,9 +91,9 @@ impl ImageDownloader {
         Ok(())
     }

-    fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
-        let mut response = self.client.get(image_url.clone()).send().map_err(|err| {
+    async fn save_image(&self, image_url: &url::Url, parent_url: &Option<url::Url>) -> Result<(String, Option<String>), ImageDownloadError> {
+        let response = self.client.get(image_url.clone()).send().await.map_err(|err| {
             error!("GET {} failed - {}", image_url.as_str(), err.description());
             err
         }).context(ImageDownloadErrorKind::Http)?;
@@ -103,23 +103,27 @@ impl ImageDownloader {
             .context(ImageDownloadErrorKind::ContentType)?;
         let mut content_type_big : Option<String> = None;

-        let mut small_image : Vec<u8> = Vec::new();
+        let mut small_image = response
+            .bytes()
+            .await
+            .context(ImageDownloadErrorKind::IO)?
+            .as_ref()
+            .to_vec();
         let mut big_image : Option<Vec<u8>> = None;
-        response.copy_to(&mut small_image)
-            .context(ImageDownloadErrorKind::IO)?;

         if let Some(parent_url) = parent_url {
-            let mut response_big = self.client.get(parent_url.clone()).send()
+            let response_big = self.client.get(parent_url.clone()).send().await
                 .context(ImageDownloadErrorKind::Http)?;
-            content_type_big = Some(ImageDownloader::check_image_content_type(&response)?
+            content_type_big = Some(ImageDownloader::check_image_content_type(&response_big)?
                 .to_str()
                 .context(ImageDownloadErrorKind::ContentType)?
                 .to_owned());
-            let mut big_buffer : Vec<u8> = Vec::new();
-            response_big.copy_to(&mut big_buffer)
-                .context(ImageDownloadErrorKind::IO)?;
-            big_image = Some(big_buffer);
+            big_image = Some(response_big
+                .bytes()
+                .await
+                .context(ImageDownloadErrorKind::IO)?
+                .to_vec());
         }

         if content_type_small != "image/svg+xml" && content_type_small != "image/gif" {
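The blocking Response::copy_to(&mut Vec<u8>) calls are gone; the async API buffers the body with bytes().await and the result is copied into a Vec<u8>. A small, self-contained sketch of that pattern (names are illustrative, not the crate's API):

// Illustrative sketch: collect an image body as Vec<u8> with async reqwest,
// the same bytes().await -> to_vec() pattern used in save_image() above.
async fn fetch_image_bytes(
    client: &reqwest::Client,
    url: &str,
) -> Result<Vec<u8>, reqwest::Error> {
    let response = client.get(url).send().await?;
    // bytes() buffers the whole body into a Bytes value.
    let body = response.bytes().await?;
    // The surrounding code stores plain Vec<u8>, so copy the buffer out.
    Ok(body.to_vec())
}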
@@ -215,14 +219,14 @@ impl ImageDownloader {
         }
     }

-    fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
+    async fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
         if let Some(parent) = node.get_parent() {
             if parent.get_name() == "a" {
                 if let Some(url) = parent.get_property("href") {
                     let parent_url = url::Url::parse(&url).context(ImageDownloadErrorKind::ParentDownload)?;
-                    let parent_response = self.client.head(parent_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+                    let parent_response = self.client.head(parent_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
                     let _ = ImageDownloader::check_image_content_type(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
-                    let child_response = self.client.get(child_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
+                    let child_response = self.client.get(child_url.clone()).send().await.context(ImageDownloadErrorKind::ParentDownload)?;
                     let parent_length = Self::get_content_lenght(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
                     let child_length = Self::get_content_lenght(&child_response).context(ImageDownloadErrorKind::ParentDownload)?;
@@ -260,12 +264,13 @@ mod tests {
     use std::fs;
     use std::io::Write;

-    #[test]
-    pub fn close_tags() {
+    #[tokio::test]
+    async fn close_tags() {
         let image_dowloader = ImageDownloader::new((2048, 2048));
         let hdyleaflet = fs::read_to_string(r"./resources/tests/planetGnome/fedora31.html")
             .expect("Failed to read HTML");
         let result = image_dowloader.download_images_from_string(&hdyleaflet)
+            .await
             .expect("Failed to downalod images");
         let mut file = fs::File::create(r"./resources/tests/planetGnome/fedora31_images_downloaded.html")
             .expect("Failed to create output file");

Changed file: article scraper module (impl ArticleScraper)

@@ -60,10 +60,10 @@ impl ArticleScraper {
         })
     }

-    pub fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
+    pub async fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
         info!("Scraping article: {}", url.as_str());

-        let response = self.client.head(url.clone()).send()
+        let response = self.client.head(url.clone()).send().await
             .map_err(|err| {
                 error!("Failed head request to: {} - {}", url.as_str(), err.description());
                 err
@@ -105,7 +105,7 @@ impl ArticleScraper {
         ArticleScraper::generate_head(&mut root, &document)?;

-        self.parse_first_page(&mut article, &url, &mut root, config)?;
+        self.parse_pages(&mut article, &url, &mut root, config).await?;

         let context = Context::new(&document).map_err(|()| {
             error!("Failed to create xpath context for extracted article");
@@ -123,7 +123,7 @@ impl ArticleScraper {
         }

         if download_images {
-            if let Err(error) = self.image_downloader.download_images_from_context(&context) {
+            if let Err(error) = self.image_downloader.download_images_from_context(&context).await {
                 error!("Downloading images failed: {}", error);
             }
         }
@@ -145,9 +145,9 @@ impl ArticleScraper {
         Ok(article)
     }

-    fn parse_first_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-        let mut html = ArticleScraper::download(&url, &self.client)?;
+    async fn parse_pages(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+        let mut html = ArticleScraper::download(&url, &self.client).await?;
         parse_html!(html, config, xpath_ctx);

         // check for single page link
@@ -158,7 +158,7 @@ impl ArticleScraper {
                 // parse again with single page url
                 debug!("Single page link found {}", result);
                 let single_page_url = url::Url::parse(&result).context(ScraperErrorKind::Url)?;
-                return self.parse_single_page(article, &single_page_url, root, config);
+                return self.parse_single_page(article, &single_page_url, root, config).await;
             }
         }
@@ -166,22 +166,27 @@ impl ArticleScraper {
         ArticleScraper::strip_junk(&xpath_ctx, config, &url);
         ArticleScraper::extract_body(&xpath_ctx, root, config)?;

-        self.check_for_next_page(&xpath_ctx, config, root)
-    }
-
-    fn parse_next_page(&self, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-        let mut html = ArticleScraper::download(&url, &self.client)?;
-        parse_html!(html, config, xpath_ctx);
-
-        ArticleScraper::strip_junk(&xpath_ctx, config, &url);
-        ArticleScraper::extract_body(&xpath_ctx, root, config)?;
-        self.check_for_next_page(&xpath_ctx, config, root)
+        loop {
+            println!("loop");
+            if let Some(url) = self.check_for_next_page(&xpath_ctx, config) {
+                println!("url {}", url);
+                let mut html = ArticleScraper::download(&url, &self.client).await?;
+                parse_html!(html, config, new_xpath_ctx);
+                ArticleScraper::strip_junk(&new_xpath_ctx, config, &url);
+                ArticleScraper::extract_body(&new_xpath_ctx, root, config)?;
+                xpath_ctx = new_xpath_ctx;
+            } else {
+                println!("break");
+                break;
+            }
+        }
+
+        Ok(())
     }

-    fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
-        let mut html = ArticleScraper::download(&url, &self.client)?;
+    async fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
+        let mut html = ArticleScraper::download(&url, &self.client).await?;
         parse_html!(html, config, xpath_ctx);
         ArticleScraper::extract_metadata(&xpath_ctx, config, article);
         ArticleScraper::strip_junk(&xpath_ctx, config, &url);
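parse_first_page and parse_next_page previously recursed into each other to follow multi-page articles; the commit flattens this into a loop inside parse_pages (which avoids the boxed futures that recursive async calls would need), and check_for_next_page now just returns the next URL instead of doing the follow-up work itself. A standalone sketch of that iterative pattern, where extract_next_url is a hypothetical stand-in for the crate's xpath-based lookup:

// Standalone sketch of iterative pagination: keep fetching while a
// next-page URL can be found, then return all page bodies.
async fn collect_pages(
    client: &reqwest::Client,
    start_url: String,
) -> Result<Vec<String>, reqwest::Error> {
    let mut pages = Vec::new();
    let mut next = Some(start_url);

    while let Some(url) = next {
        let html = client.get(url.as_str()).send().await?.text().await?;
        // None means "last page reached" and ends the loop.
        next = extract_next_url(&html);
        pages.push(html);
    }

    Ok(pages)
}

// Hypothetical helper; the real crate resolves this via an xpath from the
// per-site grabber config (check_for_next_page).
fn extract_next_url(_html: &str) -> Option<String> {
    None
}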
@@ -190,9 +195,9 @@ impl ArticleScraper {
         Ok(())
     }

-    fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
-        let mut response = client.get(url.as_str()).send()
+    async fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
+        let response = client.get(url.as_str()).send().await
             .map_err(|err| {
                 error!("Downloading HTML failed: GET {} - {}", url.as_str(), err.description());
                 err
@@ -200,13 +205,14 @@ impl ArticleScraper {
             .context(ScraperErrorKind::Http)?;

         if response.status().is_success() {
-            let text = response.text().context(ScraperErrorKind::Http)?;
+            let headers = response.headers().clone();
+            let text = response.text().await.context(ScraperErrorKind::Http)?;
             {
                 if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_html(&text)) {
                     return Ok(decoded_html)
                 }

-                if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(response.headers())) {
+                if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(&headers)) {
                     return Ok(decoded_html)
                 }
             }
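In async reqwest, Response::text() takes the response by value, so the headers needed later for charset detection have to be cloned out before the body is consumed; that is why download() gains the new let headers = response.headers().clone(); line. A hedged sketch of that pattern (not the crate's code, charset probing elided):

// Sketch of "clone the headers before consuming the body".
use reqwest::header::HeaderMap;

async fn body_and_headers(
    client: &reqwest::Client,
    url: &str,
) -> Result<(String, HeaderMap), reqwest::Error> {
    let response = client.get(url).send().await?;
    // text() consumes `response`, so grab metadata first.
    let headers = response.headers().clone();
    let text = response.text().await?;
    Ok((text, headers))
}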
@@ -357,7 +363,7 @@ impl ArticleScraper {
         let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
         evaluate_xpath!(context, xpath, node_vec);
         for mut node in node_vec {
-            node.unlink();
+            //node.unlink();
         }
         Ok(())
     }
@@ -441,8 +447,9 @@ impl ArticleScraper {
     }

     fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
+        println!("get attribute {}", attribute);
         evaluate_xpath!(context, xpath, node_vec);
+        println!("found {}", node_vec.len());
         xpath_result_empty!(node_vec, xpath);
         for node in node_vec {
             if let Some(value) = node.get_attribute(attribute) {
@@ -619,18 +626,19 @@ impl ArticleScraper {
         Ok(found_something)
     }

-    fn check_for_next_page(&self, context: &Context, config: &GrabberConfig, root: &mut Node) -> Result<(), ScraperError> {
+    fn check_for_next_page(&self, context: &Context, config: &GrabberConfig) -> Option<url::Url> {
         if let Some(next_page_xpath) = config.next_page_link.clone() {
             if let Ok(next_page_string) = ArticleScraper::get_attribute(&context, &next_page_xpath, "href") {
                 if let Ok(next_page_url) = url::Url::parse(&next_page_string) {
-                    return self.parse_next_page(&next_page_url, root, config)
+                    println!("next_page_url: {}", next_page_url);
+                    return Some(next_page_url)
                 }
             }
         }

         // last page reached
-        Ok(())
+        None
     }

     fn generate_head(root: &mut Node, document: &Document) -> Result<(), ScraperError> {
@@ -692,28 +700,28 @@
 mod tests {
     use crate::*;

-    #[test]
-    pub fn golem() {
+    #[tokio::test]
+    async fn golem() {
         let config_path = PathBuf::from(r"./resources/tests/golem");
         let out_path = PathBuf::from(r"./test_output");
         let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();

         let grabber = ArticleScraper::new(config_path).unwrap();
-        let article = grabber.parse(url, true).unwrap();
+        let article = grabber.parse(url, true).await.unwrap();
         article.save_html(&out_path).unwrap();

         assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
         assert_eq!(article.author, Some(String::from("Hauke Gierow")));
     }

-    #[test]
-    pub fn phoronix() {
+    #[tokio::test]
+    async fn phoronix() {
         let config_path = PathBuf::from(r"./resources/tests/phoronix");
         let out_path = PathBuf::from(r"./test_output");
         let url = url::Url::parse("http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1").unwrap();

         let grabber = ArticleScraper::new(config_path).unwrap();
-        let article = grabber.parse(url, true).unwrap();
+        let article = grabber.parse(url, true).await.unwrap();
         article.save_html(&out_path).unwrap();

         assert_eq!(article.title, Some(String::from("Amazon EC2 Cloud Benchmarks Against Bare Metal Systems")));