Mirror of https://gitlab.com/news-flash/article_scraper.git

load config files in background thread

Jan Lukas Gernert 2020-01-26 21:44:26 +01:00
parent 2cac8a2678
commit f570873aba
8 changed files with 480 additions and 326 deletions
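The core of this change is moving GrabberConfig::parse_directory off the constructor's calling thread: ArticleScraper::new now spawns a std::thread and the parsed ConfigCollection lands behind an Arc<RwLock<Option<...>>> that the rest of the scraper reads from. Below is a minimal, standalone sketch of that pattern, not the crate's actual API; ConfigStore, load_dir and the String value type are stand-ins invented purely for illustration.

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use std::thread;

type ConfigCollection = HashMap<String, String>;

struct ConfigStore {
    configs: Arc<RwLock<Option<ConfigCollection>>>,
}

impl ConfigStore {
    fn new(config_path: PathBuf) -> Self {
        // Start out empty and return immediately; parsing happens off-thread.
        let configs = Arc::new(RwLock::new(None));
        let shared = configs.clone();
        thread::spawn(move || {
            // Fall back to an empty collection if parsing fails,
            // mirroring the else branch in ArticleScraper::new.
            let parsed = load_dir(&config_path).unwrap_or_default();
            shared
                .write()
                .expect("Failed to lock config file cache")
                .replace(parsed);
        });
        ConfigStore { configs }
    }

    // Clone the entry out of the shared cache, or report None if the
    // background thread has not finished yet (or no entry exists) -- the
    // same "parsed yet?" check this commit adds to get_grabber_config.
    fn get(&self, name: &str) -> Option<String> {
        match &*self.configs.read().expect("Failed to lock config file cache") {
            Some(collection) => collection.get(name).cloned(),
            None => None,
        }
    }
}

// Hypothetical stand-in for GrabberConfig::parse_directory.
fn load_dir(_path: &PathBuf) -> Result<ConfigCollection, std::io::Error> {
    Ok(ConfigCollection::new())
}

fn main() {
    let store = ConfigStore::new(PathBuf::from("./resources/tests/golem"));
    // Immediately after construction the cache may still be empty.
    println!("golem.de.txt -> {:?}", store.get("golem.de.txt"));
}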


@@ -1,69 +1,78 @@
mod article;
mod config;
mod error;
mod article;
pub mod images;
use reqwest;
use url;
use regex;
use log::{
error,
debug,
info,
warn,
};
use self::error::{ScraperError, ScraperErrorKind};
use crate::article::Article;
use libxml::parser::Parser;
use libxml::xpath::Context;
use libxml::tree::{
Document,
Node,
SaveOptions,
};
use std::path::PathBuf;
use std::ops::Index;
use failure::ResultExt;
use std::error::Error;
use crate::config::{
GrabberConfig,
ConfigCollection
};
use encoding_rs::{
Encoding,
};
use chrono::NaiveDateTime;
use std::str::FromStr;
use crate::config::{ConfigCollection, GrabberConfig};
use crate::images::ImageDownloader;
use self::error::{
ScraperError,
ScraperErrorKind
};
use chrono::NaiveDateTime;
use encoding_rs::Encoding;
use failure::ResultExt;
use libxml::parser::Parser;
use libxml::tree::{Document, Node, SaveOptions};
use libxml::xpath::Context;
use log::{debug, error, info, warn};
use regex;
use reqwest;
use std::collections;
use std::error::Error;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::thread;
use url;
pub struct ArticleScraper {
pub image_downloader: ImageDownloader,
config_files: ConfigCollection,
config_files: Arc<RwLock<Option<ConfigCollection>>>,
client: reqwest::Client,
}
impl ArticleScraper {
pub fn new(config_path: PathBuf) -> Result<ArticleScraper, ScraperError> {
let config_files = Arc::new(RwLock::new(None));
let config_files = GrabberConfig::parse_directory(&config_path).context(ScraperErrorKind::Config)?;
let locked_config_files = config_files.clone();
thread::spawn(move || {
if let Ok(config_files) = GrabberConfig::parse_directory(&config_path) {
locked_config_files
.write()
.expect("Failed to lock config file cache")
.replace(config_files);
} else {
locked_config_files
.write()
.expect("Failed to lock config file cache")
.replace(collections::HashMap::new());
}
});
Ok(ArticleScraper {
image_downloader: ImageDownloader::new((2048, 2048)),
config_files: config_files,
config_files,
client: reqwest::Client::new(),
})
}
pub async fn parse(&self, url: url::Url, download_images: bool) -> Result<Article, ScraperError> {
pub async fn parse(
&self,
url: url::Url,
download_images: bool,
) -> Result<Article, ScraperError> {
info!("Scraping article: {}", url.as_str());
let response = self.client.head(url.clone()).send().await
let response = self
.client
.head(url.clone())
.send()
.await
.map_err(|err| {
error!("Failed head request to: {} - {}", url.as_str(), err.description());
error!(
"Failed head request to: {} - {}",
url.as_str(),
err.description()
);
err
})
.context(ScraperErrorKind::Http)?;
@@ -77,7 +86,7 @@ impl ArticleScraper {
// check if we are dealing with text/html
if !ArticleScraper::check_content_type(&response)? {
return Err(ScraperErrorKind::ContentType)?
return Err(ScraperErrorKind::ContentType)?;
}
// check if we have a config for the url
@@ -91,19 +100,16 @@ impl ArticleScraper {
html: None,
};
let mut document = Document::new().map_err(|()| {
ScraperErrorKind::Xml
})?;
let mut document = Document::new().map_err(|()| ScraperErrorKind::Xml)?;
let mut root = Node::new("article", None, &document).map_err(|()| {
ScraperErrorKind::Xml
})?;
let mut root = Node::new("article", None, &document).map_err(|()| ScraperErrorKind::Xml)?;
document.set_root_element(&root);
ArticleScraper::generate_head(&mut root, &document)?;
self.parse_pages(&mut article, &url, &mut root, config).await?;
self.parse_pages(&mut article, &url, &mut root, &config)
.await?;
let context = Context::new(&document).map_err(|()| {
error!("Failed to create xpath context for extracted article");
@@ -112,16 +118,20 @@ impl ArticleScraper {
if let Err(error) = ArticleScraper::prevent_self_closing_tags(&context) {
error!("Preventing self closing tags failed - {}", error);
return Err(error)
return Err(error);
}
if let Err(error) = ArticleScraper::eliminate_noscrip_tag(&context) {
error!("Eliminating <noscript> tag failed - {}", error);
return Err(error)
return Err(error);
}
if download_images {
if let Err(error) = self.image_downloader.download_images_from_context(&context).await {
if let Err(error) = self
.image_downloader
.download_images_from_context(&context)
.await
{
error!("Downloading images failed: {}", error);
}
}
@@ -139,24 +149,34 @@ impl ArticleScraper {
};
let html = document.to_string_with_options(options);
article.html = Some(html);
Ok(article)
}
async fn parse_pages(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
async fn parse_pages(
&self,
article: &mut Article,
url: &url::Url,
root: &mut Node,
config: &GrabberConfig,
) -> Result<(), ScraperError> {
let html = ArticleScraper::download(&url, &self.client).await?;
let mut document = Self::parse_html(html, config)?;
let mut xpath_ctx = Self::get_xpath_ctx(&document)?;
// check for single page link
if let Some(xpath_single_page_link) = config.single_page_link.clone() {
debug!("Single page link xpath specified in config {}", xpath_single_page_link);
debug!(
"Single page link xpath specified in config {}",
xpath_single_page_link
);
if let Ok(result) = xpath_ctx.findvalue(&xpath_single_page_link, None) {
// parse again with single page url
debug!("Single page link found {}", result);
let single_page_url = url::Url::parse(&result).context(ScraperErrorKind::Url)?;
return self.parse_single_page(article, &single_page_url, root, config).await;
return self
.parse_single_page(article, &single_page_url, root, config)
.await;
}
}
@@ -181,7 +201,7 @@ impl ArticleScraper {
fn parse_html(html: String, config: &GrabberConfig) -> Result<Document, ScraperError> {
// replace matches in raw html
let mut html = html;
for replace in &config.replace {
html = html.replace(&replace.to_replace, &replace.replace_with);
@@ -202,7 +222,11 @@ impl ArticleScraper {
})?)
}
pub fn evaluate_xpath(xpath_ctx: &Context, xpath: &str, thorw_if_empty: bool) -> Result<Vec<Node>, ScraperError> {
pub fn evaluate_xpath(
xpath_ctx: &Context,
xpath: &str,
thorw_if_empty: bool,
) -> Result<Vec<Node>, ScraperError> {
let res = xpath_ctx.evaluate(xpath).map_err(|()| {
error!("Evaluation of xpath {} yielded no results", xpath);
ScraperErrorKind::Xml
@@ -213,15 +237,20 @@ impl ArticleScraper {
if node_vec.len() == 0 {
error!("Evaluation of xpath {} yielded no results", xpath);
if thorw_if_empty {
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
Ok(node_vec)
}
async fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
async fn parse_single_page(
&self,
article: &mut Article,
url: &url::Url,
root: &mut Node,
config: &GrabberConfig,
) -> Result<(), ScraperError> {
let html = ArticleScraper::download(&url, &self.client).await?;
let document = Self::parse_html(html, config)?;
let xpath_ctx = Self::get_xpath_ctx(&document)?;
@@ -233,10 +262,16 @@ impl ArticleScraper {
}
async fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
let response = client.get(url.as_str()).send().await
let response = client
.get(url.as_str())
.send()
.await
.map_err(|err| {
error!("Downloading HTML failed: GET {} - {}", url.as_str(), err.description());
error!(
"Downloading HTML failed: GET {} - {}",
url.as_str(),
err.description()
);
err
})
.context(ScraperErrorKind::Http)?;
@@ -245,30 +280,35 @@ impl ArticleScraper {
let headers = response.headers().clone();
let text = response.text().await.context(ScraperErrorKind::Http)?;
{
if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_html(&text)) {
return Ok(decoded_html)
if let Some(decoded_html) = ArticleScraper::decode_html(
&text,
ArticleScraper::get_encoding_from_html(&text),
) {
return Ok(decoded_html);
}
if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(&headers)) {
return Ok(decoded_html)
if let Some(decoded_html) = ArticleScraper::decode_html(
&text,
ArticleScraper::get_encoding_from_http_header(&headers),
) {
return Ok(decoded_html);
}
}
warn!("No encoding of HTML detected - assuming utf-8");
return Ok(text)
return Ok(text);
}
Err(ScraperErrorKind::Http)?
}
fn get_encoding_from_http_header(headers: &reqwest::header::HeaderMap) -> Option<&str> {
if let Some(content_type) = headers.get(reqwest::header::CONTENT_TYPE) {
if let Ok(content_type) = content_type.to_str() {
let regex = regex::Regex::new(r#"charset=([^"']+)"#).unwrap();
if let Some(captures) = regex.captures(content_type) {
if let Some(regex_match) = captures.get(1) {
return Some(regex_match.as_str())
return Some(regex_match.as_str());
}
}
}
@@ -280,20 +320,19 @@ impl ArticleScraper {
let regex = regex::Regex::new(r#"<meta.*?charset=([^"']+)"#).unwrap();
if let Some(captures) = regex.captures(html) {
if let Some(regex_match) = captures.get(1) {
return Some(regex_match.as_str())
return Some(regex_match.as_str());
}
}
None
}
fn decode_html(html: &str, encoding: Option<&str>) -> Option<String> {
if let Some(encoding) = encoding {
if let Some(encoding) = Encoding::for_label(encoding.as_bytes()) {
let (decoded_html, _, invalid_chars) = encoding.decode(html.as_bytes());
if !invalid_chars {
return Some(decoded_html.into_owned())
return Some(decoded_html.into_owned());
}
}
warn!("Could not decode HTML. Encoding: {}", encoding);
@@ -301,46 +340,49 @@ impl ArticleScraper {
None
}
fn get_grabber_config(&self, url: &url::Url) -> Result<&GrabberConfig, ScraperError> {
let config_name = match url.host_str()
{
fn get_grabber_config(&self, url: &url::Url) -> Result<GrabberConfig, ScraperError> {
let config_name = match url.host_str() {
Some(name) => {
let mut name = name;
if name.starts_with("www.") {
name = &name[4..]
}
name
},
}
None => {
error!("Getting config failed due to bad Url");
return Err(ScraperErrorKind::Config)?
},
return Err(ScraperErrorKind::Config)?;
}
};
let config_name = config_name.to_owned() + ".txt";
if !self.config_files.contains_key(&config_name) {
error!("No config file of the name {} fount", config_name);
Err(ScraperErrorKind::Config)?
if let Some(config_files) = &*self.config_files.read().unwrap() {
match config_files.get(&config_name) {
Some(config) => return Ok(config.clone()),
None => {
error!("No config file of the name {} fount", config_name);
Err(ScraperErrorKind::Config)?
}
}
} else {
error!("Config files have not been parsed yet.");
return Err(ScraperErrorKind::Config)?;
}
Ok(self.config_files.index(&config_name))
}
fn check_content_type(response: &reqwest::Response) -> Result<bool, ScraperError> {
if response.status().is_success() {
if let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) {
if let Ok(content_type) = content_type.to_str() {
if content_type.contains("text/html") {
return Ok(true)
return Ok(true);
}
}
}
error!("Content type is not text/HTML");
return Ok(false)
return Ok(false);
}
error!("Failed to determine content type");
@@ -348,10 +390,9 @@ impl ArticleScraper {
}
fn check_redirect(response: &reqwest::Response) -> Option<url::Url> {
if response.status() == reqwest::StatusCode::PERMANENT_REDIRECT {
debug!("Article url redirects to {}", response.url().as_str());
return Some(response.url().clone())
return Some(response.url().clone());
}
None
@@ -360,9 +401,9 @@ impl ArticleScraper {
fn extract_value(context: &Context, xpath: &str) -> Result<String, ScraperError> {
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
if let Some(val) = node_vec.get(0) {
return Ok(val.get_content())
return Ok(val.get_content());
}
Err(ScraperErrorKind::Xml)?
}
@@ -372,8 +413,8 @@ impl ArticleScraper {
for node in node_vec {
val.push_str(&node.get_content());
}
return Ok(val.trim().to_string())
return Ok(val.trim().to_string());
}
fn strip_node(context: &Context, xpath: &String) -> Result<(), ScraperError> {
@@ -391,7 +432,10 @@ impl ArticleScraper {
}
fn strip_id_or_class(context: &Context, id_or_class: &String) -> Result<(), ScraperError> {
let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
let xpath = &format!(
"//*[contains(@class, '{}') or contains(@id, '{}')]",
id_or_class, id_or_class
);
let mut ancestor = xpath.clone();
if ancestor.starts_with("//") {
@@ -406,8 +450,11 @@ impl ArticleScraper {
Ok(())
}
fn fix_lazy_images(context: &Context, class: &str, property_url: &str) -> Result<(), ScraperError> {
fn fix_lazy_images(
context: &Context,
class: &str,
property_url: &str,
) -> Result<(), ScraperError> {
let xpath = &format!("//img[contains(@class, '{}')]", class);
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
@@ -421,7 +468,6 @@ impl ArticleScraper {
}
fn fix_iframe_size(context: &Context, site_name: &str) -> Result<(), ScraperError> {
let xpath = &format!("//iframe[contains(@src, '{}')]", site_name);
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
@@ -439,70 +485,86 @@ impl ArticleScraper {
}
}
}
error!("Failed to add video wrapper <div> as parent of iframe");
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
error!("Failed to get parent of iframe");
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
Ok(())
}
fn remove_attribute(context: &Context, tag: Option<&str>, attribute: &str) -> Result<(), ScraperError> {
fn remove_attribute(
context: &Context,
tag: Option<&str>,
attribute: &str,
) -> Result<(), ScraperError> {
let xpath_tag = match tag {
Some(tag) => tag,
None => "*"
None => "*",
};
let xpath = &format!("//{}[@{}]", xpath_tag, attribute);
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
if let Err(_) = node.remove_property(attribute) {
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
Ok(())
}
fn add_attribute(context: &Context, tag: Option<&str>, attribute: &str, value: &str) -> Result<(), ScraperError> {
fn add_attribute(
context: &Context,
tag: Option<&str>,
attribute: &str,
value: &str,
) -> Result<(), ScraperError> {
let xpath_tag = match tag {
Some(tag) => tag,
None => "*"
None => "*",
};
let xpath = &format!("//{}", xpath_tag);
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
if let Err(_) = node.set_attribute(attribute, value) {
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
Ok(())
}
fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
fn get_attribute(
context: &Context,
xpath: &str,
attribute: &str,
) -> Result<String, ScraperError> {
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for node in node_vec {
if let Some(value) = node.get_attribute(attribute) {
return Ok(value)
return Ok(value);
}
}
Err(ScraperErrorKind::Xml)?
}
fn repair_urls(context: &Context, xpath: &str, attribute: &str, article_url: &url::Url) -> Result<(), ScraperError> {
fn repair_urls(
context: &Context,
xpath: &str,
attribute: &str,
article_url: &url::Url,
) -> Result<(), ScraperError> {
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
if let Some(val) = node.get_attribute(attribute) {
if let Err(url::ParseError::RelativeUrlWithoutBase) = url::Url::parse(&val) {
if let Ok(fixed_url) = ArticleScraper::complete_url(article_url, &val) {
if let Err(_) = node.set_attribute(attribute, fixed_url.as_str()) {
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
}
@@ -511,8 +573,10 @@ impl ArticleScraper {
Ok(())
}
fn complete_url(article_url: &url::Url, incomplete_url: &str) -> Result<url::Url, ScraperError> {
fn complete_url(
article_url: &url::Url,
incomplete_url: &str,
) -> Result<url::Url, ScraperError> {
let mut completed_url = article_url.scheme().to_owned();
completed_url.push(':');
@@ -522,17 +586,16 @@ impl ArticleScraper {
completed_url.push_str("//");
completed_url.push_str(host);
}
_ => return Err(ScraperErrorKind::Url)?
_ => return Err(ScraperErrorKind::Url)?,
};
}
completed_url.push_str(incomplete_url);
let url = url::Url::parse(&completed_url).context(ScraperErrorKind::Url)?;
return Ok(url)
return Ok(url);
}
fn strip_junk(context: &Context, config: &GrabberConfig, url: &url::Url) {
// strip specified xpath
for xpath_strip in &config.xpath_strip {
let _ = ArticleScraper::strip_node(&context, xpath_strip);
@@ -545,7 +608,10 @@ impl ArticleScraper {
// strip any <img> element where @src attribute contains this substring
for xpath_strip_img_src in &config.strip_image_src {
let _ = ArticleScraper::strip_node(&context, &format!("//img[contains(@src,'{}')]", xpath_strip_img_src));
let _ = ArticleScraper::strip_node(
&context,
&format!("//img[contains(@src,'{}')]", xpath_strip_img_src),
);
}
let _ = ArticleScraper::fix_lazy_images(&context, "lazyload", "data-src");
@@ -563,13 +629,16 @@ impl ArticleScraper {
let _ = ArticleScraper::repair_urls(&context, "//iframe", "src", &url);
// strip elements using Readability.com and Instapaper.com ignore class names
// .entry-unrelated and .instapaper_ignore
// See http://blog.instapaper.com/post/730281947
let _ = ArticleScraper::strip_node(&context, &String::from(
"//*[contains(@class,' entry-unrelated ') or contains(@class,' instapaper_ignore ')]"));
// strip elements that contain style="display: none;"
let _ = ArticleScraper::strip_node(&context, &String::from("//*[contains(@style,'display:none')]"));
let _ = ArticleScraper::strip_node(
&context,
&String::from("//*[contains(@style,'display:none')]"),
);
// strip all scripts
let _ = ArticleScraper::strip_node(&context, &String::from("//script"));
@@ -580,12 +649,11 @@ impl ArticleScraper {
// strip all empty url-tags <a/>
let _ = ArticleScraper::strip_node(&context, &String::from("//a[not(node())]"));
// strip all external css and fonts
let _ = ArticleScraper::strip_node(&context, &String::from("//*[@type='text/css']"));
}
fn extract_metadata(context: &Context, config: &GrabberConfig, article: &mut Article) {
// try to get title
for xpath_title in &config.xpath_title {
if let Ok(title) = ArticleScraper::extract_value_merge(&context, xpath_title) {
@@ -611,47 +679,51 @@ impl ArticleScraper {
if let Ok(date) = NaiveDateTime::from_str(&date_string) {
article.date = Some(date);
break;
}
else {
} else {
warn!("Parsing the date string '{}' failed", date_string);
}
}
}
}
fn extract_body(context: &Context, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
fn extract_body(
context: &Context,
root: &mut Node,
config: &GrabberConfig,
) -> Result<(), ScraperError> {
let mut found_something = false;
for xpath_body in &config.xpath_body {
found_something = ArticleScraper::extract_body_single(&context, root, xpath_body)?;
}
if !found_something {
return Err(ScraperErrorKind::Scrape)?
return Err(ScraperErrorKind::Scrape)?;
}
Ok(())
}
fn extract_body_single(context: &Context, root: &mut Node, xpath: &str) -> Result<bool, ScraperError> {
fn extract_body_single(
context: &Context,
root: &mut Node,
xpath: &str,
) -> Result<bool, ScraperError> {
let mut found_something = false;
{
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
if node.get_property("style").is_some() {
if let Err(_) = node.remove_property("style") {
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
node.unlink();
if let Ok(_) = root.add_child(&mut node) {
found_something = true;
}
else {
} else {
error!("Failed to add body to prepared document");
return Err(ScraperErrorKind::Xml)?
return Err(ScraperErrorKind::Xml)?;
}
}
}
@@ -660,11 +732,12 @@ impl ArticleScraper {
}
fn check_for_next_page(&self, context: &Context, config: &GrabberConfig) -> Option<url::Url> {
if let Some(next_page_xpath) = config.next_page_link.clone() {
if let Ok(next_page_string) = ArticleScraper::get_attribute(&context, &next_page_xpath, "href") {
if let Ok(next_page_string) =
ArticleScraper::get_attribute(&context, &next_page_xpath, "href")
{
if let Ok(next_page_url) = url::Url::parse(&next_page_string) {
return Some(next_page_url)
return Some(next_page_url);
}
}
}
@@ -674,12 +747,11 @@ impl ArticleScraper {
}
fn generate_head(root: &mut Node, document: &Document) -> Result<(), ScraperError> {
if let Ok(mut head_node) = Node::new("head", None, document) {
if let Ok(()) = root.add_prev_sibling(&mut head_node) {
if let Ok(mut meta) = head_node.new_child(None, "meta") {
if let Ok(_) = meta.set_property("charset", "utf-8") {
return Ok(())
return Ok(());
}
}
}
@@ -689,7 +761,6 @@ impl ArticleScraper {
}
fn prevent_self_closing_tags(context: &Context) -> Result<(), ScraperError> {
// search document for empty tags and add a empty text node as child
// this prevents libxml from self closing non void elements such as iframe
@@ -697,7 +768,7 @@ impl ArticleScraper {
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
for mut node in node_vec {
if node.get_name() == "meta" {
continue
continue;
}
let _ = node.add_text_child(None, "empty", "");
@@ -707,7 +778,6 @@ impl ArticleScraper {
}
fn eliminate_noscrip_tag(context: &Context) -> Result<(), ScraperError> {
let xpath = "//noscript";
let node_vec = Self::evaluate_xpath(context, xpath, false)?;
@@ -720,18 +790,16 @@ impl ArticleScraper {
let _ = parent.add_child(&mut child);
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::*;
#[tokio::test(basic_scheduler)]
async fn golem() {
let config_path = PathBuf::from(r"./resources/tests/golem");
@@ -742,7 +810,12 @@ mod tests {
let article = grabber.parse(url, true).await.unwrap();
article.save_html(&out_path).unwrap();
assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
assert_eq!(
article.title,
Some(String::from(
"HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben"
))
);
assert_eq!(article.author, Some(String::from("Hauke Gierow")));
}
@@ -750,12 +823,20 @@ mod tests {
async fn phoronix() {
let config_path = PathBuf::from(r"./resources/tests/phoronix");
let out_path = PathBuf::from(r"./test_output");
let url = url::Url::parse("http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1").unwrap();
let url = url::Url::parse(
"http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1",
)
.unwrap();
let grabber = ArticleScraper::new(config_path).unwrap();
let article = grabber.parse(url, true).await.unwrap();
article.save_html(&out_path).unwrap();
assert_eq!(article.title, Some(String::from("Amazon EC2 Cloud Benchmarks Against Bare Metal Systems")));
assert_eq!(
article.title,
Some(String::from(
"Amazon EC2 Cloud Benchmarks Against Bare Metal Systems"
))
);
}
}
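One behavioural consequence visible in get_grabber_config above: a parse() call issued right after ArticleScraper::new() can now fail with ScraperErrorKind::Config simply because the background thread has not filled the cache yet ("Config files have not been parsed yet."). A caller-side retry is one way to ride out that window; the snippet below is only an illustrative sketch of such a consumer, with scrape_once standing in for a parse attempt. None of these names exist in the crate.

use std::thread;
use std::time::Duration;

// Stand-in for an ArticleScraper::parse attempt: fails with a config error
// until the (simulated) background load has finished.
fn scrape_once(attempt: u32) -> Result<String, &'static str> {
    if attempt < 3 {
        Err("Config files have not been parsed yet.")
    } else {
        Ok(String::from("<article>…</article>"))
    }
}

fn main() {
    let mut result = Err("never ran");
    for attempt in 0..5 {
        result = scrape_once(attempt);
        if result.is_ok() {
            break;
        }
        // Give the background config loader a moment before retrying.
        thread::sleep(Duration::from_millis(100));
    }
    println!("{:?}", result);
}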