mirror of https://gitlab.com/news-flash/article_scraper.git synced 2025-07-10 01:15:31 +02:00

initial commit

Jan Lukas Gernert 2018-07-31 16:10:09 +02:00
commit 4b2e6a24eb
17 changed files with 2423 additions and 0 deletions

src/article.rs (new file)

@@ -0,0 +1,39 @@
use std;
use url::Url;
use std::path::PathBuf;
use chrono::NaiveDateTime;
use error::{
ScraperError,
ScraperErrorKind,
};
use std::io::Write;
use failure::ResultExt;
pub struct Article {
pub title: Option<String>,
pub author: Option<String>,
pub url: Url,
pub date: Option<NaiveDateTime>,
pub html: Option<String>,
}
impl Article {
pub fn save_html(&self, path: &PathBuf) -> Result<(), ScraperError> {
if let Some(ref html) = self.html {
if let Ok(()) = std::fs::create_dir_all(&path) {
let mut file_name = match self.title.clone() {
Some(file_name) => file_name,
None => "Unknown Title".to_owned(),
};
file_name.push_str(".html");
let path = path.join(file_name);
let mut html_file = std::fs::File::create(&path).context(ScraperErrorKind::IO)?;
html_file.write_all(html.as_bytes()).context(ScraperErrorKind::IO)?;
// article was written to disk successfully
return Ok(())
}
}
Err(ScraperErrorKind::Unknown)?
}
}
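
As an aside (not part of this commit), a minimal sketch of how save_html is meant to be called from inside the crate; the title, URL, and output path are made up for illustration:

let article = Article {
    title: Some("Example Article".to_owned()),
    author: None,
    url: Url::parse("https://example.com/post").unwrap(),
    date: None,
    html: Some("<html><body>Hello</body></html>".to_owned()),
};
// creates ./output/ if necessary and writes ./output/Example Article.html
article.save_html(&PathBuf::from("./output")).unwrap();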

src/config/error.rs (new file)

@@ -0,0 +1,57 @@
use failure::{Context, Fail, Backtrace, Error};
use std::fmt;
#[derive(Debug)]
pub struct ConfigError {
inner: Context<ConfigErrorKind>,
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ConfigErrorKind {
#[fail(display = "IO Error")]
IO,
#[fail(display = "Config does not contain body xpath")]
BadConfig,
#[fail(display = "Unknown Error")]
Unknown,
}
impl Fail for ConfigError {
fn cause(&self) -> Option<&Fail> {
self.inner.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace()
}
}
impl fmt::Display for ConfigError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.inner, f)
}
}
// impl ConfigError {
// pub fn kind(&self) -> ConfigErrorKind {
// *self.inner.get_context()
// }
// }
impl From<ConfigErrorKind> for ConfigError {
fn from(kind: ConfigErrorKind) -> ConfigError {
ConfigError { inner: Context::new(kind) }
}
}
impl From<Context<ConfigErrorKind>> for ConfigError {
fn from(inner: Context<ConfigErrorKind>) -> ConfigError {
ConfigError { inner: inner }
}
}
impl From<Error> for ConfigError {
fn from(_: Error) -> ConfigError {
ConfigError { inner: Context::new(ConfigErrorKind::Unknown) }
}
}

src/config/macros.rs (new file)

@@ -0,0 +1,43 @@
macro_rules! extract_vec_multi {
(
$line: ident,
$identifier: ident,
$vector: ident
) => {
if $line.starts_with($identifier) {
let value = GrabberConfig::extract_value($identifier, $line);
let value = GrabberConfig::split_values(value);
let value: Vec<String> = value.iter().map(|s| s.trim().to_string()).collect();
$vector.extend(value);
continue;
}
};
}
macro_rules! extract_vec_single {
(
$line: ident,
$identifier: ident,
$vector: ident
) => {
if $line.starts_with($identifier) {
let value = GrabberConfig::extract_value($identifier, $line);
$vector.push(value.to_string());
continue;
}
};
}
macro_rules! extract_option_single {
(
$line: ident,
$identifier: ident,
$option: ident
) => {
if $line.starts_with($identifier) {
let value = GrabberConfig::extract_value($identifier, $line);
$option = Some(value.to_string());
continue;
}
};
}
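
For illustration only (not part of the commit), a call like extract_vec_multi!(line, title, xpath_title) in config/mod.rs expands to roughly the following, splitting a pipe-separated value list into the target vector:

if line.starts_with(title) {
    let value = GrabberConfig::extract_value(title, line);
    let value = GrabberConfig::split_values(value);
    let value: Vec<String> = value.iter().map(|s| s.trim().to_string()).collect();
    xpath_title.extend(value);
    continue;
}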

src/config/mod.rs (new file)

@@ -0,0 +1,188 @@
use std::collections;
use std::path::{PathBuf};
use std::fs;
use std::io;
use std::io::BufRead;
use failure::ResultExt;
use self::error::{ConfigError, ConfigErrorKind};
#[macro_use]
mod macros;
mod error;
pub type ConfigCollection = collections::HashMap<String, GrabberConfig>;
pub struct Replace {
pub to_replace: String,
pub replace_with: String,
}
pub struct GrabberConfig {
pub xpath_title: Vec<String>,
pub xpath_author: Vec<String>,
pub xpath_date: Vec<String>,
pub xpath_body: Vec<String>,
pub xpath_strip: Vec<String>,
pub strip_id_or_class: Vec<String>,
pub strip_image_src: Vec<String>,
pub replace: Vec<Replace>,
pub single_page_link: Option<String>,
pub next_page_link: Option<String>,
}
impl GrabberConfig {
pub fn parse_directory(directory: &PathBuf) -> Result<ConfigCollection, ConfigError> {
let paths = fs::read_dir(directory).context(ConfigErrorKind::IO)?;
let mut collection: collections::HashMap<String, GrabberConfig> = collections::HashMap::new();
for path in paths {
if let Ok(path) = path {
if let Some(extension) = path.path().extension() {
if let Some(extension) = extension.to_str() {
if extension == "txt" {
if let Ok(config) = GrabberConfig::new(path.path()) {
collection.insert(path.file_name().to_string_lossy().into_owned(), config);
}
}
}
}
}
}
Ok(collection)
}
fn new(config_path: PathBuf) -> Result<GrabberConfig, ConfigError> {
let file = fs::File::open(&config_path).context(ConfigErrorKind::IO)?;
let buffer = io::BufReader::new(&file);
let mut xpath_title: Vec<String> = Vec::new();
let mut xpath_author: Vec<String> = Vec::new();
let mut xpath_date: Vec<String> = Vec::new();
let mut xpath_body: Vec<String> = Vec::new();
let mut xpath_strip: Vec<String> = Vec::new();
let mut strip_id_or_class: Vec<String> = Vec::new();
let mut strip_image_src: Vec<String> = Vec::new();
let mut replace_vec: Vec<Replace> = Vec::new();
let mut next_page_link: Option<String> = None;
let mut single_page_link: Option<String> = None;
// ignore: tidy, prune, autodetect_on_failure and test_url
let title = "title:";
let body = "body:";
let date = "date:";
let author = "author:";
let strip = "strip:";
let strip_id = "strip_id_or_class:";
let strip_img = "strip_image_src:";
let single_page = "single_page_link:";
let next_page = "next_page_link:";
let find = "find_string:";
let replace = "replace_string:";
let replace_single = "replace_string(";
// ignore these
let tidy = "tidy:";
let prune = "prune:";
let test_url = "test_url:";
let autodetect = "autodetect_on_failure:";
let mut iterator = buffer.lines().peekable();
while let Some(Ok(line)) = iterator.next() {
let line = line.trim();
if line.starts_with("#")
|| line.starts_with(tidy)
|| line.starts_with(prune)
|| line.starts_with(test_url)
|| line.starts_with(autodetect)
|| line.is_empty() {
continue;
}
extract_vec_multi!(line, title, xpath_title);
extract_vec_multi!(line, body, xpath_body);
extract_vec_multi!(line, date, xpath_date);
extract_vec_multi!(line, author, xpath_author);
extract_vec_single!(line, strip, xpath_strip);
extract_vec_single!(line, strip_id, strip_id_or_class);
extract_vec_single!(line, strip_img, strip_image_src);
extract_option_single!(line, single_page, single_page_link);
extract_option_single!(line, next_page, next_page_link);
if line.starts_with(replace_single) {
let value = GrabberConfig::extract_value(replace_single, line);
let value: Vec<&str> = value.split("): ").map(|s| s.trim()).collect();
if value.len() != 2 {
continue;
}
if let Some(to_replace) = value.get(0) {
if let Some(replace_with) = value.get(1) {
replace_vec.push(
Replace {
to_replace: to_replace.to_string(),
replace_with: replace_with.to_string(),
}
);
}
}
continue;
}
if line.starts_with(find) {
let value1 = GrabberConfig::extract_value(find, line);
if let Some(&Ok(ref next_line)) = iterator.peek() {
let value2 = GrabberConfig::extract_value(replace, &next_line);
let r = Replace {
to_replace: value1.to_string(),
replace_with: value2.to_string(),
};
replace_vec.push(r);
}
continue;
}
}
if xpath_body.len() == 0 {
error!("No body xpath found for {}", config_path.display());
Err(ConfigErrorKind::BadConfig)?
}
let config = GrabberConfig {
xpath_title: xpath_title,
xpath_author: xpath_author,
xpath_date: xpath_date,
xpath_body: xpath_body,
xpath_strip: xpath_strip,
strip_id_or_class: strip_id_or_class,
strip_image_src: strip_image_src,
replace: replace_vec,
single_page_link: single_page_link,
next_page_link: next_page_link,
};
Ok(config)
}
fn extract_value<'a>(identifier: &str, line: &'a str) -> &'a str {
let value = &line[identifier.len()..];
let value = value.trim();
match value.find('#') {
Some(pos) => &value[..pos],
None => value,
}
}
fn split_values<'a>(values: &'a str) -> Vec<&'a str> {
values.split('|').map(|s| s.trim()).collect()
}
}
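
To make the parser's expectations concrete, here is a hypothetical example.com.txt that this code would accept. The xpaths are invented; the keywords, the '|' separator for multi-value fields, the find_string/replace_string pair, and '#' comments are the ones handled above:

# example.com.txt (hypothetical site config)
title: //h1[@class='headline'] | //header//h1
author: //span[@class='byline']
date: //time/@datetime
body: //div[@class='article-body']
strip: //div[@class='ads']
strip_id_or_class: comments
strip_image_src: pixel.gif
single_page_link: //a[@class='single-page']/@href
find_string: <br><br>
replace_string: <p>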

src/error.rs (new file)

@@ -0,0 +1,67 @@
use failure::{Context, Fail, Backtrace, Error};
use std::fmt;
#[derive(Debug)]
pub struct ScraperError {
inner: Context<ScraperErrorKind>,
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ScraperErrorKind {
#[fail(display = "libXml Error")]
Xml,
#[fail(display = "No content found")]
Scrape,
#[fail(display = "Url Error")]
Url,
#[fail(display = "Http request failed")]
Http,
#[fail(display = "Config Error")]
Config,
#[fail(display = "IO Error")]
IO,
#[fail(display = "Content-type suggests no HTML")]
ContentType,
#[fail(display = "Unknown Error")]
Unknown,
}
impl Fail for ScraperError {
fn cause(&self) -> Option<&Fail> {
self.inner.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace()
}
}
impl fmt::Display for ScraperError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.inner, f)
}
}
impl ScraperError {
pub fn kind(&self) -> ScraperErrorKind {
*self.inner.get_context()
}
}
impl From<ScraperErrorKind> for ScraperError {
fn from(kind: ScraperErrorKind) -> ScraperError {
ScraperError { inner: Context::new(kind) }
}
}
impl From<Context<ScraperErrorKind>> for ScraperError {
fn from(inner: Context<ScraperErrorKind>) -> ScraperError {
ScraperError { inner: inner }
}
}
impl From<Error> for ScraperError {
fn from(_: Error) -> ScraperError {
ScraperError { inner: Context::new(ScraperErrorKind::Unknown) }
}
}
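
Not part of the commit, but to illustrate the failure-based pattern these impls enable: any error implementing Fail can be wrapped with a ScraperErrorKind via ResultExt::context and propagated with ?, which is how the rest of the crate uses this type. The helper below is hypothetical:

use failure::ResultExt;

fn read_raw_config(path: &std::path::Path) -> Result<String, ScraperError> {
    // the io::Error becomes the cause, ScraperErrorKind::IO the context
    let content = std::fs::read_to_string(path).context(ScraperErrorKind::IO)?;
    Ok(content)
}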

src/images/error.rs (new file)

@@ -0,0 +1,83 @@
use failure::{Context, Fail, Backtrace, Error};
use std::fmt;
use super::super::ScraperErrorKind;
#[derive(Debug)]
pub struct ImageDownloadError {
inner: Context<ImageDownloadErrorKind>,
}
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ImageDownloadErrorKind {
#[fail(display = "Parsing the supplied html string failed")]
HtmlParse,
#[fail(display = "Scaling down a downloaded image failed")]
ImageScale,
#[fail(display = "Downloading the parent element of an image failed")]
ParentDownload,
#[fail(display = "Generating image name failed")]
ImageName,
#[fail(display = "Getting the content-length property failed")]
ContentLenght,
#[fail(display = "Content-type suggests no image")]
ContentType,
#[fail(display = "Http error")]
Http,
#[fail(display = "IO error")]
IO,
#[fail(display = "Invalid URL")]
InvalidUrl,
#[fail(display = "Unknown Error")]
Unknown,
}
impl Fail for ImageDownloadError {
fn cause(&self) -> Option<&Fail> {
self.inner.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace()
}
}
impl fmt::Display for ImageDownloadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.inner, f)
}
}
impl ImageDownloadError {
pub fn kind(&self) -> ImageDownloadErrorKind {
*self.inner.get_context()
}
}
impl From<ImageDownloadErrorKind> for ImageDownloadError {
fn from(kind: ImageDownloadErrorKind) -> ImageDownloadError {
ImageDownloadError { inner: Context::new(kind) }
}
}
impl From<Context<ImageDownloadErrorKind>> for ImageDownloadError {
fn from(inner: Context<ImageDownloadErrorKind>) -> ImageDownloadError {
ImageDownloadError { inner: inner }
}
}
impl From<ScraperErrorKind> for ImageDownloadError {
fn from(kind: ScraperErrorKind) -> ImageDownloadError {
let kind = match kind {
ScraperErrorKind::Xml => ImageDownloadErrorKind::HtmlParse,
_ => ImageDownloadErrorKind::Unknown,
};
ImageDownloadError { inner: Context::new(kind) }
}
}
impl From<Error> for ImageDownloadError {
fn from(_: Error) -> ImageDownloadError {
ImageDownloadError { inner: Context::new(ImageDownloadErrorKind::Unknown) }
}
}

src/images/mod.rs (new file)

@@ -0,0 +1,280 @@
use std::path::PathBuf;
use reqwest;
use libxml::parser::Parser;
use libxml::xpath::Context;
use libxml::tree::Node;
use url;
use failure::ResultExt;
use std::error::Error;
use self::error::{ImageDownloadError, ImageDownloadErrorKind};
use base64;
use std;
use image;
use mime_guess;
use super::ScraperErrorKind;
mod error;
pub struct ImageDownloader {
save_image_path: PathBuf,
client: reqwest::Client,
max_size: (u32, u32),
scale_size: (u32, u32),
}
impl ImageDownloader {
pub fn new(save_image_path: PathBuf, max_size: (u32, u32), scale_size: (u32, u32)) -> ImageDownloader {
ImageDownloader {
save_image_path: save_image_path,
client: reqwest::Client::new(),
max_size: max_size,
scale_size: scale_size,
}
}
pub fn download_images_from_string(&self, html: &str, article_url: &url::Url) -> Result<String, ImageDownloadError> {
let parser = Parser::default_html();
let doc = match parser.parse_string(html) {
Ok(doc) => doc,
Err(_) => {
error!("Failed to parse HTML string");
return Err(ImageDownloadErrorKind::HtmlParse)?
}
};
let xpath_ctx = match Context::new(&doc) {
Ok(context) => context,
Err(_) => {
error!("Failed to create xpath context for document");
return Err(ImageDownloadErrorKind::HtmlParse)?
}
};
self.download_images_from_context(&xpath_ctx, article_url)?;
Ok(doc.to_string(/*format:*/ false))
}
pub fn download_images_from_context(&self, context: &Context, article_url: &url::Url) -> Result<(), ImageDownloadError> {
let xpath = "//img";
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Some(url) = node.get_property("src") {
let url = url::Url::parse(&url).context(ImageDownloadErrorKind::InvalidUrl)?;
let parent_url_result = match self.check_image_parent(&node, &url) {
Ok(url) => Some(url),
Err(_) => None,
};
if let Some(parent_url) = parent_url_result.clone() {
if let Ok(path) = self.save_image(&parent_url, article_url) {
if let Some(path) = path.to_str() {
if let Err(_) = node.set_property("parent_img", path) {
return Err(ImageDownloadErrorKind::HtmlParse)?;
}
}
}
}
let mut img_path = self.save_image(&url, article_url)?;
if let Some((width, height)) = ImageDownloader::get_image_dimensions(&node) {
if width > self.max_size.0 || height > self.max_size.1 {
if let Ok(small_img_path) = ImageDownloader::scale_image(&img_path, self.scale_size.0, self.scale_size.1) {
if parent_url_result.is_none() {
if let Some(img_path) = img_path.to_str() {
if let Err(_) = node.set_property("big_img", img_path) {
return Err(ImageDownloadErrorKind::HtmlParse)?;
}
}
img_path = small_img_path;
}
}
}
}
if let Some(img_path) = img_path.to_str() {
if let Err(_) = node.set_property("src", img_path) {
return Err(ImageDownloadErrorKind::HtmlParse)?;
}
}
}
}
Ok(())
}
fn save_image(&self, image_url: &url::Url, article_url: &url::Url) -> Result<PathBuf, ImageDownloadError> {
let mut response = match self.client.get(image_url.clone()).send() {
Ok(response) => response,
Err(error) => {
error!("GET {} failed - {}", image_url.as_str(), error.description());
Err(error).context(ImageDownloadErrorKind::Http)?
}
};
let content_type = ImageDownloader::check_image_content_type(&response)?;
if let Some(host) = article_url.host_str() {
let folder_name = base64::encode(article_url.as_str()).replace("/", "_");
let path = self.save_image_path.join(host);
let path = path.join(folder_name);
if let Ok(()) = std::fs::create_dir_all(&path) {
let file_name = ImageDownloader::extract_image_name(image_url, content_type)?;
let path = path.join(file_name);
let mut image_buffer = match std::fs::File::create(&path) {
Ok(buffer) => buffer,
Err(error) => {
error!("Failed to create file {}", path.display());
Err(error).context(ImageDownloadErrorKind::IO)?
}
};
response.copy_to(&mut image_buffer).context(ImageDownloadErrorKind::IO)?;
let path = std::fs::canonicalize(&path).context(ImageDownloadErrorKind::IO)?;
return Ok(path)
}
}
Err(ImageDownloadErrorKind::InvalidUrl)?
}
fn check_image_content_type(response: &reqwest::Response) -> Result<reqwest::header::ContentType, ImageDownloadError> {
if response.status().is_success() {
if let Some(content_type) = response.headers().get::<reqwest::header::ContentType>() {
if content_type.type_() == reqwest::mime::IMAGE {
return Ok(content_type.clone())
}
}
error!("{} is not an image", response.url());
return Err(ImageDownloadErrorKind::ContentType)?
}
Err(ImageDownloadErrorKind::Http)?
}
fn get_content_lenght(response: &reqwest::Response) -> Result<u64, ImageDownloadError> {
if response.status().is_success() {
if let Some(&reqwest::header::ContentLength(content_length)) = response.headers().get::<reqwest::header::ContentLength>() {
return Ok(content_length)
}
}
Err(ImageDownloadErrorKind::ContentLenght)?
}
fn get_image_dimensions(node: &Node) -> Option<(u32, u32)> {
if let Some(width) = node.get_property("width") {
if let Some(height) = node.get_property("height") {
if let Ok(width) = width.parse::<u32>() {
if let Ok(height) = height.parse::<u32>() {
if width > 1 && height > 1 {
return Some((width, height))
}
}
}
}
}
debug!("Image dimensions not available");
None
}
fn extract_image_name(url: &url::Url, content_type: reqwest::header::ContentType) -> Result<String, ImageDownloadError> {
if let Some(file_name) = url.path_segments().and_then(|segments| segments.last()) {
let mut image_name = file_name.to_owned();
if let Some(query) = url.query() {
image_name.push_str("_");
image_name.push_str(query);
}
let primary_type = content_type.type_().as_str();
let mut sub_type = content_type.subtype().as_str().to_owned();
if let Some(suffix) = content_type.suffix() {
sub_type.push_str("+");
sub_type.push_str(suffix.as_str());
}
if let Some(extensions) = mime_guess::get_extensions(primary_type, &sub_type) {
let mut extension_present = false;
for extension in extensions {
if image_name.ends_with(extension) {
extension_present = true;
break;
}
}
if !extension_present {
image_name.push_str(".");
image_name.push_str(extensions[0]);
}
}
return Ok(image_name)
}
error!("Could not generate image name for {}", url.as_str());
Err(ImageDownloadErrorKind::ImageName)?
}
fn check_image_parent(&self, node: &Node, child_url: &url::Url) -> Result<url::Url, ImageDownloadError> {
if let Some(parent) = node.get_parent() {
if parent.get_name() == "a" {
if let Some(url) = parent.get_property("href") {
let parent_url = url::Url::parse(&url).context(ImageDownloadErrorKind::ParentDownload)?;
let parent_response = self.client.head(parent_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
let _ = ImageDownloader::check_image_content_type(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
let child_response = self.client.get(child_url.clone()).send().context(ImageDownloadErrorKind::ParentDownload)?;
let parent_length = ImageDownloader::get_content_lenght(&parent_response).context(ImageDownloadErrorKind::ParentDownload)?;
let child_length = ImageDownloader::get_content_lenght(&child_response).context(ImageDownloadErrorKind::ParentDownload)?;
if parent_length > child_length {
return Ok(parent_url)
}
return Ok(child_url.clone())
}
}
}
debug!("Image parent element not relevant");
Err(ImageDownloadErrorKind::ParentDownload)?
}
fn scale_image(image_path: &PathBuf, max_width: u32, max_height: u32) -> Result<PathBuf, ImageDownloadError> {
let image = match image::open(image_path) {
Ok(image) => image,
Err(error) => {
error!("Failed to open image to resize: {:?}", image_path);
return Err(error).context(ImageDownloadErrorKind::ImageScale)?
}
};
let image = image.resize(max_width, max_height, image::FilterType::Lanczos3);
if let Some(file_name) = image_path.file_name() {
let mut file_name = file_name.to_os_string();
file_name.push("_resized");
let mut resized_path = image_path.clone();
resized_path.set_file_name(file_name);
if let Err(error) = image.save(&resized_path) {
error!("Failed to write resized image to disk.");
return Err(error).context(ImageDownloadErrorKind::ImageScale)?
}
return Ok(resized_path)
}
Err(ImageDownloadErrorKind::ImageScale)?
}
}
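
A usage sketch, not part of the commit: the size tuples mirror the ones lib.rs passes in, everything else (paths, URL, HTML) is made up:

let downloader = ImageDownloader::new(
    PathBuf::from("./image_cache"),
    (2000, 2000), // images larger than this get a scaled copy
    (1000, 800),  // target size of the scaled copy
);
let article_url = url::Url::parse("https://example.com/post").unwrap();
let html = "<html><body><img src=\"https://example.com/header.png\"/></body></html>";
// downloads the image, stores it under ./image_cache/example.com/<base64>/ and
// returns the HTML with the src attribute rewritten to the local file
let fixed_html = downloader.download_images_from_string(html, &article_url).unwrap();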

src/lib.rs (new file)

@@ -0,0 +1,723 @@
#[macro_use]
extern crate failure;
extern crate libxml;
extern crate reqwest;
extern crate url;
extern crate regex;
extern crate encoding_rs;
extern crate html2text;
extern crate htmlescape;
extern crate base64;
extern crate image;
extern crate chrono;
extern crate mime_guess;
#[macro_use]
extern crate log;
#[macro_use]
mod macros;
mod config;
mod error;
mod article;
pub mod images;
use article::Article;
use libxml::parser::Parser;
use libxml::xpath::Context;
use libxml::tree::{
Document,
Node
};
use std::path::PathBuf;
use std::ops::Index;
use failure::ResultExt;
use std::error::Error;
use config::{
GrabberConfig,
ConfigCollection
};
use encoding_rs::*;
use chrono::NaiveDateTime;
use std::str::FromStr;
use images::ImageDownloader;
use self::error::{
ScraperError,
ScraperErrorKind
};
pub struct ArticleScraper {
image_downloader: ImageDownloader,
config_files: ConfigCollection,
client: reqwest::Client,
pub download_images: bool,
}
impl ArticleScraper {
pub fn new(config_path: PathBuf, save_image_path: PathBuf, download_images: bool) -> Result<ArticleScraper, ScraperError> {
let config_files = GrabberConfig::parse_directory(&config_path).context(ScraperErrorKind::Config)?;
Ok(ArticleScraper {
image_downloader: ImageDownloader::new(save_image_path, (2000, 2000), (1000, 800)),
config_files: config_files,
client: reqwest::Client::new(),
download_images: download_images,
})
}
pub fn parse(&self, url: url::Url) -> Result<Article, ScraperError> {
info!("Scraping article: {}", url.as_str());
// do a HEAD request to url
let response = match self.client.head(url.clone()).send() {
Ok(response) => response,
Err(error) => {
error!("Failed head request to: {} - {}", url.as_str(), error.description());
Err(error).context(ScraperErrorKind::Http)?
}
};
// check if url redirects and we need to pick up the new url
let mut url = url;
if let Some(new_url) = ArticleScraper::check_redirect(&response) {
debug!("Url {} redirects to {}", url.as_str(), new_url.as_str());
url = new_url;
}
// check if we are dealing with text/html
if !ArticleScraper::check_content_type(&response)? {
return Err(ScraperErrorKind::ContentType)?
}
// check if we have a config for the url
let config = self.get_grabber_config(&url)?;
let mut article = Article {
title: None,
author: None,
url: url.clone(),
date: None,
html: None,
};
// create empty document to hold the content
let mut document = match Document::new() {
Ok(doc) => doc,
Err(()) => return Err(ScraperErrorKind::Xml)?
};
let mut root = match Node::new("article", None, &document) {
Ok(root) => root,
Err(()) => return Err(ScraperErrorKind::Xml)?
};
document.set_root_element(&root);
ArticleScraper::generate_head(&mut root, &document)?;
self.parse_first_page(&mut article, &url, &mut root, config)?;
let context = match Context::new(&document) {
Ok(context) => context,
Err(_) => {
error!("Failed to create xpath context for extracted article");
return Err(ScraperErrorKind::Xml)?
}
};
if let Err(error) = ArticleScraper::prevent_self_closing_tags(&context) {
error!("Preventing self closing tags failed - {}", error);
return Err(error)
}
if let Err(error) = ArticleScraper::eliminate_noscrip_tag(&context) {
error!("Eliminating <noscript> tag failed - {}", error);
return Err(error)
}
if self.download_images {
if let Err(error) = self.image_downloader.download_images_from_context(&context, &url) {
error!("Downloading images failed: {}", error);
}
}
// serialize content
let html = document.to_string(/*format:*/ false);
article.html = Some(html);
Ok(article)
}
fn parse_first_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
let mut html = ArticleScraper::download(&url, &self.client)?;
parse_html!(html, config, xpath_ctx);
// check for single page link
let mut xpath_ctx = xpath_ctx;
if let Some(xpath_single_page_link) = config.single_page_link.clone() {
debug!("Single page link xpath specified in config {}", xpath_single_page_link);
if let Ok(result) = xpath_ctx.findvalue(&xpath_single_page_link, None) {
// parse again with single page url
debug!("Single page link found {}", result);
let single_page_url = url::Url::parse(&result).context(ScraperErrorKind::Url)?;
return self.parse_single_page(article, &single_page_url, root, config);
}
}
ArticleScraper::extract_metadata(&xpath_ctx, config, article);
ArticleScraper::strip_junk(&xpath_ctx, config, &url);
ArticleScraper::extract_body(&xpath_ctx, root, config)?;
self.check_for_next_page(&xpath_ctx, config, root)
}
fn parse_next_page(&self, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
let mut html = ArticleScraper::download(&url, &self.client)?;
parse_html!(html, config, xpath_ctx);
ArticleScraper::strip_junk(&xpath_ctx, config, &url);
ArticleScraper::extract_body(&xpath_ctx, root, config)?;
self.check_for_next_page(&xpath_ctx, config, root)
}
fn parse_single_page(&self, article: &mut Article, url: &url::Url, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
let mut html = ArticleScraper::download(&url, &self.client)?;
parse_html!(html, config, xpath_ctx);
ArticleScraper::extract_metadata(&xpath_ctx, config, article);
ArticleScraper::strip_junk(&xpath_ctx, config, &url);
ArticleScraper::extract_body(&xpath_ctx, root, config)?;
Ok(())
}
fn download(url: &url::Url, client: &reqwest::Client) -> Result<String, ScraperError> {
let mut response = match client.get(url.as_str()).send() {
Ok(response) => response,
Err(error) => {
error!("Downloading HTML failed: GET {} - {}", url.as_str(), error.description());
return Err(error).context(ScraperErrorKind::Http)?
}
};
if response.status().is_success() {
let text = response.text().context(ScraperErrorKind::Http)?;
{
if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_html(&text)) {
return Ok(decoded_html)
}
if let Some(decoded_html) = ArticleScraper::decode_html(&text, ArticleScraper::get_encoding_from_http_header(response.headers())) {
return Ok(decoded_html)
}
}
warn!("No encoding of HTML detected - assuming utf-8");
return Ok(text)
}
Err(ScraperErrorKind::Http)?
}
fn get_encoding_from_http_header(headers: &reqwest::header::Headers) -> Option<&str> {
if let Some(content_type) = headers.get::<reqwest::header::ContentType>() {
if let Some(encoding) = content_type.get_param(reqwest::mime::CHARSET) {
return Some(encoding.as_str())
}
}
None
}
fn get_encoding_from_html(html: &str) -> Option<&str> {
let regex = regex::Regex::new(r#"<meta.*?charset=([^"']+)"#).unwrap();
if let Some(captures) = regex.captures(html) {
if let Some(regex_match) = captures.get(1) {
return Some(regex_match.as_str())
}
}
None
}
fn decode_html(html: &str, encoding: Option<&str>) -> Option<String> {
if let Some(encoding) = encoding {
if let Some(encoding) = Encoding::for_label(encoding.as_bytes()) {
let (decoded_html, _, invalid_chars) = encoding.decode(html.as_bytes());
if !invalid_chars {
return Some(decoded_html.into_owned())
}
}
warn!("Could not decode HTML. Encoding: {}", encoding);
}
None
}
fn get_grabber_config(&self, url: &url::Url) -> Result<&GrabberConfig, ScraperError> {
let config_name = match url.host_str()
{
Some(name) => {
let mut name = name;
if name.starts_with("www.") {
name = &name[4..]
}
name
},
None => {
error!("Getting config failed due to bad Url");
return Err(ScraperErrorKind::Config)?
},
};
let config_name = config_name.to_owned() + ".txt";
if !self.config_files.contains_key(&config_name) {
error!("No config file of the name {} found", config_name);
Err(ScraperErrorKind::Config)?
}
Ok(self.config_files.index(&config_name))
}
fn check_content_type(response: &reqwest::Response) -> Result<bool, ScraperError> {
if response.status().is_success() {
if let Some(content_type) = response.headers().get::<reqwest::header::ContentType>() {
if content_type.type_() == reqwest::mime::TEXT && content_type.subtype() == reqwest::mime::HTML {
return Ok(true)
}
}
error!("Content type is not text/HTML");
return Ok(false)
}
error!("Failed to determine content type");
Err(ScraperErrorKind::Http)?
}
fn check_redirect(response: &reqwest::Response) -> Option<url::Url> {
if response.status() == reqwest::StatusCode::PermanentRedirect {
debug!("Article url redirects to {}", response.url().as_str());
return Some(response.url().clone())
}
None
}
fn extract_value(context: &Context, xpath: &str) -> Result<String, ScraperError> {
evaluate_xpath!(context, xpath, node_vec);
xpath_result_empty!(node_vec, xpath);
if let Some(val) = node_vec.get(0) {
return Ok(val.get_content())
}
Err(ScraperErrorKind::Xml)?
}
fn extract_value_merge(context: &Context, xpath: &str) -> Result<String, ScraperError> {
evaluate_xpath!(context, xpath, node_vec);
xpath_result_empty!(node_vec, xpath);
let mut val = String::new();
for node in node_vec {
val.push_str(&node.get_content());
}
return Ok(val.trim().to_string())
}
fn strip_node(context: &Context, xpath: &String) -> Result<(), ScraperError> {
let mut ancestor = xpath.clone();
if ancestor.starts_with("//") {
ancestor = ancestor.chars().skip(2).collect();
}
let query = &format!("{}[not(ancestor::{})]", xpath, ancestor);
evaluate_xpath!(context, query, node_vec);
for mut node in node_vec {
node.unlink();
}
Ok(())
}
fn strip_id_or_class(context: &Context, id_or_class: &String) -> Result<(), ScraperError> {
let xpath = &format!("//*[contains(@class, '{}') or contains(@id, '{}')]", id_or_class, id_or_class);
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
node.unlink();
}
Ok(())
}
fn fix_lazy_images(context: &Context, class: &str, property_url: &str) -> Result<(), ScraperError> {
let xpath = &format!("//img[contains(@class, '{}')]", class);
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Some(correct_url) = node.get_property(property_url) {
if let Err(_) = node.set_property("src", &correct_url) {
return Err(ScraperErrorKind::Xml)?;
}
}
}
Ok(())
}
fn fix_iframe_size(context: &Context, site_name: &str) -> Result<(), ScraperError> {
let xpath = &format!("//iframe[contains(@src, '{}')]", site_name);
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Some(mut parent) = node.get_parent() {
if let Ok(mut video_wrapper) = parent.new_child(None, "div") {
if let Ok(()) = video_wrapper.set_property("class", "videoWrapper") {
if let Ok(()) = node.set_property("width", "100%") {
if let Ok(()) = node.remove_property("height") {
node.unlink();
match video_wrapper.add_child(&mut node) {
Ok(_) => continue,
Err(_) => {
error!("Failed to add iframe as child of video wrapper <div>");
return Err(ScraperErrorKind::Xml)?
}
}
}
}
}
}
error!("Failed to add video wrapper <div> as parent of iframe");
return Err(ScraperErrorKind::Xml)?
}
error!("Failed to get parent of iframe");
return Err(ScraperErrorKind::Xml)?
}
Ok(())
}
fn remove_attribute(context: &Context, tag: Option<&str>, attribute: &str) -> Result<(), ScraperError> {
let xpath_tag = match tag {
Some(tag) => tag,
None => "*"
};
let xpath = &format!("//{}[@{}]", xpath_tag, attribute);
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Err(_) = node.remove_property(attribute) {
return Err(ScraperErrorKind::Xml)?
}
}
Ok(())
}
fn add_attribute(context: &Context, tag: Option<&str>, attribute: &str, value: &str) -> Result<(), ScraperError> {
let xpath_tag = match tag {
Some(tag) => tag,
None => "*"
};
let xpath = &format!("//{}", xpath_tag);
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Err(_) = node.set_attribute(attribute, value) {
return Err(ScraperErrorKind::Xml)?
}
}
Ok(())
}
fn get_attribute(context: &Context, xpath: &str, attribute: &str) -> Result<String, ScraperError> {
evaluate_xpath!(context, xpath, node_vec);
xpath_result_empty!(node_vec, xpath);
for node in node_vec {
if let Some(value) = node.get_attribute(attribute) {
return Ok(value)
}
}
Err(ScraperErrorKind::Xml)?
}
fn repair_urls(context: &Context, xpath: &str, attribute: &str, article_url: &url::Url) -> Result<(), ScraperError> {
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Some(val) = node.get_attribute(attribute) {
if let Err(url::ParseError::RelativeUrlWithoutBase) = url::Url::parse(&val) {
if let Ok(fixed_url) = ArticleScraper::complete_url(article_url, &val) {
if let Err(_) = node.set_attribute(attribute, fixed_url.as_str()) {
return Err(ScraperErrorKind::Xml)?
}
}
}
}
}
Ok(())
}
fn complete_url(article_url: &url::Url, incomplete_url: &str) -> Result<url::Url, ScraperError> {
let mut completed_url = article_url.scheme().to_owned();
completed_url.push(':');
if !incomplete_url.starts_with("//") {
match article_url.host() {
Some(url::Host::Domain(host)) => {
completed_url.push_str("//");
completed_url.push_str(host);
}
_ => return Err(ScraperErrorKind::Url)?
};
}
completed_url.push_str(incomplete_url);
let url = url::Url::parse(&completed_url).context(ScraperErrorKind::Url)?;
return Ok(url)
}
fn strip_junk(context: &Context, config: &GrabberConfig, url: &url::Url) {
// strip specified xpath
for xpath_strip in &config.xpath_strip {
let _ = ArticleScraper::strip_node(&context, xpath_strip);
}
// strip everything with specified 'id' or 'class'
for xpath_strip_class in &config.strip_id_or_class {
let _ = ArticleScraper::strip_id_or_class(&context, xpath_strip_class);
}
// strip any <img> element where @src attribute contains this substring
for xpath_strip_img_src in &config.strip_image_src {
let _ = ArticleScraper::strip_node(&context, &format!("//img[contains(@src,'{}')]", xpath_strip_img_src));
}
let _ = ArticleScraper::fix_lazy_images(&context, "lazyload", "data-src");
let _ = ArticleScraper::fix_iframe_size(&context, "youtube.com");
let _ = ArticleScraper::remove_attribute(&context, None, "style");
let _ = ArticleScraper::remove_attribute(&context, Some("a"), "onclick");
let _ = ArticleScraper::remove_attribute(&context, Some("img"), "srcset");
let _ = ArticleScraper::remove_attribute(&context, Some("img"), "sizes");
let _ = ArticleScraper::add_attribute(&context, Some("a"), "target", "_blank");
let _ = ArticleScraper::repair_urls(&context, "//img", "src", &url);
let _ = ArticleScraper::repair_urls(&context, "//a", "src", &url);
let _ = ArticleScraper::repair_urls(&context, "//a", "href", &url);
let _ = ArticleScraper::repair_urls(&context, "//object", "data", &url);
let _ = ArticleScraper::repair_urls(&context, "//iframe", "src", &url);
// strip elements using Readability.com and Instapaper.com ignore class names
// .entry-unrelated and .instapaper_ignore
// See http://blog.instapaper.com/post/730281947
let _ = ArticleScraper::strip_node(&context, &String::from(
"//*[contains(@class,' entry-unrelated ') or contains(@class,' instapaper_ignore ')]"));
// strip elements that contain style="display: none;"
let _ = ArticleScraper::strip_node(&context, &String::from("//*[contains(@style,'display:none')]"));
// strip all scripts
let _ = ArticleScraper::strip_node(&context, &String::from("//script"));
// strip all comments
let _ = ArticleScraper::strip_node(&context, &String::from("//comment()"));
// strip all empty url-tags <a/>
let _ = ArticleScraper::strip_node(&context, &String::from("//a[not(node())]"));
// strip all external css and fonts
let _ = ArticleScraper::strip_node(&context, &String::from("//*[@type='text/css']"));
}
fn extract_metadata(context: &Context, config: &GrabberConfig, article: &mut Article) {
// try to get title
for xpath_title in &config.xpath_title {
if let Ok(title) = ArticleScraper::extract_value_merge(&context, xpath_title) {
debug!("Article title: {}", title);
article.title = Some(title);
break;
}
}
// try to get the author
for xpath_author in &config.xpath_author {
if let Ok(author) = ArticleScraper::extract_value(&context, xpath_author) {
debug!("Article author: {}", author);
article.author = Some(author);
break;
}
}
// try to get the date
for xpath_date in &config.xpath_date {
if let Ok(date_string) = ArticleScraper::extract_value(&context, xpath_date) {
debug!("Article date: {}", date_string);
if let Ok(date) = NaiveDateTime::from_str(&date_string) {
article.date = Some(date);
break;
}
else {
warn!("Parsing the date string '{}' failed", date_string);
}
}
}
}
fn extract_body(context: &Context, root: &mut Node, config: &GrabberConfig) -> Result<(), ScraperError> {
let mut found_something = false;
for xpath_body in &config.xpath_body {
found_something = ArticleScraper::extract_body_single(&context, root, xpath_body)?;
}
if !found_something {
return Err(ScraperErrorKind::Scrape)?
}
Ok(())
}
fn extract_body_single(context: &Context, root: &mut Node, xpath: &str) -> Result<bool, ScraperError> {
let mut found_something = false;
{
evaluate_xpath!(context, xpath, node_vec);
xpath_result_empty!(node_vec, xpath);
for mut node in node_vec {
if node.get_property("style").is_some() {
if let Err(_) = node.remove_property("style") {
return Err(ScraperErrorKind::Xml)?
}
}
node.unlink();
if let Ok(_) = root.add_child(&mut node) {
found_something = true;
}
else {
error!("Failed to add body to prepared document");
return Err(ScraperErrorKind::Xml)?
}
}
}
Ok(found_something)
}
fn check_for_next_page(&self, context: &Context, config: &GrabberConfig, root: &mut Node) -> Result<(), ScraperError> {
if let Some(next_page_xpath) = config.next_page_link.clone() {
if let Ok(next_page_string) = ArticleScraper::get_attribute(&context, &next_page_xpath, "href") {
if let Ok(next_page_url) = url::Url::parse(&next_page_string) {
return self.parse_next_page(&next_page_url, root, config)
}
}
}
// last page reached
Ok(())
}
fn generate_head(root: &mut Node, document: &Document) -> Result<(), ScraperError> {
if let Ok(mut head_node) = Node::new("head", None, document) {
if let Ok(()) = root.add_prev_sibling(&mut head_node) {
if let Ok(mut meta) = head_node.new_child(None, "meta") {
if let Ok(_) = meta.set_property("charset", "utf-8") {
return Ok(())
}
}
}
}
Err(ScraperErrorKind::Xml)?
}
fn prevent_self_closing_tags(context: &Context) -> Result<(), ScraperError> {
// search document for empty tags and add an empty text node as child
// this prevents libxml from self closing non void elements such as iframe
let xpath = "//*[not(node())]";
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if node.get_name() == "meta" {
continue
}
let _ = node.add_text_child(None, "empty", "");
}
Ok(())
}
fn eliminate_noscrip_tag(context: &Context) -> Result<(), ScraperError> {
let xpath = "//noscript";
evaluate_xpath!(context, xpath, node_vec);
for mut node in node_vec {
if let Some(mut parent) = node.get_parent() {
node.unlink();
let children = node.get_child_nodes();
for mut child in children {
child.unlink();
let _ = parent.add_child(&mut child);
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use ::*;
#[test]
pub fn golem() {
let config_path = PathBuf::from(r"./resources/tests/golem");
let image_path = PathBuf::from(r"./test_output");
let url = url::Url::parse("https://www.golem.de/news/http-error-418-fehlercode-ich-bin-eine-teekanne-darf-bleiben-1708-129460.html").unwrap();
let grabber = ArticleScraper::new(config_path, image_path.clone(), true).unwrap();
let article = grabber.parse(url).unwrap();
article.save_html(&image_path).unwrap();
assert_eq!(article.title, Some(String::from("HTTP Error 418: Fehlercode \"Ich bin eine Teekanne\" darf bleiben")));
assert_eq!(article.author, Some(String::from("Hauke Gierow")));
}
#[test]
pub fn phoronix() {
let config_path = PathBuf::from(r"./resources/tests/phoronix");
let image_path = PathBuf::from(r"./test_output");
let url = url::Url::parse("http://www.phoronix.com/scan.php?page=article&item=amazon_ec2_bare&num=1").unwrap();
let grabber = ArticleScraper::new(config_path, image_path.clone(), true).unwrap();
let article = grabber.parse(url).unwrap();
article.save_html(&image_path).unwrap();
assert_eq!(article.title, Some(String::from("Amazon EC2 Cloud Benchmarks Against Bare Metal Systems")));
}
}
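
For illustration (not part of the commit), consuming the crate from another project would look roughly like the tests above. The crate name article_scraper, the config directory, the image cache path, and the article URL are placeholders:

extern crate article_scraper;
extern crate url;

use article_scraper::ArticleScraper;
use std::path::PathBuf;

fn main() {
    let scraper = ArticleScraper::new(
        PathBuf::from("./config_files"), // directory containing the *.txt grabber configs
        PathBuf::from("./image_cache"),  // where downloaded images are stored
        true,                            // also download images
    ).unwrap();
    let url = url::Url::parse("https://example.com/some-article").unwrap();
    let article = scraper.parse(url).unwrap();
    println!("{:?} by {:?}", article.title, article.author);
    article.save_html(&PathBuf::from("./output")).unwrap();
}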

src/macros.rs (new file)

@@ -0,0 +1,60 @@
macro_rules! parse_html {
(
$html: ident,
$config: ident,
$xpath_ctx: ident
) => {
// replace matches in raw html
for replace in &$config.replace {
$html = $html.replace(&replace.to_replace, &replace.replace_with);
}
// parse html
let parser = Parser::default_html();
let doc = match parser.parse_string($html.as_str()) {
Ok(doc) => doc,
Err(_) => {
error!("Parsing HTML failed for downloaded HTML");
return Err(ScraperErrorKind::Xml)?
}
};
let $xpath_ctx = match Context::new(&doc) {
Ok(context) => context,
Err(_) => {
error!("Creating xpath context failed for downloaded HTML");
return Err(ScraperErrorKind::Xml)?
}
};
};
}
macro_rules! evaluate_xpath {
(
$context: ident,
$xpath: ident,
$node_vec: ident
) => {
let res = match $context.evaluate($xpath) {
Ok(result) => result,
Err(_) => {
error!("Evaluation of xpath {} yielded no results", $xpath);
return Err(ScraperErrorKind::Xml)?
}
};
let $node_vec = res.get_nodes_as_vec();
};
}
macro_rules! xpath_result_empty {
(
$node_vec: ident,
$xpath: ident
) => {
if $node_vec.len() == 0 {
error!("Evaluation of xpath {} yielded no results", $xpath);
return Err(ScraperErrorKind::Xml)?
}
};
}