From 11e08ae5051bd8c89db378d01fa5348ae9b237ee Mon Sep 17 00:00:00 2001 From: Jan Lukas Gernert Date: Sun, 19 Mar 2023 22:43:26 +0100 Subject: [PATCH] move conditional cleaning right after parsing & port attribute cleaning from readability --- expected.html | 325 ++++++++++++++++ resources/tests/readability/002/expected.html | 62 +-- .../tests/readability/blogger/expected.html | 12 +- .../readability/clean-links/expected.html | 4 +- .../readability/hukumusume/expected.html | 107 ++++++ .../tests/readability/hukumusume/source.html | 356 ++++++++++++++++++ src/constants.rs | 18 +- src/full_text_parser/mod.rs | 145 ++++--- src/full_text_parser/readability/mod.rs | 5 + src/full_text_parser/readability/tests.rs | 13 +- 10 files changed, 943 insertions(+), 104 deletions(-) create mode 100644 expected.html create mode 100644 resources/tests/readability/hukumusume/expected.html create mode 100644 resources/tests/readability/hukumusume/source.html diff --git a/expected.html b/expected.html new file mode 100644 index 0000000..87552b7 --- /dev/null +++ b/expected.html @@ -0,0 +1,325 @@ +

For more than a decade the Web has used XMLHttpRequest (XHR) to achieve + asynchronous requests in JavaScript. While very useful, XHR is not a very + nice API. It suffers from lack of separation of concerns. The input, output + and state are all managed by interacting with one object, and state is + tracked using events. Also, the event-based model doesn’t play well with + JavaScript’s recent focus on Promise- and generator-based asynchronous + programming.

+

The Fetch API intends + to fix most of these problems. It does this by introducing the same primitives + to JS that are used in the HTTP protocol. In addition, it introduces a + utility function fetch() that succinctly captures the intention + of retrieving a resource from the network.

+

The Fetch specification, which + defines the API, nails down the semantics of a user agent fetching a resource. + This, combined with ServiceWorkers, is an attempt to:

+
    +
  1. Improve the offline experience.
  2. +
  3. Expose the building blocks of the Web to the platform as part of the + extensible web movement.
  4. +
+

As of this writing, the Fetch API is available in Firefox 39 (currently + Nightly) and Chrome 42 (currently dev). Github has a Fetch polyfill.

+

Feature detection

+

Fetch API support can be detected by checking for Headers,Request, Response or fetch on + the window or worker scope.

+

Simple fetching

+

The most useful, high-level part of the Fetch API is the fetch() function. + In its simplest form it takes a URL and returns a promise that resolves + to the response. The response is captured as a Response object.

+
fetch("/data.json").then(function(res){// res instanceof Response == true.if(res.ok){
+    res.json().then(function(data){
+      console.log(data.entries);});}else{
+    console.log("Looks like the response wasn't perfect, got status", res.status);}},function(e){
+  console.log("Fetch failed!", e);});
+

Submitting some parameters, it would look like this:

+
fetch("http://www.example.org/submit.php",{
+  method:"POST",
+  headers:{"Content-Type":"application/x-www-form-urlencoded"},
+  body:"firstName=Nikhil&favColor=blue&password=easytoguess"}).then(function(res){if(res.ok){
+    alert("Perfect! Your settings are saved.");}elseif(res.status==401){
+    alert("Oops! You are not authorized.");}},function(e){
+  alert("Error submitting form!");});
+

The fetch() function’s arguments are the same as those passed + to the +
Request() constructor, so you may directly pass arbitrarily + complex requests to fetch() as discussed below.

+

Headers

+

Fetch introduces 3 interfaces. These are Headers, Request and +
Response. They map directly to the underlying HTTP concepts, + but have +
certain visibility filters in place for privacy and security reasons, + such as +
supporting CORS rules and ensuring cookies aren’t readable by third parties.

+

The Headers interface is + a simple multi-map of names to values:

+
var content ="Hello World";var reqHeaders =new Headers();
+reqHeaders.append("Content-Type","text/plain"
+reqHeaders.append("Content-Length", content.length.toString());
+reqHeaders.append("X-Custom-Header","ProcessThisImmediately");
+

The same can be achieved by passing an array of arrays or a JS object + literal +
to the constructor:

+
reqHeaders =new Headers({"Content-Type":"text/plain","Content-Length": content.length.toString(),"X-Custom-Header":"ProcessThisImmediately",});
+

The contents can be queried and retrieved:

+
console.log(reqHeaders.has("Content-Type"));// true
+console.log(reqHeaders.has("Set-Cookie"));// false
+reqHeaders.set("Content-Type","text/html");
+reqHeaders.append("X-Custom-Header","AnotherValue");
+ 
+console.log(reqHeaders.get("Content-Length"));// 11
+console.log(reqHeaders.getAll("X-Custom-Header"));// ["ProcessThisImmediately", "AnotherValue"]
+ 
+reqHeaders.delete("X-Custom-Header");
+console.log(reqHeaders.getAll("X-Custom-Header"));// []
+

Some of these operations are only useful in ServiceWorkers, but they provide +
a much nicer API to Headers.

+

Since Headers can be sent in requests, or received in responses, and have + various limitations about what information can and should be mutable, Headers objects + have a guard property. This is not exposed to the Web, but + it affects which mutation operations are allowed on the Headers object. +
Possible values are:

+
    +
  • “none”: default.
  • +
  • “request”: guard for a Headers object obtained from a Request (Request.headers).
  • +
  • “request-no-cors”: guard for a Headers object obtained from a Request + created +
    with mode “no-cors”.
  • +
  • “response”: naturally, for Headers obtained from Response (Response.headers).
  • +
  • “immutable”: Mostly used for ServiceWorkers, renders a Headers object +
    read-only.
  • +
+

The details of how each guard affects the behaviors of the Headers object + are +
in the specification. For example, + you may not append or set a “request” guarded Headers’ “Content-Length” + header. Similarly, inserting “Set-Cookie” into a Response header is not + allowed so that ServiceWorkers may not set cookies via synthesized Responses.

+

All of the Headers methods throw TypeError if name is not a + valid HTTP Header name. The mutation operations will throw TypeError + if there is an immutable guard. Otherwise they fail silently. For example:

+
var res = Response.error();try{
+  res.headers.set("Origin","http://mybank.com");}catch(e){
+  console.log("Cannot pretend to be a bank!");}
+

Request

+

The Request interface defines a request to fetch a resource over HTTP. + URL, method and headers are expected, but the Request also allows specifying + a body, a request mode, credentials and cache hints.

+

The simplest Request is of course, just a URL, as you may do to GET a + resource.

+
var req =new Request("/index.html");
+console.log(req.method);// "GET"
+console.log(req.url);// "http://example.com/index.html"
+

You may also pass a Request to the Request() constructor to + create a copy. +
(This is not the same as calling the clone() method, which + is covered in +
the “Reading bodies” section.).

+
var copy =new Request(req);
+console.log(copy.method);// "GET"
+console.log(copy.url);// "http://example.com/index.html"
+

Again, this form is probably only useful in ServiceWorkers.

+

The non-URL attributes of the Request can only be set by passing + initial +
values as a second argument to the constructor. This argument is a dictionary.

+
var uploadReq =new Request("/uploadImage",{
+  method:"POST",
+  headers:{"Content-Type":"image/png",},
+  body:"image data"});
+

The Request’s mode is used to determine if cross-origin requests lead + to valid responses, and which properties on the response are readable. + Legal mode values are "same-origin", "no-cors" (default) + and "cors".

+

The "same-origin" mode is simple, if a request is made to another + origin with this mode set, the result is simply an error. You could use + this to ensure that +
a request is always being made to your origin.

+
var arbitraryUrl = document.getElementById("url-input").value;
+fetch(arbitraryUrl,{ mode:"same-origin"}).then(function(res){
+  console.log("Response succeeded?", res.ok);},function(e){
+  console.log("Please enter a same-origin URL!");});
+

The "no-cors" mode captures what the web platform does by default + for scripts you import from CDNs, images hosted on other domains, and so + on. First, it prevents the method from being anything other than “HEAD”, + “GET” or “POST”. Second, if any ServiceWorkers intercept these requests, + they may not add or override any headers except for these. + Third, JavaScript may not access any properties of the resulting Response. + This ensures that ServiceWorkers do not affect the semantics of the Web + and prevents security and privacy issues that could arise from leaking + data across domains.

+

"cors" mode is what you’ll usually use to make known cross-origin + requests to access various APIs offered by other vendors. These are expected + to adhere to +
the CORS protocol. + Only a limited set of + headers is exposed in the Response, but the body is readable. For example, + you could get a list of Flickr’s most interesting photos + today like this:

+
var u =new URLSearchParams();
+u.append('method','flickr.interestingness.getList');
+u.append('api_key','<insert api key here>');
+u.append('format','json');
+u.append('nojsoncallback','1');var apiCall = fetch('https://api.flickr.com/services/rest?'+ u);
+ 
+apiCall.then(function(response){return response.json().then(function(json){// photo is a list of photos.return json.photos.photo;});}).then(function(photos){
+  photos.forEach(function(photo){
+    console.log(photo.title);});});
+

You may not read out the “Date” header since Flickr does not allow it + via +
Access-Control-Expose-Headers.

+
response.headers.get("Date");// null
+

The credentials enumeration determines if cookies for the other + domain are +
sent to cross-origin requests. This is similar to XHR’s withCredentials
flag, but tri-valued as "omit" (default), "same-origin" and "include".

+

The Request object will also give the ability to offer caching hints to + the user-agent. This is currently undergoing some security review. + Firefox exposes the attribute, but it has no effect.

+

Requests have two read-only attributes that are relevant to ServiceWorkers +
intercepting them. There is the string referrer, which is + set by the UA to be +
the referrer of the Request. This may be an empty string. The other is +
context which is a rather large enumeration defining + what sort of resource is being fetched. This could be “image” if the request + is from an + <img>tag in the controlled document, “worker” if it is an attempt to load a + worker script, and so on. When used with the fetch() function, + it is “fetch”.

+

Response

+

Response instances are returned by calls to fetch(). + They can also be created by JS, but this is only useful in ServiceWorkers.

+

We have already seen some attributes of Response when we looked at fetch(). + The most obvious candidates are status, an integer (default + value 200) and statusText (default value “OK”), which correspond + to the HTTP status code and reason. The ok attribute is just + a shorthand for checking that status is in the range 200-299 + inclusive.

+

headers is the Response’s Headers object, with guard “response”. + The url attribute reflects the URL of the corresponding request.

+

Response also has a type, which is “basic”, “cors”, “default”, + “error” or +
“opaque”.

+
    +
  • +"basic": normal, same origin response, with all headers exposed + except +
    “Set-Cookie” and “Set-Cookie2″.
  • +
  • +"cors": response was received from a valid cross-origin request. + Certain headers and the bodymay be accessed.
  • +
  • +"error": network error. No useful information describing + the error is available. The Response’s status is 0, headers are empty and + immutable. This is the type for a Response obtained from Response.error().
  • +
  • +"opaque": response for “no-cors” request to cross-origin + resource. Severely
    + restricted
    +
  • +
+

The “error” type results in the fetch() Promise rejecting with + TypeError.

+

There are certain attributes that are useful only in a ServiceWorker scope. + The +
idiomatic way to return a Response to an intercepted request in ServiceWorkers + is:

+
addEventListener('fetch',function(event){
+  event.respondWith(new Response("Response body",{
+    headers:{"Content-Type":"text/plain"}});});
+

As you can see, Response has a two argument constructor, where both arguments + are optional. The first argument is a body initializer, and the second + is a dictionary to set the status, statusText and headers.

+

The static method Response.error() simply returns an error + response. Similarly, Response.redirect(url, status) returns + a Response resulting in +
a redirect to url.

+

Dealing with bodies

+

Both Requests and Responses may contain body data. We’ve been glossing + over it because of the various data types body may contain, but we will + cover it in detail now.

+

A body is an instance of any of the following types.

+ +

In addition, Request and Response both offer the following methods to + extract their body. These all return a Promise that is eventually resolved + with the actual content.

+
    +
  • arrayBuffer()
  • +
  • blob()
  • +
  • json()
  • +
  • text()
  • +
  • formData()
  • +
+

This is a significant improvement over XHR in terms of ease of use of + non-text data!

+

Request bodies can be set by passing body parameters:

+
var form =new FormData(document.getElementById('login-form'));
+fetch("/login",{
+  method:"POST",
+  body: form
+})
+

Responses take the first argument as the body.

+
var res =new Response(new File(["chunk","chunk"],"archive.zip",{ type:"application/zip"}));
+

Both Request and Response (and by extension the fetch() function), + will try to intelligently determine the content type. + Request will also automatically set a “Content-Type” header if none is + set in the dictionary.

+

Streams and cloning

+

It is important to realise that Request and Response bodies can only be + read once! Both interfaces have a boolean attribute bodyUsed to + determine if it is safe to read or not.

+
var res =new Response("one time use");
+console.log(res.bodyUsed);// false
+res.text().then(function(v){
+  console.log(res.bodyUsed);// true});
+console.log(res.bodyUsed);// true
+ 
+res.text().catch(function(e){
+  console.log("Tried to read already consumed Response");});
+

This decision allows easing the transition to an eventual stream-based Fetch + API. The intention is to let applications consume data as it arrives, allowing + for JavaScript to deal with larger files like videos, and perform things + like compression and editing on the fly.

+

Often, you’ll want access to the body multiple times. For example, you + can use the upcoming Cache API to + store Requests and Responses for offline use, and Cache requires bodies + to be available for reading.

+

So how do you read out the body multiple times within such constraints? + The API provides a clone() method on the two interfaces. This + will return a clone of the object, with a ‘new’ body. clone() MUST + be called before the body of the corresponding object has been used. That + is, clone() first, read later.

+
addEventListener('fetch',function(evt){var sheep =new Response("Dolly");
+  console.log(sheep.bodyUsed);// falsevar clone = sheep.clone();
+  console.log(clone.bodyUsed);// false
+ 
+  clone.text();
+  console.log(sheep.bodyUsed);// false
+  console.log(clone.bodyUsed);// true
+ 
+  evt.respondWith(cache.add(sheep.clone()).then(function(e){return sheep;});});
+

Future improvements

+

Along with the transition to streams, Fetch will eventually have the ability + to abort running fetch()es and some way to report the progress + of a fetch. These are provided by XHR, but are a little tricky to fit in + the Promise-based nature of the Fetch API.

+

You can contribute to the evolution of this API by participating in discussions + on the WHATWG mailing list and + in the issues in the Fetch and + ServiceWorkerspecifications.

+

For a better web!

+

The author would like to thank Andrea Marchesini, Anne van Kesteren and Ben
+Kelly for helping with the specification and implementation.

diff --git a/resources/tests/readability/002/expected.html b/resources/tests/readability/002/expected.html index 6007921..87552b7 100644 --- a/resources/tests/readability/002/expected.html +++ b/resources/tests/readability/002/expected.html @@ -27,19 +27,19 @@

The most useful, high-level part of the Fetch API is the fetch() function. In its simplest form it takes a URL and returns a promise that resolves to the response. The response is captured as a Response object.

-
fetch("/data.json").then(function(res){// res instanceof Response == true.if(res.ok){
+
fetch("/data.json").then(function(res){// res instanceof Response == true.if(res.ok){
     res.json().then(function(data){
       console.log(data.entries);});}else{
     console.log("Looks like the response wasn't perfect, got status", res.status);}},function(e){
-  console.log("Fetch failed!", e);});
+ console.log("Fetch failed!", e);});

Submitting some parameters, it would look like this:

-
fetch("http://www.example.org/submit.php",{
+
fetch("http://www.example.org/submit.php",{
   method:"POST",
   headers:{"Content-Type":"application/x-www-form-urlencoded"},
   body:"firstName=Nikhil&favColor=blue&password=easytoguess"}).then(function(res){if(res.ok){
     alert("Perfect! Your settings are saved.");}elseif(res.status==401){
     alert("Oops! You are not authorized.");}},function(e){
-  alert("Error submitting form!");});
+ alert("Error submitting form!");});

The fetch() function’s arguments are the same as those passed to the
Request() constructor, so you may directly pass arbitrarily @@ -53,16 +53,16 @@
supporting CORS rules and ensuring cookies aren’t readable by third parties.

The Headers interface is a simple multi-map of names to values:

-
var content ="Hello World";var reqHeaders =new Headers();
+
var content ="Hello World";var reqHeaders =new Headers();
 reqHeaders.append("Content-Type","text/plain"
 reqHeaders.append("Content-Length", content.length.toString());
-reqHeaders.append("X-Custom-Header","ProcessThisImmediately");
+reqHeaders.append("X-Custom-Header","ProcessThisImmediately");

The same can be achieved by passing an array of arrays or a JS object literal
to the constructor:

-
reqHeaders =new Headers({"Content-Type":"text/plain","Content-Length": content.length.toString(),"X-Custom-Header":"ProcessThisImmediately",});
+
reqHeaders =new Headers({"Content-Type":"text/plain","Content-Length": content.length.toString(),"X-Custom-Header":"ProcessThisImmediately",});

The contents can be queried and retrieved:

-
console.log(reqHeaders.has("Content-Type"));// true
+
console.log(reqHeaders.has("Content-Type"));// true
 console.log(reqHeaders.has("Set-Cookie"));// false
 reqHeaders.set("Content-Type","text/html");
 reqHeaders.append("X-Custom-Header","AnotherValue");
@@ -71,7 +71,7 @@ console.log(reqHeaders.get(<
 console.log(reqHeaders.getAll("X-Custom-Header"));// ["ProcessThisImmediately", "AnotherValue"]
  
 reqHeaders.delete("X-Custom-Header");
-console.log(reqHeaders.getAll("X-Custom-Header"));// []
+console.log(reqHeaders.getAll("X-Custom-Header"));// []

Some of these operations are only useful in ServiceWorkers, but they provide
a much nicer API to Headers.

Since Headers can be sent in requests, or received in responses, and have @@ -98,34 +98,34 @@ console.log(reqHeaders.getAll(All of the Headers methods throw TypeError if name is not a valid HTTP Header name. The mutation operations will throw TypeError if there is an immutable guard. Otherwise they fail silently. For example:

-
var res = Response.error();try{
+
var res = Response.error();try{
   res.headers.set("Origin","http://mybank.com");}catch(e){
-  console.log("Cannot pretend to be a bank!");}
+ console.log("Cannot pretend to be a bank!");}

Request

The Request interface defines a request to fetch a resource over HTTP. URL, method and headers are expected, but the Request also allows specifying a body, a request mode, credentials and cache hints.

The simplest Request is of course, just a URL, as you may do to GET a resource.

-
var req =new Request("/index.html");
+
var req =new Request("/index.html");
 console.log(req.method);// "GET"
-console.log(req.url);// "http://example.com/index.html"
+console.log(req.url);// "http://example.com/index.html"

You may also pass a Request to the Request() constructor to create a copy.
(This is not the same as calling the clone() method, which is covered in
the “Reading bodies” section.).

-
var copy =new Request(req);
+
var copy =new Request(req);
 console.log(copy.method);// "GET"
-console.log(copy.url);// "http://example.com/index.html"
+console.log(copy.url);// "http://example.com/index.html"

Again, this form is probably only useful in ServiceWorkers.

The non-URL attributes of the Request can only be set by passing initial
values as a second argument to the constructor. This argument is a dictionary.

-
var uploadReq =new Request("/uploadImage",{
+
var uploadReq =new Request("/uploadImage",{
   method:"POST",
   headers:{"Content-Type":"image/png",},
-  body:"image data"});
+ body:"image data"});

The Request’s mode is used to determine if cross-origin requests lead to valid responses, and which properties on the response are readable. Legal mode values are "same-origin", "no-cors" (default) @@ -134,10 +134,10 @@ console.log(copy.url); origin with this mode set, the result is simply an error. You could use this to ensure that
a request is always being made to your origin.

-
var arbitraryUrl = document.getElementById("url-input").value;
+
var arbitraryUrl = document.getElementById("url-input").value;
 fetch(arbitraryUrl,{ mode:"same-origin"}).then(function(res){
   console.log("Response succeeded?", res.ok);},function(e){
-  console.log("Please enter a same-origin URL!");});
+ console.log("Please enter a same-origin URL!");});

The "no-cors" mode captures what the web platform does by default for scripts you import from CDNs, images hosted on other domains, and so on. First, it prevents the method from being anything other than “HEAD”, @@ -155,7 +155,7 @@ fetch(arbitraryUrl,{ mode:most interesting photos today like this:

-
var u =new URLSearchParams();
+
var u =new URLSearchParams();
 u.append('method','flickr.interestingness.getList');
 u.append('api_key','<insert api key here>');
 u.append('format','json');
@@ -163,11 +163,11 @@ u.append('nojsoncallback',then(function(response){return response.json().then(function(json){// photo is a list of photos.return json.photos.photo;});}).then(function(photos){
   photos.forEach(function(photo){
-    console.log(photo.title);});});
+ console.log(photo.title);});});

You may not read out the “Date” header since Flickr does not allow it via
Access-Control-Expose-Headers.

-
response.headers.get("Date");// null
+
response.headers.get("Date");// null

The credentials enumeration determines if cookies for the other domain are
sent to cross-origin requests. This is similar to XHR’s withCredentials
flag, but tri-valued as "omit" (default), "same-origin" and "include".

@@ -222,9 +222,9 @@ apiCall.then(function(respon The
idiomatic way to return a Response to an intercepted request in ServiceWorkers is:

-
addEventListener('fetch',function(event){
+
addEventListener('fetch',function(event){
   event.respondWith(new Response("Response body",{
-    headers:{"Content-Type":"text/plain"}});});
+ headers:{"Content-Type":"text/plain"}});});

As you can see, Response has a two argument constructor, where both arguments are optional. The first argument is a body initializer, and the second is a dictionary to set the status, statusText and headers.

@@ -266,13 +266,13 @@ apiCall.then(function(respon

This is a significant improvement over XHR in terms of ease of use of non-text data!

Request bodies can be set by passing body parameters:

-
var form =new FormData(document.getElementById('login-form'));
+
var form =new FormData(document.getElementById('login-form'));
 fetch("/login",{
   method:"POST",
   body: form
-})
+})

Responses take the first argument as the body.

-
var res =new Response(new File(["chunk","chunk"],"archive.zip",{ type:"application/zip"}));
+
var res =new Response(new File(["chunk","chunk"],"archive.zip",{ type:"application/zip"}));

Both Request and Response (and by extension the fetch() function), will try to intelligently determine the content type. Request will also automatically set a “Content-Type” header if none is @@ -281,14 +281,14 @@ fetch("/login",{

It is important to realise that Request and Response bodies can only be read once! Both interfaces have a boolean attribute bodyUsed to determine if it is safe to read or not.

-
var res =new Response("one time use");
+
var res =new Response("one time use");
 console.log(res.bodyUsed);// false
 res.text().then(function(v){
   console.log(res.bodyUsed);// true});
 console.log(res.bodyUsed);// true
  
 res.text().catch(function(e){
-  console.log("Tried to read already consumed Response");});
+ console.log("Tried to read already consumed Response");});

This decision allows easing the transition to an eventual stream-based Fetch API. The intention is to let applications consume data as it arrives, allowing for JavaScript to deal with larger files like videos, and perform things @@ -302,7 +302,7 @@ res.text().catch(clone() MUST be called before the body of the corresponding object has been used. That is, clone() first, read later.

-
addEventListener('fetch',function(evt){var sheep =new Response("Dolly");
+
addEventListener('fetch',function(evt){var sheep =new Response("Dolly");
   console.log(sheep.bodyUsed);// falsevar clone = sheep.clone();
   console.log(clone.bodyUsed);// false
  
@@ -310,7 +310,7 @@ res.text().catch(log(sheep.bodyUsed);// false
   console.log(clone.bodyUsed);// true
  
-  evt.respondWith(cache.add(sheep.clone()).then(function(e){return sheep;});});
+ evt.respondWith(cache.add(sheep.clone()).then(function(e){return sheep;});});

Future improvements

Along with the transition to streams, Fetch will eventually have the ability to abort running fetch()es and some way to report the progress diff --git a/resources/tests/readability/blogger/expected.html b/resources/tests/readability/blogger/expected.html index 2acdfd1..bdfb3b2 100644 --- a/resources/tests/readability/blogger/expected.html +++ b/resources/tests/readability/blogger/expected.html @@ -4,14 +4,14 @@

So what's a GreenPak?


Silego Technology is a fabless semiconductor company located in the SF Bay area, which makes (among other things) a line of programmable logic devices known as GreenPak. Their 5th generation parts were just announced, but I started this project before that happened so I'm still targeting the 4th generation.

GreenPak devices are kind of like itty bitty PSoCs - they have a mixed signal fabric with an ADC, DACs, comparators, voltage references, plus a digital LUT/FF fabric and some typical digital MCU peripherals like counters and oscillators (but no CPU).

It's actually an interesting architecture - FPGAs (including some devices marketed as CPLDs) are a 2D array of LUTs connected via wires to adjacent cells, and true (product term) CPLDs are a star topology of AND-OR arrays connected by a crossbar. GreenPak, on the other hand, is a star topology of LUTs, flipflops, and analog/digital hard IP connected to a crossbar.

Without further ado, here's a block diagram showing all the cool stuff you get in the SLG46620V:

- - +
+
SLG46620V block diagram (from device datasheet)

They're also tiny (the SLG46620V is a 20-pin 0.4mm pitch STQFN measuring 2x3 mm, and the lower gate count SLG46140V is a mere 1.6x2 mm) and probably the cheapest programmable logic device on the market - $0.50 in low volume and less than $0.40 in larger quantities.

The Vdd range of GreenPak4 is huge, more like what you'd expect from an MCU than an FPGA! It can run on anything from 1.8 to 5V, although performance is only specified at 1.8, 3.3, and 5V nominal voltages. There's also a dual-rail version that trades one of the GPIO pins for a second power supply pin, allowing you to interface to logic at two different voltage levels.

To support low-cost/space-constrained applications, they even have the configuration memory on die. It's one-time programmable and needs external Vpp to program (presumably Silego didn't want to waste die area on charge pumps that would only be used once) but has a SRAM programming mode for prototyping.

The best part is that the development software (GreenPak Designer) is free of charge and provided for all major operating systems including Linux! Unfortunately, the only supported design entry method is schematic entry and there's no way to write your design in a HDL.

While schematics may be fine for quick tinkering on really simple designs, they quickly get unwieldy. The nightmare of a circuit shown below is just a bunch of counters hooked up to LEDs that blink at various rates.

- - +
+
Schematic from hell!

@@ -19,8 +19,8 @@

Great! How does it work?


Rather than wasting time writing a synthesizer, I decided to write a GreenPak technology library for Clifford Wolf's excellent open source synthesis tool, Yosys, and then make a place-and-route tool to turn that into a final netlist. The post-PAR netlist can then be loaded into GreenPak Designer in order to program the device.

The first step of the process is to run the "synth_greenpak4" Yosys flow on the Verilog source. This runs a generic RTL synthesis pass, then some coarse-grained extraction passes to infer shift register and counter cells from behavioral logic, and finally maps the remaining logic to LUT/FF cells and outputs a JSON-formatted netlist.

Once the design has been synthesized, my tool (named, surprisingly, gp4par) is then launched on the netlist. It begins by parsing the JSON and constructing a directed graph of cell objects in memory. A second graph, containing all of the primitives in the device and the legal connections between them, is then created based on the device specified on the command line. (As of now only the SLG46620V is supported; the SLG46621V can be added fairly easily but the SLG46140V has a slightly different microarchitecture which will require a bit more work to support.)

After the graphs are generated, each node in the netlist graph is assigned a numeric label identifying the type of cell and each node in the device graph is assigned a list of legal labels: for example, an I/O buffer site is legal for an input buffer, output buffer, or bidirectional buffer.

- - +
+
Example labeling for a subset of the netlist and device graphs

diff --git a/resources/tests/readability/clean-links/expected.html b/resources/tests/readability/clean-links/expected.html index 1de1d24..d042ec5 100644 --- a/resources/tests/readability/clean-links/expected.html +++ b/resources/tests/readability/clean-links/expected.html @@ -1,4 +1,4 @@ -

+

Study Webtext

Ah Bartleby! Ah humanity!

-

+ diff --git a/resources/tests/readability/hukumusume/expected.html b/resources/tests/readability/hukumusume/expected.html new file mode 100644 index 0000000..994ea5e --- /dev/null +++ b/resources/tests/readability/hukumusume/expected.html @@ -0,0 +1,107 @@ +
+ + + + + +
+ + + +
+ + +

福娘童話集 > きょうのイソップ童話 > 1月のイソップ童話 > 欲張りなイヌ +

+

元旦のイソップ童話



よくばりなイヌ



+ 欲張りなイヌ



ひらがな ←→ 日本語・英語 ←→ English

+
+ + + + + + + +
おりがみをつくろう( おりがみくらぶ より)

+犬の顔の折り紙いぬのかお犬の顔の紙いぬ +

+ + + + +
+ ♪音声配信(html5) +
亜姫の朗読☆ イソップ童話より
+

+  肉をくわえたイヌが、橋を渡っていました。  ふと下を見ると、川の中にも肉をくわえたイヌがいます。 イヌはそれを見て、思いました。(あいつの肉の方が、大きそうだ)  イヌは、くやしくてたまりません。 (そうだ、あいつをおどかして、あの肉を取ってやろう)  そこでイヌは、川の中のイヌに向かって思いっきり吠えました。 「ウゥー、ワン!!」  そのとたん、くわえていた肉はポチャンと川の中に落ちてしまいました。 「ああー、ぁぁー」  川の中には、がっかりしたイヌの顔がうつっています。  さっきの川の中のイヌは、水にうつった自分の顔だったのです。  同じ物を持っていても、人が持っている物の方が良く見え、また、欲張るとけっきょく損をするというお話しです。 +

+

+ おしまい +

+

前のページへ戻る



+ + + + + + + +
+ + + + + + + + + + + + + + + + +
+1月 1日の豆知識



+ 366日への旅
+
+きょうの記念日

元旦 +
+きょうの誕生花

松(まつ) +
+きょうの誕生日・出来事

1949年 Mr.マリック(マジシャン) +
+恋の誕生日占い

自分の考えをしっかりと持った女の子。 +
+なぞなぞ小学校

○(丸)を取ったらお母さんになってしまう男の人は? +
+あこがれの職業紹介

歌手 +
+恋の魔法とおまじない 001

両思いになれる おまじない +
1月 1日の童話・昔話



+ 福娘童話集
+きょうの日本昔話

ネコがネズミを追いかける訳 +
+きょうの世界昔話

モンゴルの十二支話 +
+きょうの日本民話

仕事の取替えっこ +
+きょうのイソップ童話

欲張りなイヌ +
+きょうの江戸小話

ぞうきんとお年玉 +
+きょうの百物語

百物語の幽霊 +
+ + + + + + + +
福娘のサイト
366日への旅

毎日の記念日・誕生花 ・有名人の誕生日と性格判断
福娘童話集

世界と日本の童話と昔話
女の子応援サイト -さくら-

誕生日占い、お仕事紹介、おまじない、など
子どもの病気相談所

病気検索と対応方法、症状から検索するWEB問診
世界60秒巡り

国旗国歌や世界遺産など、世界の国々の豆知識
+ +
diff --git a/resources/tests/readability/hukumusume/source.html b/resources/tests/readability/hukumusume/source.html new file mode 100644 index 0000000..7034430 --- /dev/null +++ b/resources/tests/readability/hukumusume/source.html @@ -0,0 +1,356 @@ + + + + + 欲張りなイヌ <福娘童話集 きょうのイソップ童話> + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ 福娘童話集 > きょうのイソップ童話 + + 福娘童話集 きょうのイソップ童話 + + 童話・昔話・おとぎ話の福娘童話集 +
+
+
+ + + + + + + + +
+ + + +
+ + + + + + + + + +
+ + +
+   +
+
+

+ 福娘童話集 > きょうのイソップ童話 > 1月のイソップ童話 > 欲張りなイヌ +

+

+ 元旦のイソップ童話
+
+
+
+ よくばりなイヌ
+
+
+
+ 欲張りなイヌ
+
+
+
+ ひらがな ←→ 日本語・英語 ←→ English +

+ + + + + + +
+ + + + + + + + + + + + +
+ + + おりがみをつくろう + + ( おりがみくらぶ より) + + +
+ + + + + + +
+ 犬の顔の折り紙いぬのかお   犬の顔の紙いぬ +
+
+
+ + + + + + + + + + + + +
+ ♪音声配信(html5) +
+ +
+ 亜姫の朗読☆ イソップ童話より +
+

+  肉をくわえたイヌが、橋を渡っていました。  ふと下を見ると、川の中にも肉をくわえたイヌがいます。 イヌはそれを見て、思いました。(あいつの肉の方が、大きそうだ)  イヌは、くやしくてたまりません。 (そうだ、あいつをおどかして、あの肉を取ってやろう)  そこでイヌは、川の中のイヌに向かって思いっきり吠えました。 「ウゥー、ワン!!」  そのとたん、くわえていた肉はポチャンと川の中に落ちてしまいました。 「ああー、ぁぁー」  川の中には、がっかりしたイヌの顔がうつっています。  さっきの川の中のイヌは、水にうつった自分の顔だったのです。  同じ物を持っていても、人が持っている物の方が良く見え、また、欲張るとけっきょく損をするというお話しです。 +

+

+ おしまい +

+

+ 前のページへ戻る
+
+
+
+ + +

+
+ + + + + + + + + + +
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+      1月 1日の豆知識
+
+
+
+ 366日への旅
+
+ きょうの記念日
+
+ 元旦 +
+ きょうの誕生花
+
+ 松(まつ) +
+ きょうの誕生日・出来事
+
+ 1949年 Mr.マリック(マジシャン) +
+ 恋の誕生日占い
+
+ 自分の考えをしっかりと持った女の子。 +
+ なぞなぞ小学校
+
+ ○(丸)を取ったらお母さんになってしまう男の人は? +
+ あこがれの職業紹介
+
+ 歌手 +
+ 恋の魔法とおまじない 001
+
+ 両思いになれる おまじない +
+   1月 1日の童話・昔話
+
+
+
+ 福娘童話集
+
+ きょうの日本昔話
+
+ ネコがネズミを追いかける訳 +
+ きょうの世界昔話
+
+ モンゴルの十二支話 +
+ きょうの日本民話
+
+ 仕事の取替えっこ +
+ きょうのイソップ童話
+
+ 欲張りなイヌ +
+ きょうの江戸小話
+
+ ぞうきんとお年玉 +
+ きょうの百物語
+
+ 百物語の幽霊 +
+ + + + + + + + + + + + + + + + + + + + + +
+ 福娘のサイト +
+ 366日への旅
+
+ 毎日の記念日・誕生花 ・有名人の誕生日と性格判断
+
+ 福娘童話集
+
+ 世界と日本の童話と昔話
+
+ 女の子応援サイト -さくら-
+
+ 誕生日占い、お仕事紹介、おまじない、など
+
+ 子どもの病気相談所
+
+ 病気検索と対応方法、症状から検索するWEB問診
+
+ 世界60秒巡り
+
+ 国旗国歌や世界遺産など、世界の国々の豆知識
+
+
+ + diff --git a/src/constants.rs b/src/constants.rs index 4244326..c5f93f4 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -73,7 +73,23 @@ pub const UNLIKELY_ROLES: &[&str] = &[ pub const DEFAULT_TAGS_TO_SCORE: &[&str] = &["SECTION", "H2", "H3", "H4", "H5", "H6", "P", "TD", "PRE"]; -pub static DIV_TO_P_ELEMS: Lazy> = Lazy::new(|| { +pub const DEPRECATED_SIZE_ATTRIBUTE_ELEMS: Lazy> = + Lazy::new(|| HashSet::from(["TABLE", "TH", "TD", "HR", "PRE"])); +pub const PRESENTATIONAL_ATTRIBUTES: &[&str] = &[ + "align", + "background", + "bgcolor", + "border", + "cellpadding", + "cellspacing", + "frame", + "hspace", + "rules", + "style", + "valign", + "vspace", +]; +pub static DIV_TO_P_ELEMS: Lazy> = Lazy::new(|| { HashSet::from([ "BLOCKQUOTE", "DL", diff --git a/src/full_text_parser/mod.rs b/src/full_text_parser/mod.rs index af8dd9f..40bfb68 100644 --- a/src/full_text_parser/mod.rs +++ b/src/full_text_parser/mod.rs @@ -19,7 +19,6 @@ use fingerprints::Fingerprints; use libxml::parser::Parser; use libxml::tree::{Document, Node}; use libxml::xpath::Context; -use log::{debug, error, info, warn}; use reqwest::header::HeaderMap; use reqwest::{Client, Url}; use std::path::Path; @@ -42,7 +41,7 @@ impl FullTextParser { ) -> Result { libxml::tree::node::set_node_rc_guard(10); - info!("Scraping article: '{}'", url.as_str()); + log::debug!("Scraping article: '{url}'"); // check if we have a config for the url let config = self.get_grabber_config(url); @@ -58,14 +57,14 @@ impl FullTextParser { .headers(headers) .send() .await - .map_err(|err| { - error!("Failed head request to: '{}' - '{}'", url.as_str(), err); + .map_err(|error| { + log::error!("Failed head request to: '{url}' - '{error}'"); FullTextParserError::Http })?; // check if url redirects and we need to pick up the new url let url = if let Some(new_url) = Util::check_redirect(&response, url) { - debug!("Url '{}' redirects to '{}'", url.as_str(), new_url.as_str()); + log::debug!("Url '{url}' redirects to '{new_url}'"); 
new_url } else { url.clone() @@ -117,16 +116,18 @@ impl FullTextParser { .await?; let context = Context::new(&document).map_err(|()| { - error!("Failed to create xpath context for extracted article"); + log::error!("Failed to create xpath context for extracted article"); FullTextParserError::Xml })?; if let Err(error) = Self::prevent_self_closing_tags(&context) { - error!("Preventing self closing tags failed - '{}'", error); + log::error!("Preventing self closing tags failed - '{error}'"); return Err(error); } - Self::post_process_content(&document)?; + if let Some(mut root) = document.get_root_element() { + Self::post_process_content(&mut root, false)?; + } article.document = Some(document); @@ -151,14 +152,14 @@ impl FullTextParser { global_config.single_page_link.as_deref(), ); if let Some(xpath_single_page_link) = rule { - debug!( + log::debug!( "Single page link xpath specified in config '{}'", xpath_single_page_link ); if let Some(single_page_url) = Util::find_page_url(&xpath_ctx, xpath_single_page_link) { // parse again with single page url - debug!("Single page link found '{}'", single_page_url); + log::debug!("Single page link found '{}'", single_page_url); if let Err(error) = self .parse_single_page( @@ -171,8 +172,8 @@ impl FullTextParser { ) .await { - log::warn!("Single Page parsing: {}", error); - log::debug!("Continuing with regular parser."); + log::warn!("Single Page parsing: {error}"); + log::info!("Continuing with regular parser."); } } } @@ -181,26 +182,35 @@ impl FullTextParser { if article.thumbnail_url.is_none() { Self::check_for_thumbnail(&xpath_ctx, article); } - Self::strip_junk(&xpath_ctx, config, global_config); - Self::fix_urls(&xpath_ctx, &article.url); + Self::prep_content(&xpath_ctx, config, global_config, &article.url); let found_body = Self::extract_body(&xpath_ctx, root, config, global_config)?; if !found_body { if let Err(error) = Readability::extract_body(document, root, article.title.as_deref()) { - log::error!("Both ftr and 
readability failed to find content: {}", error); + log::error!("Both ftr and readability failed to find content: {error}"); return Err(error); } } while let Some(url) = self.check_for_next_page(&xpath_ctx, config, global_config) { + log::debug!(""); + let headers = Util::generate_headers(config, global_config)?; let html = Self::download(&url, client, headers).await?; document = Self::parse_html(&html, config, global_config)?; xpath_ctx = Self::get_xpath_ctx(&document)?; - Self::strip_junk(&xpath_ctx, config, global_config); - Self::fix_urls(&xpath_ctx, &url); - Self::extract_body(&xpath_ctx, root, config, global_config)?; + Self::prep_content(&xpath_ctx, config, global_config, &url); + let found_body = Self::extract_body(&xpath_ctx, root, config, global_config)?; + + if !found_body { + if let Err(error) = + Readability::extract_body(document, root, article.title.as_deref()) + { + log::error!("Both ftr and readability failed to find content: {error}"); + return Err(error); + } + } } Ok(()) @@ -227,14 +237,14 @@ impl FullTextParser { // parse html let parser = Parser::default_html(); parser.parse_string(html.as_str()).map_err(|err| { - error!("Parsing HTML failed for downloaded HTML {:?}", err); + log::error!("Parsing HTML failed for downloaded HTML {:?}", err); FullTextParserError::Xml }) } fn get_xpath_ctx(doc: &Document) -> Result { Context::new(doc).map_err(|()| { - error!("Creating xpath context failed for downloaded HTML"); + log::error!("Creating xpath context failed for downloaded HTML"); FullTextParserError::Xml }) } @@ -254,8 +264,7 @@ impl FullTextParser { let xpath_ctx = Self::get_xpath_ctx(&document)?; metadata::extract(&xpath_ctx, config, Some(global_config), article); Self::check_for_thumbnail(&xpath_ctx, article); - Self::strip_junk(&xpath_ctx, config, global_config); - Self::fix_urls(&xpath_ctx, url); + Self::prep_content(&xpath_ctx, config, global_config, url); Self::extract_body(&xpath_ctx, root, config, global_config)?; Ok(()) @@ -272,7 +281,7 @@ 
impl FullTextParser { .send() .await .map_err(|err| { - error!( + log::error!( "Downloading HTML failed: GET '{}' - '{}'", url.as_str(), err @@ -289,22 +298,22 @@ impl FullTextParser { match from_utf8(&bytes) { Ok(utf8_str) => { - debug!("Valid utf-8 string"); + log::debug!("Valid utf-8 string"); return Ok(utf8_str.into()); } Err(error) => { - debug!("Invalid utf-8 string"); + log::debug!("Invalid utf-8 string"); let lossy_string = std::string::String::from_utf8_lossy(&bytes); if let Some(encoding) = Self::get_encoding_from_html(&lossy_string) { - debug!("Encoding extracted from HTML: '{}'", encoding); + log::debug!("Encoding extracted from HTML: '{}'", encoding); if let Some(decoded_html) = Self::decode_html(&bytes, encoding) { return Ok(decoded_html); } } if let Some(encoding) = Self::get_encoding_from_http_header(&headers) { - debug!("Encoding extracted from headers: '{}'", encoding); + log::debug!("Encoding extracted from headers: '{}'", encoding); if let Some(decoded_html) = Self::decode_html(&bytes, encoding) { return Ok(decoded_html); } @@ -350,7 +359,7 @@ impl FullTextParser { return Some(decoded_html.into_owned()); } } - warn!("Could not decode HTML. Encoding: '{}'", encoding); + log::warn!("Could not decode HTML. Encoding: '{}'", encoding); None } @@ -364,7 +373,7 @@ impl FullTextParser { Ok(name.into()) } None => { - error!("Getting config failed due to bad Url"); + log::error!("Getting config failed due to bad Url"); Err(FullTextParserError::Config) } } @@ -420,7 +429,7 @@ impl FullTextParser { .and_then(|correct_url| node.set_property("src", &correct_url).ok()) .is_none() { - warn!("Failed to fix lazy loading image"); + log::warn!("Failed to fix lazy loading image"); } } Ok(()) @@ -445,10 +454,10 @@ impl FullTextParser { }) .is_err(); if !success { - warn!("Failed to add iframe as child of video wrapper
"); + log::warn!("Failed to add iframe as child of video wrapper
"); } } else { - warn!("Failed to get parent of iframe"); + log::warn!("Failed to get parent of iframe"); } } Ok(()) @@ -529,7 +538,21 @@ impl FullTextParser { _ = Self::repair_urls(context, "//iframe", "src", url); } - fn strip_junk(context: &Context, config: Option<&ConfigEntry>, global_config: &ConfigEntry) { + fn prep_content( + context: &Context, + config: Option<&ConfigEntry>, + global_config: &ConfigEntry, + url: &Url, + ) { + // replace H1 with H2 as H1 should be only title that is displayed separately + if let Ok(h1_nodes) = Util::evaluate_xpath(context, "//h1", false) { + for mut h1_node in h1_nodes { + _ = h1_node.set_name("h2"); + } + } + + _ = Util::mark_data_tables(context); + // strip specified xpath if let Some(config) = config { for xpath_strip in &config.xpath_strip { @@ -620,6 +643,8 @@ impl FullTextParser { _ = Util::strip_node(context, "//footer"); _ = Util::strip_node(context, "//link"); _ = Util::strip_node(context, "//aside"); + + Self::fix_urls(context, url); } /** @@ -759,11 +784,13 @@ impl FullTextParser { return Err(FullTextParserError::Xml); } + Self::post_process_content(&mut node, true)?; + node.unlink(); if root.add_child(&mut node).is_ok() { found_something = true; } else { - error!("Failed to add body to prepared document"); + log::error!("Failed to add body to prepared document"); return Err(FullTextParserError::Xml); } } @@ -830,35 +857,22 @@ impl FullTextParser { Ok(()) } - pub(crate) fn post_process_content(document: &Document) -> Result<(), FullTextParserError> { - let context = Context::new(document).map_err(|()| { - error!("Creating xpath context failed for article HTML"); - FullTextParserError::Xml - })?; - - // replace H1 with H2 as H1 should be only title that is displayed separately - let h1_nodes = Util::evaluate_xpath(&context, "//h1", false)?; - for mut h1_node in h1_nodes { - h1_node.set_name("h2").map_err(|e| { - log::error!("{e}"); - FullTextParserError::Xml - })?; + pub(crate) fn post_process_content( + node: &mut 
Node, + clean_conditionally: bool, + ) -> Result<(), FullTextParserError> { + if clean_conditionally { + Util::clean_conditionally(node, "fieldset"); + Util::clean_conditionally(node, "table"); + Util::clean_conditionally(node, "ul"); + Util::clean_conditionally(node, "div"); } - Util::mark_data_tables(&context)?; + Self::clean_attributes(node)?; + Self::simplify_nested_elements(node)?; - if let Some(mut root) = document.get_root_element() { - Util::clean_conditionally(&mut root, "fieldset"); - Util::clean_conditionally(&mut root, "table"); - Util::clean_conditionally(&mut root, "ul"); - Util::clean_conditionally(&mut root, "div"); - - Self::clean_attributes(&mut root)?; - Self::simplify_nested_elements(&mut root)?; - - Self::remove_single_cell_tables(&mut root); - Self::remove_extra_p_and_div(&mut root); - } + Self::remove_single_cell_tables(node); + Self::remove_extra_p_and_div(node); Ok(()) } @@ -927,6 +941,17 @@ impl FullTextParser { let mut node_iter = Some(root.clone()); while let Some(mut node) = node_iter { + let tag_name = node.get_name().to_uppercase(); + + for attr in constants::PRESENTATIONAL_ATTRIBUTES { + _ = node.remove_attribute(attr); + } + + if constants::DEPRECATED_SIZE_ATTRIBUTE_ELEMS.contains(tag_name.as_str()) { + _ = node.remove_attribute("width"); + _ = node.remove_attribute("height"); + } + node.remove_attribute("class").map_err(|e| { log::error!("{e}"); FullTextParserError::Xml diff --git a/src/full_text_parser/readability/mod.rs b/src/full_text_parser/readability/mod.rs index 8762300..dfe2a5b 100644 --- a/src/full_text_parser/readability/mod.rs +++ b/src/full_text_parser/readability/mod.rs @@ -497,6 +497,11 @@ impl Readability { } } + crate::FullTextParser::post_process_content( + &mut article_content, + state.clean_conditionally, + )?; + if needed_to_create_top_candidate { // We already created a fake div thing, and there wouldn't have been any siblings left // for the previous loop, so there's no point trying to create a new div, and 
then diff --git a/src/full_text_parser/readability/tests.rs b/src/full_text_parser/readability/tests.rs index 9c7dfa5..fb02eb2 100644 --- a/src/full_text_parser/readability/tests.rs +++ b/src/full_text_parser/readability/tests.rs @@ -18,9 +18,7 @@ async fn run_test(name: &str) { let document = crate::FullTextParser::parse_html(&html, None, &empty_config).unwrap(); let xpath_ctx = crate::FullTextParser::get_xpath_ctx(&document).unwrap(); - crate::FullTextParser::strip_junk(&xpath_ctx, None, &empty_config); - - crate::FullTextParser::fix_urls(&xpath_ctx, &url); + crate::FullTextParser::prep_content(&xpath_ctx, None, &empty_config, &url); let mut article = Article { title: None, author: None, @@ -36,7 +34,9 @@ async fn run_test(name: &str) { metadata::extract(&xpath_ctx, None, None, &mut article); super::Readability::extract_body(document, &mut root, article.title.as_deref()).unwrap(); - crate::FullTextParser::post_process_content(&article_document).unwrap(); + if let Some(mut root) = article_document.get_root_element() { + crate::FullTextParser::post_process_content(&mut root, false).unwrap(); + } article.document = Some(article_document); let html = article.get_content().unwrap(); @@ -236,6 +236,11 @@ async fn hidden_nodes() { run_test("hidden-nodes").await } +#[tokio::test] +async fn hukumusume() { + run_test("hukumusume").await +} + #[tokio::test] async fn webmd_1() { run_test("webmd-1").await