| -rw-r--r-- | paper2remarkable/providers/__init__.py |   2 |
| -rw-r--r-- | paper2remarkable/providers/html.py     | 122 |
| -rw-r--r-- | paper2remarkable/ui.py                 |  13 |
| -rw-r--r-- | paper2remarkable/utils.py              |   4 |
| -rw-r--r-- | setup.py                               |   4 |
| -rw-r--r-- | tests/test_providers.py                |   8 |
6 files changed, 150 insertions, 3 deletions
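In short, this commit adds an HTML provider that renders a web article to a clean PDF, registers it in the provider list, and adds an --html flag so it can be forced for arbitrary pages. The new test at the bottom of the diff drives the provider directly; a minimal sketch of that call pattern, using the same URL and expected filename as the test (only the local variable names here are illustrative):

    # Sketch: drive the new provider from Python the way the added test does.
    # upload=False skips the reMarkable upload step, so only the PDF is written.
    from paper2remarkable.providers import HTML

    prov = HTML(upload=False, verbose=False)
    filename = prov.run(
        "https://hbr.org/2019/11/getting-your-team-to-do-more-than-meet-deadlines"
    )
    # basename of filename: Getting_Your_Team_to_Do_More_Than_Meet_Deadlines.pdf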
diff --git a/paper2remarkable/providers/__init__.py b/paper2remarkable/providers/__init__.py
index fabdcfe..f87a044 100644
--- a/paper2remarkable/providers/__init__.py
+++ b/paper2remarkable/providers/__init__.py
@@ -3,6 +3,7 @@
 from .acm import ACM
 from .arxiv import Arxiv
 from .citeseerx import CiteSeerX
+from .html import HTML
 from .local import LocalFile
 from .neurips import NeurIPS
 from .openreview import OpenReview
@@ -23,4 +24,5 @@ providers = [
     Springer,
     LocalFile,
     PdfUrl,
+    HTML,
 ]
diff --git a/paper2remarkable/providers/html.py b/paper2remarkable/providers/html.py
new file mode 100644
index 0000000..6136fc9
--- /dev/null
+++ b/paper2remarkable/providers/html.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+
+"""Provider for HTML documents
+
+This provider is a little bit special, in that it isn't simply pulling an
+academic paper from a site, but instead aims to pull a HTML article.
+
+Author: G.J.J. van den Burg
+License: See LICENSE file.
+Copyright: 2020, G.J.J. van den Burg
+
+"""
+
+import html2text
+import markdown
+import readability
+import titlecase
+import unidecode
+import urllib
+import weasyprint
+import weasyprint.fonts
+
+from ._base import Provider
+from ._info import Informer
+
+from ..utils import clean_string, get_page_with_retry
+from ..log import Logger
+
+logger = Logger()
+
+CSS = """
+@import url('https://fonts.googleapis.com/css?family=EB+Garamond|Noto+Serif&display=swap');
+@page { size: A4; margin: 1in; }
+a { color: black; }
+img { display: block; margin: 0 auto; text-align: center; max-width: 70%; max-height: 300px; }
+p { font-size: 11pt; font-family: 'EB Garamond'; hyphens: auto; text-align: justify; }
+h1,h2,h3 { font-family: 'Noto Serif'; }
+h1 { font-size: 26px; }
+h2 { font-size: 18px; }
+h3 { font-size: 14px; }
+"""
+
+
+def my_fetcher(url):
+    if url.startswith("//"):
+        url = "https:" + url
+    elif url.startswith("file:///"):
+        url = "https:" + url[len("file:/") :]
+    return weasyprint.default_url_fetcher(url)
+
+
+class HTMLInformer(Informer):
+    def __init__(self):
+        super().__init__()
+
+    def get_filename(self, abs_url):
+        request_text = get_page_with_retry(abs_url, return_text=True)
+        doc = readability.Document(request_text)
+        title = doc.title()
+
+        # Clean the title and make it titlecase
+        title = clean_string(title)
+        title = titlecase.titlecase(title)
+        title = title.replace(" ", "_")
+        title = clean_string(title)
+        name = title + ".pdf"
+        name = unidecode.unidecode(name)
+        logger.info("Created filename: %s" % name)
+        return name
+
+
+class HTML(Provider):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.informer = HTMLInformer()
+
+    def get_abs_pdf_urls(self, url):
+        return url, url
+
+    def retrieve_pdf(self, pdf_url, filename):
+        """Turn the HTML article in a clean pdf file"""
+        # Steps
+        # 1. Pull the HTML page using requests
+        # 2. Extract the article part of the page using readability
+        # 3. Convert the article HTML to markdown using html2text
+        # 4. Convert the markdown back to HTML (this is done to sanitize HTML)
+        # 4. Convert the HTML to PDF, pulling in images where needed
+        # 5. Save the PDF to the specified filename.
+        request_text = get_page_with_retry(pdf_url, return_text=True)
+        doc = readability.Document(request_text)
+        title = doc.title()
+        raw_html = doc.summary(html_partial=True)
+
+        h2t = html2text.HTML2Text()
+        h2t.wrap_links = False
+        text = h2t.handle(raw_html)
+
+        # Add the title back to the document
+        article = "# {title}\n\n{text}".format(title=title, text=text)
+
+        # fix relative urls
+        base_url = "{0.scheme}://{0.netloc}".format(
+            urllib.parse.urlsplit(pdf_url)
+        )
+        html_article = markdown.markdown(article)
+        html_article = html_article.replace(' src="//', ' src="https://')
+        html_article = html_article.replace(
+            ' src="/', ' src="{base}/'.format(base=base_url)
+        )
+
+        font_config = weasyprint.fonts.FontConfiguration()
+        html = weasyprint.HTML(string=html_article, url_fetcher=my_fetcher)
+        css = weasyprint.CSS(string=CSS, font_config=font_config)
+
+        html.write_pdf(filename, stylesheets=[css], font_config=font_config)
+
+    def validate(src):
+        try:
+            result = urllib.parse.urlparse(src)
+            return all([result.scheme, result.netloc, result.path])
+        except:
+            return False
diff --git a/paper2remarkable/ui.py b/paper2remarkable/ui.py
index 032bf99..05116ee 100644
--- a/paper2remarkable/ui.py
+++ b/paper2remarkable/ui.py
@@ -13,7 +13,7 @@
 import sys
 
 from . import __version__, GITHUB_URL
-from .providers import providers, LocalFile
+from .providers import providers, LocalFile, HTML
 from .utils import follow_redirects, is_url
 
 
@@ -22,6 +22,11 @@ def parse_args():
         description="Paper2reMarkable version %s" % __version__
     )
     parser.add_argument(
+        "--html",
+        help="URL is to a HTML article instead of a PDF",
+        action="store_true",
+    )
+    parser.add_argument(
         "-b",
         "--blank",
         help="Add a blank page after every page of the PDF",
@@ -99,7 +104,11 @@ def main():
     args = parse_args()
 
     cookiejar = None
-    if is_url(args.input):
+    if args.html and is_url(args.input):
+        # input is a url
+        url, cookiejar = follow_redirects(args.input)
+        provider = HTML
+    elif is_url(args.input):
         # input is a url
         url, cookiejar = follow_redirects(args.input)
         provider = next((p for p in providers if p.validate(url)), None)
diff --git a/paper2remarkable/utils.py b/paper2remarkable/utils.py
index 79421df..d4e5075 100644
--- a/paper2remarkable/utils.py
+++ b/paper2remarkable/utils.py
@@ -64,7 +64,7 @@ def download_url(url, filename, cookiejar=None):
         fid.write(content)
 
 
-def get_page_with_retry(url, tries=5, cookiejar=None):
+def get_page_with_retry(url, tries=5, cookiejar=None, return_text=False):
     count = 0
     jar = {} if cookiejar is None else cookiejar
     while count < tries:
@@ -82,6 +82,8 @@ def get_page_with_retry(url, tries=5, cookiejar=None):
             time.sleep(5)
             continue
         logger.info("Downloaded url: %s" % url)
+        if return_text:
+            return res.text
         return res.content
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,6 +26,10 @@ REQUIRED = [
     "titlecase>=0.12",
     "PyPDF2>=1.26",
     "regex>=2018.11"
+    "readability-lxml>=0.7.1",
+    "html2text>=2020.1.16",
+    "weasyprint>=51",
+    "markdown>=3.1.1"
 ]
 
 docs_require = []
diff --git a/tests/test_providers.py b/tests/test_providers.py
index e256eec..80f4662 100644
--- a/tests/test_providers.py
+++ b/tests/test_providers.py
@@ -16,6 +16,7 @@ from paper2remarkable.providers import (
     ACM,
     Arxiv,
     CiteSeerX,
+    HTML,
     LocalFile,
     NeurIPS,
     OpenReview,
@@ -206,6 +207,13 @@ class TestProviders(unittest.TestCase):
         filename = prov.run(url)
         self.assertEqual(exp, os.path.basename(filename))
 
+    def test_html_1(self):
+        prov = HTML(upload=False, verbose=VERBOSE)
+        url = "https://hbr.org/2019/11/getting-your-team-to-do-more-than-meet-deadlines"
+        exp = "Getting_Your_Team_to_Do_More_Than_Meet_Deadlines.pdf"
+        filename = prov.run(url)
+        self.assertEqual(exp, os.path.basename(filename))
+
 
 if __name__ == "__main__":
     unittest.main()
