# ---- html2text/__main__.py ----
from html2text.cli import main

main()


# ---- html2text/cli.py ----
import argparse
import sys

from html2text import HTML2Text, __version__, config


def main():
    baseurl = ""

    class bcolors:
        HEADER = "\033[95m"
        OKBLUE = "\033[94m"
        OKGREEN = "\033[92m"
        WARNING = "\033[93m"
        FAIL = "\033[91m"
        ENDC = "\033[0m"
        BOLD = "\033[1m"
        UNDERLINE = "\033[4m"

    p = argparse.ArgumentParser()
    p.add_argument(
        "--default-image-alt",
        dest="default_image_alt",
        default=config.DEFAULT_IMAGE_ALT,
        help="The default alt string for images with missing ones",
    )
    p.add_argument(
        "--pad-tables",
        dest="pad_tables",
        action="store_true",
        default=config.PAD_TABLES,
        help="pad the cells to equal column width in tables",
    )
    p.add_argument(
        "--no-wrap-links",
        dest="wrap_links",
        action="store_false",
        default=config.WRAP_LINKS,
        help="don't wrap links during conversion",
    )
    p.add_argument(
        "--wrap-list-items",
        dest="wrap_list_items",
        action="store_true",
        default=config.WRAP_LIST_ITEMS,
        help="wrap list items during conversion",
    )
    p.add_argument(
        "--ignore-emphasis",
        dest="ignore_emphasis",
        action="store_true",
        default=config.IGNORE_EMPHASIS,
        help="don't include any formatting for emphasis",
    )
    p.add_argument(
        "--reference-links",
        dest="inline_links",
        action="store_false",
        default=config.INLINE_LINKS,
        help="use reference style links instead of inline links",
    )
    p.add_argument(
        "--ignore-links",
        dest="ignore_links",
        action="store_true",
        default=config.IGNORE_ANCHORS,
        help="don't include any formatting for links",
    )
    p.add_argument(
        "--protect-links",
        dest="protect_links",
        action="store_true",
        default=config.PROTECT_LINKS,
        help="protect links from line breaks surrounding them with angle brackets",
    )
    p.add_argument(
        "--ignore-images",
        dest="ignore_images",
        action="store_true",
        default=config.IGNORE_IMAGES,
        help="don't include any formatting for images",
    )
    p.add_argument(
        "--images-as-html",
        dest="images_as_html",
        action="store_true",
        default=config.IMAGES_AS_HTML,
        help=(
            "Always write image tags as raw html; preserves `height`, `width` and "
            "`alt` if possible."
        ),
    )
    p.add_argument(
        "--images-to-alt",
        dest="images_to_alt",
        action="store_true",
        default=config.IMAGES_TO_ALT,
        help="Discard image data, only keep alt text",
    )
    p.add_argument(
        "--images-with-size",
        dest="images_with_size",
        action="store_true",
        default=config.IMAGES_WITH_SIZE,
        help=(
            "Write image tags with height and width attrs as raw html to retain "
            "dimensions"
        ),
    )
    p.add_argument(
        "-g",
        "--google-doc",
        action="store_true",
        dest="google_doc",
        default=False,
        help="convert an html-exported Google Document",
    )
    p.add_argument(
        "-d",
        "--dash-unordered-list",
        action="store_true",
        dest="ul_style_dash",
        default=False,
        help="use a dash rather than a star for unordered list items",
    )
    p.add_argument(
        "-e",
        "--asterisk-emphasis",
        action="store_true",
        dest="em_style_asterisk",
        default=False,
        help="use an asterisk rather than an underscore for emphasized text",
    )
    p.add_argument(
        "-b",
        "--body-width",
        dest="body_width",
        type=int,
        default=config.BODY_WIDTH,
        help="number of characters per output line, 0 for no wrap",
    )
    p.add_argument(
        "-i",
        "--google-list-indent",
        dest="list_indent",
        type=int,
        default=config.GOOGLE_LIST_INDENT,
        help="number of pixels Google indents nested lists",
    )
    p.add_argument(
        "-s",
        "--hide-strikethrough",
        action="store_true",
        dest="hide_strikethrough",
        default=False,
        help="hide strike-through text. only relevant when -g is specified as well",
    )
    p.add_argument(
        "--escape-all",
        action="store_true",
        dest="escape_snob",
        default=False,
        help=(
            "Escape all special characters.  Output is less readable, but avoids "
            "corner case formatting issues."
        ),
    )
    p.add_argument(
        "--bypass-tables",
        action="store_true",
        dest="bypass_tables",
        default=config.BYPASS_TABLES,
        help="Format tables in HTML rather than Markdown syntax.",
    )
    p.add_argument(
        "--ignore-tables",
        action="store_true",
        dest="ignore_tables",
        default=config.IGNORE_TABLES,
        help="Ignore table-related tags (table, th, td, tr) while keeping rows.",
    )
    p.add_argument(
        "--single-line-break",
        action="store_true",
        dest="single_line_break",
        default=config.SINGLE_LINE_BREAK,
        help=(
            "Use a single line break after a block element rather than two line "
            "breaks. NOTE: Requires --body-width=0"
        ),
    )
    p.add_argument(
        "--unicode-snob",
        action="store_true",
        dest="unicode_snob",
        default=config.UNICODE_SNOB,
        help="Use unicode throughout document",
    )
    p.add_argument(
        "--no-automatic-links",
        action="store_false",
        dest="use_automatic_links",
        default=config.USE_AUTOMATIC_LINKS,
        help="Do not use automatic links wherever applicable",
    )
    p.add_argument(
        "--no-skip-internal-links",
        action="store_false",
        dest="skip_internal_links",
        default=config.SKIP_INTERNAL_LINKS,
        help="Do not skip internal links",
    )
    p.add_argument(
        "--links-after-para",
        action="store_true",
        dest="links_each_paragraph",
        default=config.LINKS_EACH_PARAGRAPH,
        help="Put links after each paragraph instead of document",
    )
    p.add_argument(
        "--mark-code",
        action="store_true",
        dest="mark_code",
        default=config.MARK_CODE,
        help="Mark program code blocks with [code]...[/code]",
    )
    p.add_argument(
        "--decode-errors",
        dest="decode_errors",
        default=config.DECODE_ERRORS,
        help=(
            "What to do in case of decode errors.'ignore', 'strict' and 'replace' are "
            "acceptable values"
        ),
    )
    p.add_argument(
        "--open-quote",
        dest="open_quote",
        default=config.OPEN_QUOTE,
        help="The character used to open quotes",
    )
    p.add_argument(
        "--close-quote",
        dest="close_quote",
        default=config.CLOSE_QUOTE,
        help="The character used to close quotes",
    )
    p.add_argument(
        "--version", action="version", version=".".join(map(str, __version__))
    )
    p.add_argument("filename", nargs="?")
    p.add_argument("encoding", nargs="?", default="utf-8")
    args = p.parse_args()

    if args.filename and args.filename != "-":
        with open(args.filename, "rb") as fp:
            data = fp.read()
    else:
        data = sys.stdin.buffer.read()

    try:
        data = data.decode(args.encoding, args.decode_errors)
    except UnicodeDecodeError as err:
        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
        warning += " Use the " + bcolors.OKGREEN
        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
        print(warning)
        raise err

    h = HTML2Text(baseurl=baseurl)
    # handle options
    if args.ul_style_dash:
        h.ul_item_mark = "-"
    if args.em_style_asterisk:
        h.emphasis_mark = "*"
        h.strong_mark = "__"

    h.body_width = args.body_width
    h.google_list_indent = args.list_indent
    h.ignore_emphasis = args.ignore_emphasis
    h.ignore_links = args.ignore_links
    h.protect_links = args.protect_links
    h.ignore_images = args.ignore_images
    h.images_as_html = args.images_as_html
    h.images_to_alt = args.images_to_alt
    h.images_with_size = args.images_with_size
    h.google_doc = args.google_doc
    h.hide_strikethrough = args.hide_strikethrough
    h.escape_snob = args.escape_snob
    h.bypass_tables = args.bypass_tables
    h.ignore_tables = args.ignore_tables
    h.single_line_break = args.single_line_break
    h.inline_links = args.inline_links
    h.unicode_snob = args.unicode_snob
    h.use_automatic_links = args.use_automatic_links
    h.skip_internal_links = args.skip_internal_links
    h.links_each_paragraph = args.links_each_paragraph
    h.mark_code = args.mark_code
    h.wrap_links = args.wrap_links
    h.wrap_list_items = args.wrap_list_items
    h.pad_tables = args.pad_tables
    h.default_image_alt = args.default_image_alt
    h.open_quote = args.open_quote
    h.close_quote = args.close_quote

    sys.stdout.write(h.handle(data))


# ---- html2text/utils.py ----
import html.entities

from html2text import config

unifiable_n = {
    html.entities.name2codepoint[k]: v
    for k, v in config.UNIFIABLE.items()
    if k != "nbsp"
}


def hn(tag):
    if tag[0] == "h" and len(tag) == 2:
        n = tag[1]
        if "0" < n <= "9":
            return int(n)
    return 0


def dumb_property_dict(style):
    """
    :returns: A hash of css attributes
    """
    return {
        x.strip().lower(): y.strip().lower()
        for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
    }


def dumb_css_parser(data):
    """
    :type data: str

    :returns: A hash of css selectors, each of which contains a hash of
        css attributes.
    :rtype: dict
    """
    # remove @import sentences
    data += ";"
    importIndex = data.find("@import")
    while importIndex != -1:
        data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
        importIndex = data.find("@import")

    # parse the css. reverted from dictionary comprehension in order to
    # support older pythons
    elements = [x.split("{") for x in data.split("}") if "{" in x.strip()]
    try:
        elements = {a.strip(): dumb_property_dict(b) for a, b in elements}
    except ValueError:
        elements = {}  # not that important

    return elements


def element_style(attrs, style_def, parent_style):
    """
    :type attrs: dict
    :type style_def: dict
    :type style_def: dict

    :returns: A hash of the 'final' style attributes of the element
    :rtype: dict
    """
    style = parent_style.copy()
    if "class" in attrs:
        for css_class in attrs["class"].split():
            css_style = style_def.get("." + css_class, {})
            style.update(css_style)
    if "style" in attrs:
        immediate_style = dumb_property_dict(attrs["style"])
        style.update(immediate_style)

    return style


def google_list_style(style):
    """
    Finds out whether this is an ordered or unordered list

    :type style: dict

    :rtype: str
    """
    if "list-style-type" in style:
        list_style = style["list-style-type"]
        if list_style in ["disc", "circle", "square", "none"]:
            return "ul"

    return "ol"


def google_has_height(style):
    """
    Check if the style of the element has the 'height' attribute
    explicitly defined

    :type style: dict

    :rtype: bool
    """
    return "height" in style


def google_text_emphasis(style):
    """
    :type style: dict

    :returns: A list of all emphasis modifiers of the element
    :rtype: list
    """
    emphasis = []
    if "text-decoration" in style:
        emphasis.append(style["text-decoration"])
    if "font-style" in style:
        emphasis.append(style["font-style"])
    if "font-weight" in style:
        emphasis.append(style["font-weight"])

    return emphasis


def google_fixed_width_font(style):
    """
    Check if the css of the current element defines a fixed width font

    :type style: dict

    :rtype: bool
    """
    font_family = ""
    if "font-family" in style:
        font_family = style["font-family"]
    return "courier new" == font_family or "consolas" == font_family


def list_numbering_start(attrs):
    """
    Extract numbering from list element attributes

    :type attrs: dict

    :rtype: int or None
    """
    if "start" in attrs:
        try:
            return int(attrs["start"]) - 1
        except ValueError:
            pass

    return 0


def skipwrap(para, wrap_links, wrap_list_items):
    # If it appears to contain a link
    # don't wrap
    if (len(config.RE_LINK.findall(para)) > 0) and not wrap_links:
        return True
    # If the text begins with four spaces or one tab, it's a code block;
    # don't wrap
    if para[0:4] == "    " or para[0] == "\t":
        return True

    # If the text begins with only two "--", possibly preceded by
    # whitespace, that's an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False

    # I'm not sure what this is for; I thought it was to detect lists,
    # but there's a <br/>-inside-<span> case in one of the tests that
    # also depends upon it.
    if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
        return not wrap_list_items

    # If the text begins with a single -, *, or +, followed by a space,
    # or an integer, followed by a ., followed by a space (in either
    # case optionally proceeded by whitespace), it's a list; don't wrap.
    return bool(
        config.RE_ORDERED_LIST_MATCHER.match(stripped)
        or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
    )


def escape_md(text):
    """
    Escapes markdown-sensitive characters within other markdown
    constructs.
    """
    return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)


def escape_md_section(text, snob=False):
    """
    Escapes markdown-sensitive characters across whole document sections.
    """
    text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)

    if snob:
        text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)

    text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)
    text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)
    text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)

    return text


def reformat_table(lines, right_margin):
    """
    Given the lines of a table
    padds the cells and returns the new lines
    """
    # find the maximum width of the columns
    max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
    max_cols = len(max_width)
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        num_cols = len(cols)

        # don't drop any data if colspan attributes result in unequal lengths
        if num_cols < max_cols:
            cols += [""] * (max_cols - num_cols)
        elif max_cols < num_cols:
            max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
            max_cols = num_cols

        max_width = [
            max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)
        ]

    # reformat
    new_lines = []
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        if set(line.strip()) == set("-|"):
            filler = "-"
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
        else:
            filler = " "
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
        new_lines.append("|".join(new_cols))
    return new_lines


def pad_tables_in_text(text, right_margin=1):
    """
    Provide padding for tables in the text
    """
    lines = text.split("\n")
    table_buffer, table_started = [], False
    new_lines = []
    for line in lines:
        # Toggle table started
        if config.TABLE_MARKER_FOR_PAD in line:
            table_started = not table_started
            if not table_started:
                table = reformat_table(table_buffer, right_margin)
                new_lines.extend(table)
                table_buffer = []
                new_lines.append("")
            continue

        # Process lines
        if table_started:
            table_buffer.append(line)
        else:
            new_lines.append(line)

    return "\n".join(new_lines)
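# ---- usage sketch: utils.py CSS helpers (illustration only, not part of the package source) ----
# A minimal sketch of how dumb_css_parser and element_style (defined above)
# combine class-level and inline styles, the way handle_tag does for Google
# Doc exports.  Assumes the html2text package in this dump is importable;
# the selector and attribute values are made up for illustration.
from html2text.utils import dumb_css_parser, element_style

# Parse a tiny stylesheet into {selector: {property: value}}.
style_def = dumb_css_parser(".warning { color: red; font-weight: bold }")
assert style_def == {".warning": {"color": "red", "font-weight": "bold"}}

# Merge class and inline declarations for one element; the inline "color" wins.
attrs = {"class": "warning", "style": "color: blue"}
merged = element_style(attrs, style_def, parent_style={})
assert merged == {"color": "blue", "font-weight": "bold"}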
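# ---- usage sketch: driving cli.main() in-process (illustration only, not part of the package source) ----
# cli.py above is normally reached through `python -m html2text` (see
# __main__.py), but it can also be exercised in-process by faking sys.argv,
# which is handy for testing.  The input file name below is hypothetical.
import sys
from unittest import mock

from html2text.cli import main

# Equivalent to: python -m html2text --ignore-links --body-width=0 page.html
argv = ["html2text", "--ignore-links", "--body-width=0", "page.html"]
with mock.patch.object(sys, "argv", argv):
    main()  # reads page.html and writes the Markdown conversion to stdout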
# ---- html2text/config.py ----
import re

# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = False

# Marker to use for marking tables for padding post processing
TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"

# Escape all special characters.  Output is less readable, but avoids
# corner case formatting issues.
ESCAPE_SNOB = False

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = False

# Wrap long lines at position. 0 for no wrapping.
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link
# targets won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True

# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True

# Protect links from line breaks surrounding them with angle brackets (in
# addition to their square brackets)
PROTECT_LINKS = False
# WRAP_LINKS = True
WRAP_LINKS = True

# Wrap list items.
WRAP_LIST_ITEMS = False

# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36

# Values Google and others may use to indicate bold text
BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")

IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IMAGES_AS_HTML = False
IMAGES_TO_ALT = False
IMAGES_WITH_SIZE = False
IGNORE_EMPHASIS = False
MARK_CODE = False
DECODE_ERRORS = "strict"
DEFAULT_IMAGE_ALT = ""
PAD_TABLES = False

# Convert links with same href and text to <href> format
# if they are absolute links
USE_AUTOMATIC_LINKS = True

# For checking space-only lines on line 771
RE_SPACE = re.compile(r"\s\+")

RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")

# to find links in the text
RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")

RE_MD_DOT_MATCHER = re.compile(
    r"""
    ^             # start of line
    (\s*\d+)      # optional whitespace and a number
    (\.)          # dot
    (?=\s)        # lookahead assert whitespace
    """,
    re.MULTILINE | re.VERBOSE,
)
RE_MD_PLUS_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (\+)
    (?=\s)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_MD_DASH_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (-)
    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
                  # or another dash (header or hr)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
RE_MD_BACKSLASH_MATCHER = re.compile(
    r"""
    (\\)          # match one slash
    (?=[%s])      # followed by a char that requires escaping
    """
    % re.escape(RE_SLASH_CHARS),
    flags=re.VERBOSE,
)

UNIFIABLE = {
    "rsquo": "'",
    "lsquo": "'",
    "rdquo": '"',
    "ldquo": '"',
    "copy": "(C)",
    "mdash": "--",
    "nbsp": " ",
    "rarr": "->",
    "larr": "<-",
    "middot": "*",
    "ndash": "-",
    "oelig": "oe",
    "aelig": "ae",
    "agrave": "a",
    "aacute": "a",
    "acirc": "a",
    "atilde": "a",
    "auml": "a",
    "aring": "a",
    "egrave": "e",
    "eacute": "e",
    "ecirc": "e",
    "euml": "e",
    "igrave": "i",
    "iacute": "i",
    "icirc": "i",
    "iuml": "i",
    "ograve": "o",
    "oacute": "o",
    "ocirc": "o",
    "otilde": "o",
    "ouml": "o",
    "ugrave": "u",
    "uacute": "u",
    "ucirc": "u",
    "uuml": "u",
    "lrm": "",
    "rlm": "",
}

# Format tables in HTML rather than Markdown syntax
BYPASS_TABLES = False
# Ignore table-related tags (table, th, td, tr) while keeping rows
IGNORE_TABLES = False

# Use a single line break after a block element rather than two line breaks.
# NOTE: Requires body width setting to be 0.
SINGLE_LINE_BREAK = False

# Use double quotation marks when converting the <q> tag.
OPEN_QUOTE = '"'
CLOSE_QUOTE = '"'


# ---- html2text/__init__.py ----
"""html2text: Turn HTML into equivalent Markdown-structured text."""

import html.entities
import html.parser
import re
import urllib.parse as urlparse
from textwrap import wrap

from html2text import config
from html2text.utils import (
    dumb_css_parser,
    element_style,
    escape_md,
    escape_md_section,
    google_fixed_width_font,
    google_has_height,
    google_list_style,
    google_text_emphasis,
    hn,
    list_numbering_start,
    pad_tables_in_text,
    skipwrap,
    unifiable_n,
)

__version__ = (2019, 9, 26)


# TODO:
# Support decoded entities with UNIFIABLE.


class HTML2Text(html.parser.HTMLParser):
    def __init__(self, out=None, baseurl="", bodywidth=config.BODY_WIDTH):
        """
        Input parameters:
            out: possible custom replacement for self.outtextf (which
                 appends lines of text).
            baseurl: base URL of the document we process
        """
        super().__init__(convert_charrefs=False)

        # Config options
        self.split_next_td = False
        self.td_count = 0
        self.table_start = False
        self.unicode_snob = config.UNICODE_SNOB  # covered in cli
        self.escape_snob = config.ESCAPE_SNOB  # covered in cli
        self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
        self.body_width = bodywidth  # covered in cli
        self.skip_internal_links = config.SKIP_INTERNAL_LINKS  # covered in cli
        self.inline_links = config.INLINE_LINKS  # covered in cli
        self.protect_links = config.PROTECT_LINKS  # covered in cli
        self.google_list_indent = config.GOOGLE_LIST_INDENT  # covered in cli
        self.ignore_links = config.IGNORE_ANCHORS  # covered in cli
        self.ignore_images = config.IGNORE_IMAGES  # covered in cli
        self.images_as_html = config.IMAGES_AS_HTML  # covered in cli
        self.images_to_alt = config.IMAGES_TO_ALT  # covered in cli
        self.images_with_size = config.IMAGES_WITH_SIZE  # covered in cli
        self.ignore_emphasis = config.IGNORE_EMPHASIS  # covered in cli
        self.bypass_tables = config.BYPASS_TABLES  # covered in cli
        self.ignore_tables = config.IGNORE_TABLES  # covered in cli
        self.google_doc = False  # covered in cli
        self.ul_item_mark = "*"  # covered in cli
        self.emphasis_mark = "_"  # covered in cli
        self.strong_mark = "**"
        self.single_line_break = config.SINGLE_LINE_BREAK  # covered in cli
        self.use_automatic_links = config.USE_AUTOMATIC_LINKS  # covered in cli
        self.hide_strikethrough = False  # covered in cli
        self.mark_code = config.MARK_CODE
        self.wrap_list_items = config.WRAP_LIST_ITEMS  # covered in cli
        self.wrap_links = config.WRAP_LINKS  # covered in cli
        self.pad_tables = config.PAD_TABLES  # covered in cli
        self.default_image_alt = config.DEFAULT_IMAGE_ALT  # covered in cli
        self.tag_callback = None
        self.open_quote = config.OPEN_QUOTE  # covered in cli
        self.close_quote = config.CLOSE_QUOTE  # covered in cli

        if out is None:
            self.out = self.outtextf
        else:
            self.out = out

        # empty list to store output characters before they are "joined"
        self.outtextlist = []

        self.quiet = 0
        self.p_p = 0  # number of newline character to print before next output
        self.outcount = 0
        self.start = True
        self.space = False
        self.a = []
        self.astack = []
        self.maybe_automatic_link = None
        self.empty_link = False
        self.absolute_url_matcher = re.compile(r"^[a-zA-Z+]+://")
        self.acount = 0
        self.list = []
        self.blockquote = 0
        self.pre = False
        self.startpre = False
        self.code = False
        self.quote = False
        self.br_toggle = ""
        self.lastWasNL = False
        self.lastWasList = False
        self.style = 0
        self.style_def = {}
        self.tag_stack = []
        self.emphasis = 0
        self.drop_white_space = 0
        self.inheader = False
        self.abbr_title = None  # current abbreviation definition
        self.abbr_data = None  # last inner HTML (for abbr being defined)
        self.abbr_list = {}  # stack of abbreviations to write later
        self.baseurl = baseurl
        self.stressed = False
        self.preceding_stressed = False
        self.preceding_data = None
        self.current_tag = None

        config.UNIFIABLE["nbsp"] = "&nbsp_place_holder;"

    def feed(self, data):
        data = data.replace("</' + 'script>", "</ignore>")
        super().feed(data)

    def handle(self, data):
        self.feed(data)
        self.feed("")
        markdown = self.optwrap(self.close())
        if self.pad_tables:
            return pad_tables_in_text(markdown)
        else:
            return markdown

    def outtextf(self, s):
        self.outtextlist.append(s)
        if s:
            self.lastWasNL = s[-1] == "\n"

    def close(self):
        super().close()

        self.pbr()
        self.o("", force="end")

        outtext = "".join(self.outtextlist)

        if self.unicode_snob:
            nbsp = html.entities.html5["nbsp;"]
        else:
            nbsp = " "
        outtext = outtext.replace("&nbsp_place_holder;", nbsp)

        # Clear self.outtextlist to avoid memory leak of its content to
        # the next handling.
        self.outtextlist = []

        return outtext

    def handle_charref(self, c):
        self.handle_data(self.charref(c), True)

    def handle_entityref(self, c):
        ref = self.entityref(c)

        # ref may be an empty string (e.g. for &lrm;/&rlm; markers that should
        # not contribute to the final output).
        # self.handle_data cannot handle a zero-length string right after a
        # stressed tag or mid-text within a stressed tag (text get split and
        # self.stressed/self.preceding_stressed gets switched after the first
        # part of that text).
        if ref:
            self.handle_data(ref, True)

    def handle_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, start=True)

    def handle_endtag(self, tag):
        self.handle_tag(tag, None, start=False)

    def previousIndex(self, attrs):
        """
        :type attrs: dict

        :returns: The index of certain set of attributes (of a link) in the
                  self.a list. If the set of attributes is not found, returns None
        :rtype: int
        """
        if "href" not in attrs:
            return None

        match = False
        for i, a in enumerate(self.a):
            if "href" in a and a["href"] == attrs["href"]:
                if "title" in a or "title" in attrs:
                    if (
                        "title" in a
                        and "title" in attrs
                        and a["title"] == attrs["title"]
                    ):
                        match = True
                else:
                    match = True

            if match:
                return i

        return None

    def handle_emphasis(self, start, tag_style, parent_style):
        """
        Handles various text emphases
        """
        tag_emphasis = google_text_emphasis(tag_style)
        parent_emphasis = google_text_emphasis(parent_style)

        # handle Google's text emphasis
        strikethrough = "line-through" in tag_emphasis and self.hide_strikethrough

        # google and others may mark a font's weight as `bold` or `700`
        bold = False
        for bold_marker in config.BOLD_TEXT_STYLE_VALUES:
            bold = bold_marker in tag_emphasis and bold_marker not in parent_emphasis
            if bold:
                break

        italic = "italic" in tag_emphasis and "italic" not in parent_emphasis
        fixed = (
            google_fixed_width_font(tag_style)
            and not google_fixed_width_font(parent_style)
            and not self.pre
        )

        if start:
            # crossed-out text must be handled before other attributes
            # in order not to output qualifiers unnecessarily
            if bold or italic or fixed:
                self.emphasis += 1
            if strikethrough:
                self.quiet += 1
            if italic:
                self.o(self.emphasis_mark)
                self.drop_white_space += 1
            if bold:
                self.o(self.strong_mark)
                self.drop_white_space += 1
            if fixed:
                self.o("`")
                self.drop_white_space += 1
                self.code = True
        else:
            if bold or italic or fixed:
                # there must not be whitespace before closing emphasis mark
                self.emphasis -= 1
                self.space = False
            if fixed:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_white_space -= 1
                else:
                    self.o("`")
                self.code = False
            if bold:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_white_space -= 1
                else:
                    self.o(self.strong_mark)
            if italic:
                if self.drop_white_space:
                    # empty emphasis, drop it
                    self.drop_white_space -= 1
                else:
                    self.o(self.emphasis_mark)
            # space is only allowed after *all* emphasis marks
            if (bold or italic) and not self.emphasis:
                self.o(" ")
            if strikethrough:
                self.quiet -= 1

    def handle_tag(self, tag, attrs, start):
        self.current_tag = tag
        # attrs is None for endtags
        if attrs is None:
            attrs = {}
        else:
            attrs = dict(attrs)

        if self.tag_callback is not None:
            if self.tag_callback(self, tag, attrs, start) is True:
                return

        # first thing inside the anchor tag is another tag
        # that produces some output
        if (
            start
            and self.maybe_automatic_link is not None
            and tag not in ["p", "div", "style", "dl", "dt"]
            and (tag != "img" or self.ignore_images)
        ):
            self.o("[")
            self.maybe_automatic_link = None
            self.empty_link = False

        if self.google_doc:
            # the attrs parameter is empty for a closing tag. in addition, we
            # need the attributes of the parent nodes in order to get a
            # complete style description for the current element. we assume
            # that google docs export well formed html.
            parent_style = {}
            if start:
                if self.tag_stack:
                    parent_style = self.tag_stack[-1][2]
                tag_style = element_style(attrs, self.style_def, parent_style)
                self.tag_stack.append((tag, attrs, tag_style))
            else:
                dummy, attrs, tag_style = (
                    self.tag_stack.pop() if self.tag_stack else (None, {}, {})
                )
                if self.tag_stack:
                    parent_style = self.tag_stack[-1][2]

        if hn(tag):
            self.p()
            if start:
                self.inheader = True
                self.o(hn(tag) * "#" + " ")
            else:
                self.inheader = False
                return  # prevent redundant emphasis marks on headers

        if tag in ["p", "div"]:
            if self.google_doc:
                if start and google_has_height(tag_style):
                    self.p()
                else:
                    self.soft_br()
            elif self.astack and tag == "div":
                pass
            else:
                self.p()

        if tag == "br" and start:
            if self.blockquote > 0:
                self.o("  \n> ")
            else:
                self.o("  \n")

        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()

        if tag in ["head", "style", "script"]:
            if start:
                self.quiet += 1
            else:
                self.quiet -= 1

        if tag == "style":
            if start:
                self.style += 1
            else:
                self.style -= 1

        if tag in ["body"]:
            self.quiet = 0  # sites like 9rules.com never close <head>

        if tag == "blockquote":
            if start:
                self.p()
                self.o("> ", force=True)
                self.start = True
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()

        def no_preceding_space(self):
            return self.preceding_data and re.match(r"[^\s]", self.preceding_data[-1])

        if tag in ["em", "i", "u"] and not self.ignore_emphasis:
            if start and no_preceding_space(self):
                emphasis = " " + self.emphasis_mark
            else:
                emphasis = self.emphasis_mark

            self.o(emphasis)
            if start:
                self.stressed = True

        if tag in ["strong", "b"] and not self.ignore_emphasis:
            if start and no_preceding_space(self):
                strong = " " + self.strong_mark
            else:
                strong = self.strong_mark

            self.o(strong)
            if start:
                self.stressed = True

        if tag in ["del", "strike", "s"]:
            if start and no_preceding_space(self):
                strike = " ~~"
            else:
                strike = "~~"

            self.o(strike)
            if start:
                self.stressed = True

        if self.google_doc:
            if not self.inheader:
                # handle some font attributes, but leave headers clean
                self.handle_emphasis(start, tag_style, parent_style)

        if tag in ["kbd", "code", "tt"] and not self.pre:
            self.o("`")  # TODO: `` `this` ``
            self.code = not self.code

        if tag == "abbr":
            if start:
                self.abbr_title = None
                self.abbr_data = ""
                if "title" in attrs:
                    self.abbr_title = attrs["title"]
            else:
                if self.abbr_title is not None:
                    self.abbr_list[self.abbr_data] = self.abbr_title
                    self.abbr_title = None
                self.abbr_data = None

        if tag == "q":
            if not self.quote:
                self.o(self.open_quote)
            else:
                self.o(self.close_quote)
            self.quote = not self.quote

        def link_url(self, link, title=""):
            url = urlparse.urljoin(self.baseurl, link)
            title = ' "{}"'.format(title) if title.strip() else ""
            self.o("]({url}{title})".format(url=escape_md(url), title=title))

        if tag == "a" and not self.ignore_links:
            if start:
                if (
                    "href" in attrs
                    and attrs["href"] is not None
                    and not (self.skip_internal_links and attrs["href"].startswith("#"))
                ):
                    self.astack.append(attrs)
                    self.maybe_automatic_link = attrs["href"]
                    self.empty_link = True
                    if self.protect_links:
                        attrs["href"] = "<" + attrs["href"] + ">"
                else:
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if self.maybe_automatic_link and not self.empty_link:
                        self.maybe_automatic_link = None
                    elif a:
                        if self.empty_link:
                            self.o("[")
                            self.empty_link = False
                            self.maybe_automatic_link = None
                        if self.inline_links:
                            try:
                                title = a["title"] if
a["title"] else "" title = escape_md(title) except KeyError: link_url(self, a["href"], "") else: link_url(self, a["href"], title) else: i = self.previousIndex(a) if i is not None: a = self.a[i] else: self.acount += 1 a["count"] = self.acount a["outcount"] = self.outcount self.a.append(a) self.o("][" + str(a["count"]) + "]") if tag == "img" and start and not self.ignore_images: if "src" in attrs: if not self.images_to_alt: attrs["href"] = attrs["src"] alt = attrs.get("alt") or self.default_image_alt # If we have images_with_size, write raw html including width, # height, and alt attributes if self.images_as_html or ( self.images_with_size and ("width" in attrs or "height" in attrs) ): self.o("") return # If we have a link to create, output the start if self.maybe_automatic_link is not None: href = self.maybe_automatic_link if ( self.images_to_alt and escape_md(alt) == href and self.absolute_url_matcher.match(href) ): self.o("<" + escape_md(alt) + ">") self.empty_link = False return else: self.o("[") self.maybe_automatic_link = None self.empty_link = False # If we have images_to_alt, we discard the image itself, # considering only the alt text. if self.images_to_alt: self.o(escape_md(alt)) else: self.o("![" + escape_md(alt) + "]") if self.inline_links: href = attrs.get("href") or "" self.o( "(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")" ) else: i = self.previousIndex(attrs) if i is not None: attrs = self.a[i] else: self.acount += 1 attrs["count"] = self.acount attrs["outcount"] = self.outcount self.a.append(attrs) self.o("[" + str(attrs["count"]) + "]") if tag == "dl" and start: self.p() if tag == "dt" and not start: self.pbr() if tag == "dd" and start: self.o(" ") if tag == "dd" and not start: self.pbr() if tag in ["ol", "ul"]: # Google Docs create sub lists as top level lists if not self.list and not self.lastWasList: self.p() if start: if self.google_doc: list_style = google_list_style(tag_style) else: list_style = tag numbering_start = list_numbering_start(attrs) self.list.append({"name": list_style, "num": numbering_start}) else: if self.list: self.list.pop() if not self.google_doc and not self.list: self.o("\n") self.lastWasList = True else: self.lastWasList = False if tag == "li": self.pbr() if start: if self.list: li = self.list[-1] else: li = {"name": "ul", "num": 0} if self.google_doc: nest_count = self.google_nest_count(tag_style) else: nest_count = len(self.list) # TODO: line up
  1. s > 9 correctly. self.o(" " * nest_count) if li["name"] == "ul": self.o(self.ul_item_mark + " ") elif li["name"] == "ol": li["num"] += 1 self.o(str(li["num"]) + ". ") self.start = True if tag in ["table", "tr", "td", "th"]: if self.ignore_tables: if tag == "tr": if start: pass else: self.soft_br() else: pass elif self.bypass_tables: if start: self.soft_br() if tag in ["td", "th"]: if start: self.o("<{}>\n\n".format(tag)) else: self.o("\n".format(tag)) else: if start: self.o("<{}>".format(tag)) else: self.o("".format(tag)) else: if tag == "table": if start: self.table_start = True if self.pad_tables: self.o("<" + config.TABLE_MARKER_FOR_PAD + ">") self.o(" \n") else: if self.pad_tables: self.o("") self.o(" \n") if tag in ["td", "th"] and start: if self.split_next_td: self.o("| ") self.split_next_td = True if tag == "tr" and start: self.td_count = 0 if tag == "tr" and not start: self.split_next_td = False self.soft_br() if tag == "tr" and not start and self.table_start: # Underline table header self.o("|".join(["---"] * self.td_count)) self.soft_br() self.table_start = False if tag in ["td", "th"] and start: self.td_count += 1 if tag == "pre": if start: self.startpre = True self.pre = True else: self.pre = False if self.mark_code: self.out("\n[/code]") self.p() # TODO: Add docstring for these one letter functions def pbr(self): "Pretty print has a line break" if self.p_p == 0: self.p_p = 1 def p(self): "Set pretty print to 1 or 2 lines" self.p_p = 1 if self.single_line_break else 2 def soft_br(self): "Soft breaks" self.pbr() self.br_toggle = " " def o(self, data, puredata=False, force=False): """ Deal with indentation and whitespace """ if self.abbr_data is not None: self.abbr_data += data if not self.quiet: if self.google_doc: # prevent white space immediately after 'begin emphasis' # marks ('**' and '_') lstripped_data = data.lstrip() if self.drop_white_space and not (self.pre or self.code): data = lstripped_data if lstripped_data != "": self.drop_white_space = 0 if puredata and not self.pre: # This is a very dangerous call ... it could mess up # all handling of   when not handled properly # (see entityref) data = re.sub(r"\s+", r" ", data) if data and data[0] == " ": self.space = True data = data[1:] if not data and not force: return if self.startpre: # self.out(" :") #TODO: not output when already one there if not data.startswith("\n") and not data.startswith("\r\n"): #
    stuff...
                        data = "\n" + data
                    if self.mark_code:
                        self.out("\n[code]")
                        self.p_p = 0
    
                bq = ">" * self.blockquote
                if not (force and data and data[0] == ">") and self.blockquote:
                    bq += " "
    
                if self.pre:
                    if not self.list:
                        bq += "    "
                    # else: list content is already partially indented
                    for i in range(len(self.list)):
                        bq += "    "
                    data = data.replace("\n", "\n" + bq)
    
                if self.startpre:
                    self.startpre = False
                    if self.list:
                        # use existing initial indentation
                        data = data.lstrip("\n")
    
                if self.start:
                    self.space = False
                    self.p_p = 0
                    self.start = False
    
                if force == "end":
                    # It's the end.
                    self.p_p = 0
                    self.out("\n")
                    self.space = False
    
                if self.p_p:
                    self.out((self.br_toggle + "\n" + bq) * self.p_p)
                    self.space = False
                    self.br_toggle = ""
    
                if self.space:
                    if not self.lastWasNL:
                        self.out(" ")
                    self.space = False
    
                if self.a and (
                    (self.p_p == 2 and self.links_each_paragraph) or force == "end"
                ):
                    if force == "end":
                        self.out("\n")
    
                    newa = []
                    for link in self.a:
                        if self.outcount > link["outcount"]:
                            self.out(
                                "   ["
                                + str(link["count"])
                                + "]: "
                                + urlparse.urljoin(self.baseurl, link["href"])
                            )
                            if "title" in link:
                                self.out(" (" + link["title"] + ")")
                            self.out("\n")
                        else:
                            newa.append(link)
    
                    # Don't need an extra line when nothing was done.
                    if self.a != newa:
                        self.out("\n")
    
                    self.a = newa
    
                if self.abbr_list and force == "end":
                    for abbr, definition in self.abbr_list.items():
                        self.out("  *[" + abbr + "]: " + definition + "\n")
    
                self.p_p = 0
                self.out(data)
                self.outcount += 1
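            # Flush mechanics in one worked example (assuming the emitted data
            # does not itself start with ">"): with self.blockquote == 2 and
            # self.p_p == 2, bq becomes ">> " and the pending-newline flush above
            # writes "\n>> \n>> " before the data, which is how nested quote
            # prefixes survive paragraph breaks.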
    
        def handle_data(self, data, entity_char=False):
            if not data:
                # Data may be empty for some HTML entities. For example,
                # LEFT-TO-RIGHT MARK.
                return
    
            if self.stressed:
                data = data.strip()
                self.stressed = False
                self.preceding_stressed = True
            elif self.preceding_stressed:
                if (
                    re.match(r"[^\s.!?]", data[0])
                    and not hn(self.current_tag)
                    and self.current_tag not in ["a", "code", "pre"]
                ):
                    # should match a letter or common punctuation
                    data = " " + data
                self.preceding_stressed = False
    
            if self.style:
                self.style_def.update(dumb_css_parser(data))
    
            if self.maybe_automatic_link is not None:
                href = self.maybe_automatic_link
                if (
                    href == data
                    and self.absolute_url_matcher.match(href)
                    and self.use_automatic_links
                ):
                    self.o("<" + data + ">")
                    self.empty_link = False
                    return
                else:
                    self.o("[")
                    self.maybe_automatic_link = None
                    self.empty_link = False
    
            if not self.code and not self.pre and not entity_char:
                data = escape_md_section(data, snob=self.escape_snob)
            self.preceding_data = data
            self.o(data, puredata=True)
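            # Example of the automatic-link path above: for
            # <a href="http://example.com">http://example.com</a> the anchor text
            # equals the pending href, so the output collapses to
            # <http://example.com> instead of
            # [http://example.com](http://example.com), provided
            # use_automatic_links is left enabled.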
    
        def charref(self, name):
            if name[0] in ["x", "X"]:
                c = int(name[1:], 16)
            else:
                c = int(name)
    
            if not self.unicode_snob and c in unifiable_n:
                return unifiable_n[c]
            else:
                try:
                    return chr(c)
                except ValueError:  # invalid unicode
                    return ""
    
        def entityref(self, c):
            if not self.unicode_snob and c in config.UNIFIABLE:
                return config.UNIFIABLE[c]
            try:
                ch = html.entities.html5[c + ";"]
            except KeyError:
                return "&" + c + ";"
            return config.UNIFIABLE[c] if c == "nbsp" else ch
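            # Worked example: entityref("amp") resolves through
            # html.entities.html5 to "&"; an unknown name such as "bogus" is
            # passed through verbatim as "&bogus;"; and "nbsp" is always routed
            # through config.UNIFIABLE, so the &nbsp_place_holder; substitution
            # performed in close() keeps working.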
    
        def google_nest_count(self, style):
            """
            Calculate the nesting count of google doc lists
    
            :type style: dict
    
            :rtype: int
            """
            nest_count = 0
            if "margin-left" in style:
                nest_count = int(style["margin-left"][:-2]) // self.google_list_indent
    
            return nest_count
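            # Worked example: with config.GOOGLE_LIST_INDENT at its stock value
            # of 36 pixels, a Google Docs item styled "margin-left: 72px" gives
            # int("72") // 36 == 2, i.e. a list item nested two levels deep.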
    
        def optwrap(self, text):
            """
            Wrap all paragraphs in the provided text.
    
            :type text: str
    
            :rtype: str
            """
            if not self.body_width:
                return text
    
            result = ""
            newlines = 0
            # I cannot think of a better solution for now.
            # To avoid the non-wrap behaviour for entire paras
            # because of the presence of a link in it
            if not self.wrap_links:
                self.inline_links = False
            for para in text.split("\n"):
                if len(para) > 0:
                    if not skipwrap(para, self.wrap_links, self.wrap_list_items):
                        indent = ""
                        if para.startswith("  " + self.ul_item_mark):
                            # list item continuation: add a double indent to the
                            # new lines
                            indent = "    "
                        elif para.startswith("> "):
                            # blockquote continuation: add the greater than symbol
                            # to the new lines
                            indent = "> "
                        wrapped = wrap(
                            para,
                            self.body_width,
                            break_long_words=False,
                            subsequent_indent=indent,
                        )
                        result += "\n".join(wrapped)
                        if para.endswith("  "):
                            result += "  \n"
                            newlines = 1
                        elif indent:
                            result += "\n"
                            newlines = 1
                        else:
                            result += "\n\n"
                            newlines = 2
                    else:
                        # Warning for the tempted!!!
                        # Be aware that obvious replacement of this with
                        # line.isspace()
                        # DOES NOT work! Explanations are welcome.
                        if not config.RE_SPACE.match(para):
                            result += para + "\n"
                            newlines = 1
                else:
                    if newlines < 2:
                        result += "\n"
                        newlines += 1
            return result
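            # Example: with body_width=78 (the stock BODY_WIDTH) a long prose
            # paragraph is re-flowed to lines of at most 78 characters, while
            # paragraphs that skipwrap() flags (for instance table rows, or long
            # inline links when wrap_links is off) are passed through unwrapped.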
    
    
    def html2text(html, baseurl="", bodywidth=None):
        if bodywidth is None:
            bodywidth = config.BODY_WIDTH
        h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
    
        return h.handle(html)
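
    # A minimal usage sketch of the two public entry points defined above; the
    # sample HTML string is purely illustrative and not part of the library.
    if __name__ == "__main__":
        sample = "<h1>Hello</h1><p>See <a href='http://example.com/'>the example site</a>.</p>"
        # One-shot conversion with the defaults from config.
        print(html2text(sample))
        # Reusable converter: disable wrapping and switch to reference-style links.
        h = HTML2Text(bodywidth=0)
        h.inline_links = False
        print(h.handle(sample))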