feat(ST2.EditorPackages): bump up all packages

- Refresh PackageCache with latest versions of everything
Iristyle
2013-09-16 22:29:05 -04:00
parent 951be33c9e
commit 5ed4214a22
180 changed files with 9360 additions and 1211 deletions


@@ -0,0 +1,26 @@
Changes in Markdown Preview
===========================
## 1.0.3
* The `messages.json` should be OK this time.
## 1.0.2
* Fixes messages.json and changelog versions.
## 1.0.1
* Removed the markdown2 parser because it is not well maintained and buggy.
* Made the Python Markdown parser the default.
* Split the preview commands into separate *Python Markdown* and *Github Flavored Markdown* commands.
* Added markdown file build support; the build parser is configured via the original `"parser"` setting.
* Added this changelog file for both developers and users.
* Added `messages.json`, which enables display of `README.md` and `CHANGES.md`.
* Try to use `Markdown Extended.tmLanguage` for the cheat sheet if `Markdown Extended` is installed.
## 1.0.0
* Support for ST3.
* Added Python Markdown parser.
* CSS search looks first in the markdown file's directory and then falls back to the built-in CSS.


@@ -1,28 +1,61 @@
[
{
"caption": "Markdown Preview: preview in Browser",
"caption": "Markdown Preview: Python Markdown: Preview in Browser",
"command": "markdown_preview",
"args": {
"target": "browser"
"target": "browser",
"parser": "markdown"
}
},
{
"caption": "Markdown Preview: export HTML in Sublime Text",
"caption": "Markdown Preview: Python Markdown: Export HTML in Sublime Text",
"command": "markdown_preview",
"args": {
"target": "sublime"
"target": "sublime",
"parser": "markdown"
}
},
{
"caption": "Markdown Preview: copy to clipboard",
"caption": "Markdown Preview: Python Markdown: Copy to Clipboard",
"command": "markdown_preview",
"args": {
"target": "clipboard"
"target": "clipboard",
"parser": "markdown"
}
},
{
"caption": "Markdown Preview: Github Flavored Markdown: Preview in Browser",
"command": "markdown_preview",
"args": {
"target": "browser",
"parser": "github"
}
},
{
"caption": "Markdown Preview: open Markdown Cheat sheet",
"caption": "Markdown Preview: Github Flavored Markdown: Export HTML in Sublime Text",
"command": "markdown_preview",
"args": {
"target": "sublime",
"parser": "github"
}
},
{
"caption": "Markdown Preview: Github Flavored Markdown: Copy to Clipboard",
"command": "markdown_preview",
"args": {
"target": "clipboard",
"parser": "github"
}
},
{
"caption": "Markdown Preview: Open Markdown Cheat sheet",
"command": "markdown_cheatsheet",
"args": {}
}
]
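Every palette entry above goes through the same `markdown_preview` text command; only the `target` and `parser` arguments differ. A minimal sketch of driving the command from another plugin or a key binding, using the same argument values as the entries above (the class name here is purely illustrative):

import sublime_plugin

class PreviewWithGithubParserCommand(sublime_plugin.TextCommand):
    ''' Hypothetical helper: same effect as the "Github Flavored Markdown:
    Preview in Browser" palette entry above. '''
    def run(self, edit):
        self.view.run_command('markdown_preview', {
            'target': 'browser',   # or 'sublime' / 'clipboard'
            'parser': 'github'     # or 'markdown'
        })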


@@ -0,0 +1,12 @@
{
"target": "markdown_build",
"selector": "text.html.markdown",
"variants": [
{
"target": "markdown_build_github",
"name": "Build with Github API"
}
]
}
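The build file above wires `Ctrl+B`/`cmd+B` to a `markdown_build` command for markdown buffers, plus a "Build with Github API" variant that runs `markdown_build_github` instead. A quick sketch of triggering the same targets from the Sublime console, assuming a saved markdown file is the active view:

import sublime

window = sublime.active_window()
window.run_command('markdown_build')          # default build target from the .sublime-build file
window.run_command('markdown_build_github')   # the "Build with Github API" variant target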


@@ -1,31 +1,130 @@
# -*- encoding: UTF-8 -*-
import sublime
import sublime_plugin
import desktop
import tempfile
import markdown2
import os
import sys
import traceback
import tempfile
import re
import json
import urllib2
import time
import traceback
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
if sublime.version() >= '3000':
from . import desktop
from . import markdown2
from . import markdown
from .helper import INSTALLED_DIRECTORY
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
def Request(url, data, headers):
''' Adapter for urllib2 used in ST2 '''
import urllib.request
return urllib.request.Request(url, data=data, headers=headers, method='POST')
else: # ST2
import desktop
import markdown2
import markdown
from helper import INSTALLED_DIRECTORY
from urllib2 import Request, urlopen, HTTPError, URLError
_CANNOT_CONVERT = u'cannot convert markdown'
def getTempMarkdownPreviewPath(view):
''' return a permanent full path of the temp markdown preview file '''
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
tmp_filename = '%s.html' % view.id()
tmp_fullpath = os.path.join(tempfile.gettempdir(), tmp_filename)
if settings.get('path_tempfile'):
tmp_fullpath = os.path.join(settings.get('path_tempfile'), tmp_filename)
else:
tmp_fullpath = os.path.join(tempfile.gettempdir(), tmp_filename)
return tmp_fullpath
def save_utf8(filename, text):
v = sublime.version()
if v >= '3000':
f = open(filename, 'w', encoding='utf-8')
f.write(text)
f.close()
else: # 2.x
f = open(filename, 'w')
f.write(text.encode('utf-8'))
f.close()
def load_utf8(filename):
v = sublime.version()
if v >= '3000':
return open(filename, 'r', encoding='utf-8').read()
else: # 2.x
return open(filename, 'r').read().decode('utf-8')
def load_resource(name):
''' return file contents for files within the package root folder '''
v = sublime.version()
if v >= '3000':
filename = '/'.join(['Packages', INSTALLED_DIRECTORY, name])
try:
return sublime.load_resource(filename)
except:
print("Error while load_resource('%s')" % filename)
traceback.print_exc()
return ''
else: # 2.x
filename = os.path.join(sublime.packages_path(), INSTALLED_DIRECTORY, name)
if not os.path.isfile(filename):
print('Error while looking up resource file: %s' % name)
return ''
try:
return open(filename, 'r').read().decode('utf-8')
except:
print("Error while load_resource('%s')" % filename)
traceback.print_exc()
return ''
def exists_resource(resource_file_path):
if sublime.version() >= '3000':
try:
sublime.load_resource(resource_file_path)
return True
except:
return False
else:
filename = os.path.join(os.path.dirname(sublime.packages_path()), resource_file_path)
return os.path.isfile(filename)
def new_scratch_view(window, text):
''' create a new scratch view and paste text content
return the new view
'''
new_view = window.new_file()
new_view.set_scratch(True)
if sublime.version() >= '3000':
new_view.run_command('append', {
'characters': text,
})
else: # 2.x
new_edit = new_view.begin_edit()
new_view.insert(new_edit, 0, text)
new_view.end_edit(new_edit)
return new_view
class MarkdownPreviewListener(sublime_plugin.EventListener):
''' auto update the output html if markdown file has already been converted once '''
def on_post_save(self, view):
if view.file_name().endswith(tuple(settings.get('markdown_filetypes', (".md", ".markdown", ".mdown")))):
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
filetypes = settings.get('markdown_filetypes')
if filetypes and view.file_name().endswith(tuple(filetypes)):
temp_file = getTempMarkdownPreviewPath(view)
if os.path.isfile(temp_file):
# reexec markdown conversion
@@ -36,57 +135,107 @@ class MarkdownPreviewListener(sublime_plugin.EventListener):
class MarkdownCheatsheetCommand(sublime_plugin.TextCommand):
''' open our markdown cheat sheet in ST2 '''
def run(self, edit):
cheatsheet = os.path.join(sublime.packages_path(), 'Markdown Preview', 'sample.md')
self.view.window().open_file(cheatsheet)
lines = '\n'.join(load_resource('sample.md').splitlines())
view = new_scratch_view(self.view.window(), lines)
view.set_name("Markdown Cheatsheet")
# Set syntax file
syntax_files = ["Packages/Markdown Extended/Syntaxes/Markdown Extended.tmLanguage", "Packages/Markdown/Markdown.tmLanguage"]
for file in syntax_files:
if exists_resource(file):
view.set_syntax_file(file)
break # Done if any syntax is set.
sublime.status_message('Markdown cheat sheet opened')
class MarkdownPreviewCommand(sublime_plugin.TextCommand):
''' preview file contents with python-markdown and your web browser '''
def getCSS(self):
''' return the correct CSS file based on parser and settings '''
config_parser = settings.get('parser')
config_css = settings.get('css')
class MarkdownCompiler():
''' Do the markdown converting '''
styles = ''
if config_css and config_css != 'default':
styles += u"<link href='%s' rel='stylesheet' type='text/css'>" % config_css
else:
css_filename = 'markdown.css'
if config_parser and config_parser == 'github':
css_filename = 'github.css'
# path via package manager
css_path = os.path.join(sublime.packages_path(), 'Markdown Preview', css_filename)
if not os.path.isfile(css_path):
# path via git repo
css_path = os.path.join(sublime.packages_path(), 'sublimetext-markdown-preview', css_filename)
if not os.path.isfile(css_path):
sublime.error_message('markdown.css file not found!')
raise Exception("markdown.css file not found!")
styles += u"<style>%s</style>" % open(css_path, 'r').read().decode('utf-8')
def get_search_path_css(self):
css_name = self.settings.get('css', 'default')
if os.path.isabs(css_name):
return u"<link href='%s' rel='stylesheet' type='text/css'>" % css_name
if settings.get('allow_css_overrides'):
if css_name == 'default':
css_name = 'github.css' if self.settings.get('parser', 'default') == 'github' else 'markdown.css'
# Try the local folder for css file.
mdfile = self.view.file_name()
if mdfile is not None:
css_path = os.path.join(os.path.dirname(mdfile), css_name)
if os.path.isfile(css_path):
return u"<style>%s</style>" % load_utf8(css_path)
# Try the built-in css files.
return u"<style>%s</style>" % load_resource(css_name)
def get_override_css(self):
''' handles the allow_css_overrides setting. '''
if self.settings.get('allow_css_overrides'):
filename = self.view.file_name()
filetypes = settings.get('markdown_filetypes')
filetypes = self.settings.get('markdown_filetypes')
if filename and filetypes:
for filetype in filetypes:
if filename.endswith(filetype):
css_filename = filename.rpartition(filetype)[0] + '.css'
if (os.path.isfile(css_filename)):
styles += u"<style>%s</style>" % open(css_filename, 'r').read().decode('utf-8')
return u"<style>%s</style>" % load_utf8(css_filename)
return ''
return styles
def get_stylesheet(self):
''' return the correct CSS file based on parser and settings '''
return self.get_search_path_css() + self.get_override_css()
def get_contents(self, region):
def get_javascript(self):
js_files = self.settings.get('js')
scripts = ''
if js_files is not None:
# Ensure string values become a list.
if isinstance(js_files, str) or isinstance(js_files, unicode):
js_files = [js_files]
# Only load scripts if we have a list.
if isinstance(js_files, list):
for js_file in js_files:
if os.path.isabs(js_file):
# Load the script inline to avoid cross-origin.
scripts += u"<script>%s</script>" % load_utf8(js_file)
else:
scripts += u"<script type='text/javascript' src='%s'></script>" % js_file
return scripts
def get_mathjax(self):
''' return the MathJax script if enabled '''
if self.settings.get('enable_mathjax') is True:
return load_resource('mathjax.html')
return ''
def get_highlight(self):
''' return the Highlight.js and css if enabled '''
highlight = ''
if self.settings.get('enable_highlight') is True and self.settings.get('parser') == 'default':
highlight += "<style>%s</style>" % load_resource('highlight.css')
highlight += "<script>%s</script>" % load_resource('highlight.js')
highlight += "<script>hljs.initHighlightingOnLoad();</script>"
return highlight
def get_contents(self, wholefile=False):
''' Get contents or selection from view and optionally strip the YAML front matter '''
region = sublime.Region(0, self.view.size())
contents = self.view.substr(region)
# use selection if any
selection = self.view.substr(self.view.sel()[0])
if selection.strip() != '':
contents = selection
if settings.get('strip_yaml_front_matter') and contents.startswith('---'):
if not wholefile:
# use selection if any
selection = self.view.substr(self.view.sel()[0])
if selection.strip() != '':
contents = selection
if self.settings.get('strip_yaml_front_matter') and contents.startswith('---'):
title = ''
title_match = re.search('(?:title:)(.+)', contents, flags=re.IGNORECASE)
if title_match:
@@ -110,39 +259,62 @@ class MarkdownPreviewCommand(sublime_plugin.TextCommand):
html = RE_SOURCES.sub(tag_fix, html)
return html
def convert_markdown(self, markdown):
''' convert input markdown to HTML, with github or builtin parser '''
config_parser = settings.get('parser')
github_oauth_token = settings.get('github_oauth_token')
def get_config_extensions(self, default_extensions):
config_extensions = self.settings.get('enabled_extensions')
if not config_extensions or config_extensions == 'default':
return default_extensions
if 'default' in config_extensions:
config_extensions.remove( 'default' )
config_extensions.extend( default_extensions )
return config_extensions
def convert_markdown(self, markdown_text, parser):
''' convert input markdown to HTML, with github or builtin parser '''
markdown_html = _CANNOT_CONVERT
if parser == 'github':
github_oauth_token = self.settings.get('github_oauth_token')
markdown_html = u'cannot convert markdown'
if config_parser and config_parser == 'github':
# use the github API
sublime.status_message('converting markdown with github API...')
try:
github_mode = settings.get('github_mode', 'gfm')
data = {"text": markdown, "mode": github_mode}
json_data = json.dumps(data)
github_mode = self.settings.get('github_mode', 'gfm')
data = {
"text": markdown_text,
"mode": github_mode
}
headers = {
'Content-Type': 'application/json'
}
if github_oauth_token:
headers['Authorization'] = "token %s" % github_oauth_token
data = json.dumps(data).encode('utf-8')
url = "https://api.github.com/markdown"
sublime.status_message(url)
request = urllib2.Request(url, json_data, {'Content-Type': 'application/json'})
if github_oauth_token:
request.add_header('Authorization', "token %s" % github_oauth_token)
markdown_html = urllib2.urlopen(request).read().decode('utf-8')
except urllib2.HTTPError, e:
request = Request(url, data, headers)
markdown_html = urlopen(request).read().decode('utf-8')
except HTTPError:
e = sys.exc_info()[1]
if e.code == 401:
sublime.error_message('github API auth failed. Please check your OAuth token.')
else:
sublime.error_message('github API responded in an unfashion way :/')
except urllib2.URLError:
except URLError:
sublime.error_message('cannot use github API to convert markdown. SSL is not included in your Python installation')
except:
e = sys.exc_info()[1]
print(e)
traceback.print_exc()
sublime.error_message('cannot use github API to convert markdown. Please check your settings.')
else:
sublime.status_message('converted markdown with github API successfully')
else:
elif parser == 'markdown2':
# convert the markdown
markdown_html = markdown2.markdown(markdown, extras=['footnotes', 'toc', 'fenced-code-blocks', 'cuddled-lists'])
enabled_extras = set(self.get_config_extensions(['footnotes', 'toc', 'fenced-code-blocks', 'cuddled-lists']))
if self.settings.get("enable_mathjax") is True or self.settings.get("enable_highlight") is True:
enabled_extras.add('code-friendly')
markdown_html = markdown2.markdown(markdown_text, extras=list(enabled_extras))
toc_html = markdown_html.toc_html
if toc_html:
toc_markers = ['[toc]', '[TOC]', '<!--TOC-->']
@@ -151,41 +323,62 @@ class MarkdownPreviewCommand(sublime_plugin.TextCommand):
# postprocess the html from internal parser
markdown_html = self.postprocessor(markdown_html)
else:
sublime.status_message('converting markdown with Python markdown...')
config_extensions = self.get_config_extensions(['extra', 'toc'])
markdown_html = markdown.markdown(markdown_text, extensions=config_extensions)
markdown_html = self.postprocessor(markdown_html)
return markdown_html
def run(self, edit, target='browser'):
region = sublime.Region(0, self.view.size())
encoding = self.view.encoding()
if encoding == 'Undefined':
encoding = 'utf-8'
elif encoding == 'Western (Windows 1252)':
encoding = 'windows-1252'
elif encoding == 'UTF-8 with BOM':
encoding = 'utf-8'
def get_title(self):
title = self.view.name()
if not title:
fn = self.view.file_name()
title = 'untitled' if not fn else os.path.splitext(os.path.basename(fn))[0]
return '<title>%s</title>' % title
contents = self.get_contents(region)
def run(self, view, parser, wholefile=False):
''' return full html and body html for view. '''
self.settings = sublime.load_settings('MarkdownPreview.sublime-settings')
self.view = view
contents = self.get_contents(wholefile)
body = self.convert_markdown(contents, parser)
markdown_html = self.convert_markdown(contents)
html = u'<!DOCTYPE html>'
html += '<html><head><meta charset="utf-8">'
html += self.get_stylesheet()
html += self.get_javascript()
html += self.get_highlight()
html += self.get_mathjax()
html += self.get_title()
html += '</head><body>'
html += body
html += '</body>'
html += '</html>'
return html, body
full_html = u'<!DOCTYPE html>'
full_html += '<html><head><meta charset="%s">' % encoding
full_html += self.getCSS()
full_html += '</head><body>'
full_html += markdown_html
full_html += '</body>'
full_html += '</html>'
compiler = MarkdownCompiler()
class MarkdownPreviewCommand(sublime_plugin.TextCommand):
def run(self, edit, parser='markdown', target='browser'):
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
html, body = compiler.run(self.view, parser)
if target in ['disk', 'browser']:
# check if LiveReload ST2 extension installed and add its script to the resulting HTML
livereload_installed = ('LiveReload' in os.listdir(sublime.packages_path()))
# build the html
if livereload_installed:
full_html += '<script>document.write(\'<script src="http://\' + (location.host || \'localhost\').split(\':\')[0] + \':35729/livereload.js?snipver=1"></\' + \'script>\')</script>'
html += '<script>document.write(\'<script src="http://\' + (location.host || \'localhost\').split(\':\')[0] + \':35729/livereload.js?snipver=1"></\' + \'script>\')</script>'
# update output html file
tmp_fullpath = getTempMarkdownPreviewPath(self.view)
tmp_html = open(tmp_fullpath, 'w')
tmp_html.write(full_html.encode(encoding))
tmp_html.close()
save_utf8(tmp_fullpath, html)
# now opens in browser if needed
if target == 'browser':
config_browser = settings.get('browser')
@@ -205,13 +398,65 @@ class MarkdownPreviewCommand(sublime_plugin.TextCommand):
sublime.status_message('Markdown preview launched in default html viewer')
elif target == 'sublime':
# create a new buffer and paste the output HTML
new_view = self.view.window().new_file()
new_view.set_scratch(True)
new_edit = new_view.begin_edit()
new_view.insert(new_edit, 0, markdown_html)
new_view.end_edit(new_edit)
new_scratch_view(self.view.window(), body)
sublime.status_message('Markdown preview launched in sublime')
elif target == 'clipboard':
# clipboard copy the full HTML
sublime.set_clipboard(full_html)
sublime.set_clipboard(html)
sublime.status_message('Markdown export copied to clipboard')
class MarkdownBuildCommand(sublime_plugin.WindowCommand):
def init_panel(self):
if not hasattr(self, 'output_view'):
if sublime.version() >= '3000':
self.output_view = self.window.create_output_panel("markdown")
else:
self.output_view = self.window.get_output_panel("markdown")
def puts(self, message):
message = message + '\n'
if sublime.version() >= '3000':
self.output_view.run_command('append', {'characters': message, 'force': True, 'scroll_to_end': True})
else:
selection_was_at_end = (len(self.output_view.sel()) == 1
and self.output_view.sel()[0]
== sublime.Region(self.output_view.size()))
self.output_view.set_read_only(False)
edit = self.output_view.begin_edit()
self.output_view.insert(edit, self.output_view.size(), message)
if selection_was_at_end:
self.output_view.show(self.output_view.size())
self.output_view.end_edit(edit)
self.output_view.set_read_only(True)
def run(self):
view = self.window.active_view()
if not view:
return
start_time = time.time()
self.init_panel()
show_panel_on_build = sublime.load_settings("Preferences.sublime-settings").get("show_panel_on_build", True)
if show_panel_on_build:
self.window.run_command("show_panel", {"panel": "output.markdown"})
mdfile = view.file_name()
if mdfile is None:
self.puts("Can't build a unsaved markdown file.")
return
self.puts("Compiling %s..." % mdfile)
html, body = compiler.run(view, 'markdown', True)
htmlfile = os.path.splitext(mdfile)[0]+'.html'
self.puts(" ->"+htmlfile)
save_utf8(htmlfile, html)
elapsed = time.time() - start_time
if body == _CANNOT_CONVERT:
self.puts(_CANNOT_CONVERT)
self.puts("[Finished in %.1fs]" % (elapsed))
sublime.status_message("Build finished")


@@ -11,14 +11,50 @@
"browser": "default",
/*
Sets the default parser for converting markdown to html.
Sets the parser used to build markdown to html.
NOTE: The parser setting no longer applies to the preview commands.
The preview now has separate commands for each markdown parser.
Warning for github API : if you have a ST2 linux build, Python is not built with SSL so it may not work
default - Use the builtin python-markdown2 parser
default - The current default parser is the python-markdown parser.
markdown - Use the builtin python-markdown parser
markdown2 - (Deprecated) Use the builtin python-markdown2 parser.
github - Use the github API to convert markdown, so you can use GitHub flavored Markdown, see http://github.github.com/github-flavored-markdown/
*/
"parser": "default",
/*
Enable or disable MathJax support.
*/
"enable_mathjax": false,
/*
Enable or disable highlight.js support for syntax highlighting.
*/
"enable_highlight": false,
/*
List of enabled extensions of the selected markdown parser.
You can get the full list of extensions at:
* The markdown2 parser, the `default`: https://github.com/trentm/python-markdown2/wiki/Extras
* The python markdown parser, the `markdown`: http://pythonhosted.org/Markdown/extensions/index.html
default - use the default set of extensions, see the table below.
[ "default", "def_list", ... ] - a list of extensions. Use "default" to include the default extensions.
Parser | "default" Values
------------|---------------------------
default | ["footnotes", "toc", "fenced-code-blocks", "cuddled-lists" ]
markdown | ["extra", "toc"]
github | extensions values are not used.
*/
"enabled_extensions": "default",
/*
Default mode for the github Markdown parser : markdown (documents) or gfm (comments)
see http://developer.github.com/v3/markdown/#render-an-arbitrary-markdown-document
@@ -46,11 +82,29 @@
*/
"allow_css_overrides": true,
/*
Sets the JavaScript files to embed in the HTML
Set an array of URLs or filepaths to JavaScript files. Absolute filepaths will be loaded
into the script tag; others will be set as the `src` attribute. The order of files in the
array is the order in which they are embedded.
*/
// "js": ["http://example.com/script.js", "/path/to/script.js"],
/*
Sets the supported filetypes for auto-reload on save
*/
"markdown_filetypes": [".md", ".markdown", ".mdown"],
/*
Sets a custom temporary folder for MarkdownPreview-generated html files. Useful if you're
using LiveReload and don't want to use the OS default. The directory must already exist.
Examples: /tmp/custom_folder (Linux/OSX)
C:/TEMP/MYNOTES (Windows - note it's forward slash, even on Windows)
*/
// "path_tempfile": "/tmp/my_notes",
/*
Strips the YAML front matter header and converts title to a heading
*/
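The `enabled_extensions` setting documented above feeds straight into the parser call inside the plugin. For reference, a minimal standalone sketch of what the documented defaults amount to, assuming the bundled `markdown` and `markdown2` modules are on the import path:

import markdown
import markdown2

text = "# Title\n\nSome *markdown* text."

# "markdown" parser with its default extensions ("extra", "toc").
html_md = markdown.markdown(text, extensions=['extra', 'toc'])

# Deprecated "markdown2" parser with its default extras.
html_md2 = markdown2.markdown(
    text, extras=['footnotes', 'toc', 'fenced-code-blocks', 'cuddled-lists'])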


@@ -1,9 +1,9 @@
Sublime Text 2 MarkDown preview
===============================
Sublime Text 2/3 Markdown Preview
=================================
A simple ST2 plugin to help you preview your markdown files quickly in your web browser.
Preview your markdown files quickly in your web browser from Sublime Text 2/3.
You can use the builtin [python-markdown2][0] parser (default) or use the [github markdown API][5] for the conversion (edit your settings to select it).
You can use the builtin [python-markdown][10] parser or the [github markdown API][5] for the conversion.
**NOTE:** If you choose the GitHub API for conversion (set parser: github in your settings), your code will be sent through https to github for live conversion. You'll have [Github flavored markdown][6], syntax highlighting and EMOJI support for free :heart: :octocat: :gift:. If you make more than 60 calls a day, be sure to set your GitHub API key in the settings :)
@@ -11,42 +11,81 @@ You can use builtin [python-markdown2][0] parser (default) or use the [github ma
## Features :
- Markdown conversion via builtin Markdown Parser ([python-markdown2][0]) or via Github API : just choose in your settings.
- Markdown preview using the [Python-markdown][10] parser or the Github API: just select the corresponding command.
- Build markdown files using the Sublime Text build system. The build parser is configured via the `"parser"` setting.
- Browser preview auto reload on save if you have the [ST2 LiveReload plugin][7] installed.
- Builtin parser : Support TOC, footnotes markdown extensions
- CSS overriding if you need
- Builtin parser : supports `abbr`, `attr_list`, `def_list`, `fenced_code`, `footnotes`, `tables`, `smart_strong` and `toc` markdown extensions.
- CSS search path for local and built-in CSS files (always enabled) and/or CSS overriding if you need it
- YAML support thanks to @tommi
- Clipboard selection and copy to clipboard thanks to @hexatrope
- MathJax support : \\(\frac{\pi}{2}\\) thanks to @bps10
## Installation :
- you should use [sublime package manager][3]
- use `cmd+shift+P` then `Package Control: Install Package`
- look for `Markdown Preview` and install it.
### Using [Package Control][3] (*Recommended*)
For all Sublime Text 2/3 users we recommend installing via [Package Control][3].
1. [Install][11] Package Control if you haven't yet.
2. Use `cmd+shift+P` then `Package Control: Install Package`
3. Look for `Markdown Preview` and install it.
### Manual Install
1. Click the `Preferences > Browse Packages…` menu
2. Browse up a folder and then into the `Installed Packages/` folder
3. Download the [zip package][12], rename it to `Markdown Preview.sublime-package` and copy it into the `Installed Packages/` directory
4. Restart Sublime Text
## Usage :
### To preview :
- optionally select some of your markdown for conversion
- use `cmd+shift+P` then `Markdown Preview` to launch a preview
- use `cmd+shift+P` then `Markdown Preview` to show the following commands:
- Markdown Preview: Python Markdown: Preview in Browser
- Markdown Preview: Python Markdown: Export HTML in Sublime Text
- Markdown Preview: Python Markdown: Copy to Clipboard
- Markdown Preview: Github Flavored Markdown: Preview in Browser
- Markdown Preview: Github Flavored Markdown: Export HTML in Sublime Text
- Markdown Preview: Github Flavored Markdown: Copy to Clipboard
- Markdown Preview: Open Markdown Cheat sheet
- or bind some key in your user key binding, using a line like this one:
`{ "keys": ["alt+m"], "command": "markdown_preview", "args": {"target": "browser"} },`
`{ "keys": ["alt+m"], "command": "markdown_preview", "args": {"target": "browser", "parser":"markdown"} },`
- once converted a first time, the output HTML will be updated on each file save (with the LiveReload plugin)
## Uses :
### To build :
- [python-markdown2][0] for markdown parsing **OR** the GitHub markdown API.
- Just use `Ctrl+B` (Windows/Linux) or `cmd+B` (Mac) to build the current file.
### To config :
Using the Sublime Text menu: `Preferences`->`Package Settings`->`Markdown Preview`
- `Settings - User` is where you change your settings for Markdown Preview.
- `Settings - Default` is a good reference with a detailed description of each setting.
## Support :
- For any bugs in Markdown Preview, please feel free to report them [here][issue].
- You are also welcome to fork and submit pull requests.
## Licence :
The code is available at github [https://github.com/revolunet/sublimetext-markdown-preview][2] under MIT licence : [http://revolunet.mit-license.org][4]
The code is available at github [project][home] under [MIT licence][4].
[0]: https://github.com/trentm/python-markdown2
[2]: https://github.com/revolunet/sublimetext-markdown-preview
[3]: http://wbond.net/sublime_packages/package_control
[home]: https://github.com/revolunet/sublimetext-markdown-preview
[3]: https://sublime.wbond.net/
[4]: http://revolunet.mit-license.org
[5]: http://developer.github.com/v3/markdown
[6]: http://github.github.com/github-flavored-markdown/
[7]: https://github.com/dz0ny/LiveReload-sublimetext2
[8]: https://github.com/revolunet/sublimetext-markdown-preview/issues/27#issuecomment-11772098
[9]: https://github.com/revolunet/sublimetext-markdown-preview/issues/78#issuecomment-15644727
[10]: https://github.com/waylan/Python-Markdown
[11]: https://sublime.wbond.net/installation
[12]: https://github.com/revolunet/sublimetext-markdown-preview/archive/master.zip
[issue]: https://github.com/revolunet/sublimetext-markdown-preview/issues


@@ -116,7 +116,7 @@ except ImportError:
opener.wait()
return opener.poll() == 0
import commands
import subprocess
# Private functions.
@@ -136,7 +136,7 @@ def _is_xfce():
# XFCE detection involves testing the output of a program.
try:
return _readfrom(_get_x11_vars() + "xprop -root _DT_SAVE_MODE", shell=1).strip().endswith(' = "xfce4"')
return _readfrom(_get_x11_vars() + "xprop -root _DT_SAVE_MODE", shell=1).decode("utf-8").strip().endswith(' = "xfce4"')
except OSError:
return 0
@@ -144,7 +144,7 @@ def _is_x11():
"Return whether the X Window System is in use."
return os.environ.has_key("DISPLAY")
return "DISPLAY" in os.environ
# Introspection functions.
@@ -155,14 +155,14 @@ def get_desktop():
environment. If no environment could be detected, None is returned.
"""
if os.environ.has_key("KDE_FULL_SESSION") or \
os.environ.has_key("KDE_MULTIHEAD"):
if "KDE_FULL_SESSION" in os.environ or \
"KDE_MULTIHEAD" in os.environ:
return "KDE"
elif os.environ.has_key("GNOME_DESKTOP_SESSION_ID") or \
os.environ.has_key("GNOME_KEYRING_SOCKET"):
elif "GNOME_DESKTOP_SESSION_ID" in os.environ or \
"GNOME_KEYRING_SOCKET" in os.environ:
return "GNOME"
elif os.environ.has_key("MATE_DESKTOP_SESSION_ID") or \
os.environ.has_key("MATE_KEYRING_SOCKET"):
elif "MATE_DESKTOP_SESSION_ID" in os.environ or \
"MATE_KEYRING_SOCKET" in os.environ:
return "MATE"
elif sys.platform == "darwin":
return "Mac OS X"
@@ -222,7 +222,7 @@ def is_standard():
launching.
"""
return os.environ.has_key("DESKTOP_LAUNCH")
return "DESKTOP_LAUNCH" in os.environ
# Activity functions.
@@ -255,7 +255,7 @@ def open(url, desktop=None, wait=0):
desktop_in_use = use_desktop(desktop)
if desktop_in_use == "standard":
arg = "".join([os.environ["DESKTOP_LAUNCH"], commands.mkarg(url)])
arg = "".join([os.environ["DESKTOP_LAUNCH"], subprocess.mkarg(url)])
return _run(arg, 1, wait)
elif desktop_in_use == "Windows":
@@ -277,13 +277,16 @@ def open(url, desktop=None, wait=0):
elif desktop_in_use == "Mac OS X":
cmd = ["open", url]
elif desktop_in_use == "X11" and os.environ.has_key("BROWSER"):
elif desktop_in_use == "X11" and "BROWSER" in os.environ:
cmd = [os.environ["BROWSER"], url]
elif desktop_in_use == "X11":
cmd = ["xdg-open", url]
# Finish with an error where no suitable desktop was identified.
else:
raise OSError, "Desktop '%s' not supported (neither DESKTOP_LAUNCH nor os.startfile could be used)" % desktop_in_use
raise OSError("Desktop '%s' not supported (neither DESKTOP_LAUNCH nor os.startfile could be used)" % desktop_in_use)
return _run(cmd, 0, wait)


@@ -280,7 +280,7 @@ class Dialogue:
try:
program = self.commands[desktop_in_use]
except KeyError:
raise OSError, "Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use
raise OSError("Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use)
# The handler is one of the functions communicating with the subprocess.
# Some handlers return boolean values, others strings.
@@ -475,7 +475,7 @@ class Pulldown(Menu):
"Xdialog" : (_readvalue(_readfrom),
["--stdout", "--combobox", String("text"), Integer("height"), Integer("width"), Strings("items")]),
}
item = unicode
item = str
number_of_titles = 2
class Input(Simple):
@@ -546,6 +546,6 @@ available = [Question, Warning, Message, Error, Menu, CheckList, RadioList, Inpu
# Supported desktop environments.
supported = Dialogue.commands.keys()
supported = list(Dialogue.commands.keys())
# vim: tabstop=4 expandtab shiftwidth=4


@@ -141,7 +141,7 @@ class Window:
if match:
return self._get_handle_and_name(line[:match.start()].strip())
else:
raise OSError, "Window information from %r did not contain window details." % line
raise OSError("Window information from %r did not contain window details." % line)
def _descendants(self, s, fn):
handles = []
@@ -258,7 +258,7 @@ def root(desktop=None):
if _is_x11():
return Window(None)
else:
raise OSError, "Desktop '%s' not supported" % use_desktop(desktop)
raise OSError("Desktop '%s' not supported" % use_desktop(desktop))
def find(callable, desktop=None):


@@ -0,0 +1,49 @@
import sublime, os, pkgutil
import os.path
import re
'''
INSTALLED_DIRECTORY - The install directory name for this plugin.
For ST3
As described in http://www.sublimetext.com/docs/3/packages.html, this script's location is one of
Zipped:
"<executable_path>/Packages/Markdown Preview.sublime-package/Markdown Preview.MarkdownPreview"
"<data_path>/Installed Packages/Markdown Preview.sublime-package/Markdown Preview.MarkdownPreview"
Not Zipped:
"<data_path>/Packages/Markdown Preview/MarkdownPreview.py"
All possible paths for ST3 are absolute paths (tested on Windows)
For ST2
The __file__ will be '.\MarkdownPreview.pyc', which means that when this script is loaded,
Sublime Text has entered the directory of this script. So we make use of os.path.abspath()
'''
try:
INSTALLED_DIRECTORY = re.search("[ \\\\/]Packages[\\\\/]([^\\\\/\.]+)", os.path.abspath(__file__)).group(1)
except:
print('Warning failed to detect the install directory, defaulting to: "Markdown Preview"')
INSTALLED_DIRECTORY = "Markdown Preview"
"""
Preload all python-markdown extensions (ST2 only)
"""
# By default sublime 2 only imports python packages from the top level of the plugin directory.
# Trying to import packages from subdirectories dynamically at a later time is NOT possible.
# This package automatically imports all packages from the extension directory
# so they are available when we need them.
if sublime.version() < '3000':
packages_path = sublime.packages_path()
extension_module = "markdown.extensions"
for _, package, _ in pkgutil.walk_packages("."):
if package.startswith(extension_module):
print ("Reloading plugin extension " + os.path.join(packages_path, INSTALLED_DIRECTORY, *package.split(".")) + ".py")
__import__(package)
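The `INSTALLED_DIRECTORY` detection above simply captures the first path component that follows a `Packages` (or `Installed Packages`) directory in `__file__`. A small sketch of the same regex run against concrete versions of the example locations quoted in the comment (the paths here are illustrative only):

import re

_PATTERN = "[ \\\\/]Packages[\\\\/]([^\\\\/\.]+)"  # same pattern as in helper.py above
examples = [
    "C:/st3/Packages/Markdown Preview.sublime-package/MarkdownPreview.py",  # zipped install
    "C:/data/Packages/Markdown Preview/MarkdownPreview.py",                 # unpacked install
]
for path in examples:
    match = re.search(_PATTERN, path)
    # Both examples resolve to "Markdown Preview"; the dot in
    # ".sublime-package" stops the capture at the directory name.
    print(match.group(1) if match else "Markdown Preview")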


@@ -0,0 +1,129 @@
/*
github.com style (c) Vasily Polovnyov <vast@whiteants.net>
*/
pre code {
display: block; padding: 0.5em;
color: #333;
background: #f8f8ff
}
pre .comment,
pre .template_comment,
pre .diff .header,
pre .javadoc {
color: #998;
font-style: italic
}
pre .keyword,
pre .css .rule .keyword,
pre .winutils,
pre .javascript .title,
pre .nginx .title,
pre .subst,
pre .request,
pre .status {
color: #333;
font-weight: bold
}
pre .number,
pre .hexcolor,
pre .ruby .constant {
color: #099;
}
pre .string,
pre .tag .value,
pre .phpdoc,
pre .tex .formula {
color: #d14
}
pre .title,
pre .id,
pre .coffeescript .params,
pre .scss .preprocessor {
color: #900;
font-weight: bold
}
pre .javascript .title,
pre .lisp .title,
pre .clojure .title,
pre .subst {
font-weight: normal
}
pre .class .title,
pre .haskell .type,
pre .vhdl .literal,
pre .tex .command {
color: #458;
font-weight: bold
}
pre .tag,
pre .tag .title,
pre .rules .property,
pre .django .tag .keyword {
color: #000080;
font-weight: normal
}
pre .attribute,
pre .variable,
pre .lisp .body {
color: #008080
}
pre .regexp {
color: #009926
}
pre .class {
color: #458;
font-weight: bold
}
pre .symbol,
pre .ruby .symbol .string,
pre .lisp .keyword,
pre .tex .special,
pre .prompt {
color: #990073
}
pre .built_in,
pre .lisp .title,
pre .clojure .built_in {
color: #0086b3
}
pre .preprocessor,
pre .pi,
pre .doctype,
pre .shebang,
pre .cdata {
color: #999;
font-weight: bold
}
pre .deletion {
background: #fdd
}
pre .addition {
background: #dfd
}
pre .diff .change {
background: #0086b3
}
pre .chunk {
color: #aaa
}

File diff suppressed because one or more lines are too long


@@ -0,0 +1,437 @@
# Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject, _delegate_methods
from socket import error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
# in the instancce, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
# see if it's connected
try:
socket.getpeername(self)
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._sslobj = None
else:
# yes, create the SSL object
self._sslobj = _ssl.sslwrap(self._sock, server_side,
keyfile, certfile,
cert_reqs, ssl_version, ca_certs)
if do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, addr, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, addr, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
s = self._sslobj.shutdown()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
self._sslobj = None
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
socket.connect(self, addr)
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs)
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
def get_protocol_name(protocol_code):
if protocol_code == PROTOCOL_TLSv1:
return "TLSv1"
elif protocol_code == PROTOCOL_SSLv23:
return "SSLv23"
elif protocol_code == PROTOCOL_SSLv3:
return "SSLv3"
else:
return "<unknown>"
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
for compability with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
PROTOCOL_SSLv23, None)
try:
sock.getpeername()
except:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock


@@ -0,0 +1,30 @@
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,30 @@
[Python-Markdown][]
===================
This is a Python implementation of John Gruber's [Markdown][].
It is almost completely compliant with the reference implementation,
though there are a few known issues. See [Features][] for information
on what exactly is supported and what is not. Additional features are
supported by the [Available Extensions][].
[Python-Markdown]: http://packages.python.org/Markdown/
[Markdown]: http://daringfireball.net/projects/markdown/
[Features]: http://packages.python.org/Markdown/index.html#Features
[Available Extensions]: http://packages.python.org/Markdown/extensions/index.html
Documentation
-------------
Installation and usage documentation is available in the `docs/` directory
of the distribution and on the project website at
<http://packages.python.org/Markdown/>.
Support
-------
You may ask for help and discuss various other issues on the [mailing list][] and report bugs on the [bug tracker][].
[mailing list]: http://lists.sourceforge.net/lists/listinfo/python-markdown-discuss
[bug tracker]: http://github.com/waylan/Python-Markdown/issues


@@ -0,0 +1,448 @@
"""
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <http://packages.python.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2013 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .__version__ import version, version_info
import re
import codecs
import sys
import logging
from . import util
from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser
from .treeprocessors import build_treeprocessors
from .inlinepatterns import build_inlinepatterns
from .postprocessors import build_postprocessors
from .extensions import Extension
from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown(object):
"""Convert Markdown to HTML."""
doc_tag = "div" # Element used to wrap document - later removed
option_defaults = {
'html_replacement_text' : '[HTML_REMOVED]',
'tab_length' : 4,
'enable_attributes' : True,
'smart_emphasis' : True,
'lazy_ol' : True,
}
output_formats = {
'html' : to_html_string,
'html4' : to_html_string,
'html5' : to_html_string,
'xhtml' : to_xhtml_string,
'xhtml1': to_xhtml_string,
'xhtml5': to_xhtml_string,
}
ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
def __init__(self, *args, **kwargs):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If they are of type string, the module mdx_name.py will be loaded.
If they are a subclass of markdown.Extension, they will be used
as-is.
* extension_configs: Configuration settingis for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml1": Outputs XHTML 1.x. Default.
* "xhtml5": Outputs XHTML style tags of HTML 5
* "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
* "html4": Outputs HTML 4
* "html5": Outputs HTML style tags of HTML 5
* "html": Outputs latest supported version of HTML (currently HTML 4).
Note that it is suggested that the more specific formats ("xhtml1"
and "html4") be used as "xhtml" or "html" may change in the future
if it makes sense at that time.
* safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
* html_replacement_text: Text used when safe_mode is set to "replace".
* tab_length: Length of tabs in the source. Default: 4
* enable_attributes: Enable the conversion of attributes. Default: True
* smart_emphasis: Treat `_connected_words_` intelegently Default: True
* lazy_ol: Ignore number of first item of ordered lists. Default: True
"""
# For backward compatibility, loop through old positional args
pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
c = 0
for arg in args:
if pos[c] not in kwargs:
kwargs[pos[c]] = arg
c += 1
if c == len(pos):
# ignore any additional args
break
# Loop through kwargs and assign defaults
for option, default in self.option_defaults.items():
setattr(self, option, kwargs.get(option, default))
self.safeMode = kwargs.get('safe_mode', False)
if self.safeMode and 'enable_attributes' not in kwargs:
# Disable attributes in safeMode when not explicitly set
self.enable_attributes = False
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.set_output_format(kwargs.get('output_format', 'xhtml1'))
self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {}))
self.reset()
def build_parser(self):
""" Build the parser from the various parts. """
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects. See the docstring on Markdown.
* configs: A dictionary mapping module names to config options.
"""
for ext in extensions:
if isinstance(ext, util.string_type):
ext = self.build_extension(ext, configs.get(ext, []))
if isinstance(ext, Extension):
ext.extendMarkdown(self, globals())
elif ext is not None:
raise TypeError(
'Extension "%s.%s" must be of type: "markdown.Extension"'
% (ext.__class__.__module__, ext.__class__.__name__))
return self
def build_extension(self, ext_name, configs = []):
"""Build extension by name, then return the module.
The extension name may contain arguments as part of the string in the
following format: "extname(key1=value1,key2=value2)"
"""
# Parse extensions config params (ignore the order)
configs = dict(configs)
pos = ext_name.find("(") # find the first "("
if pos > 0:
ext_args = ext_name[pos+1:-1]
ext_name = ext_name[:pos]
pairs = [x.split("=") for x in ext_args.split(",")]
configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
# Setup the module name
module_name = ext_name
if '.' not in ext_name:
import sublime
if sublime.version() >= '3000':
from ..helper import INSTALLED_DIRECTORY
module_name = '.'.join([INSTALLED_DIRECTORY, 'markdown.extensions', ext_name])
else:
module_name = '.'.join(['markdown.extensions', ext_name])
# Try loading the extension first from one place, then another
try: # New style (markdown.extensons.<extension>)
module = __import__(module_name, {}, {}, [module_name.rpartition('.')[0]])
except ImportError:
module_name_old_style = '_'.join(['mdx', ext_name])
try: # Old style (mdx_<extension>)
module = __import__(module_name_old_style)
except ImportError as e:
message = "Failed loading extension '%s' from '%s' or '%s'" \
% (ext_name, module_name, module_name_old_style)
e.args = (message,) + e.args[1:]
raise
# If the module is loaded successfully, we expect it to define a
# function called makeExtension()
try:
return module.makeExtension(configs.items())
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:]
raise
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
return self
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self
def set_output_format(self, format):
""" Set the output format for the class instance. """
self.output_format = format.lower()
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = 'Invalid Output Format: "%s". Use one of %s.' \
% (self.output_format,
'"' + '", "'.join(valid_formats) + '"')
e.args = (message,) + e.args[1:]
raise
return self
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.
"""
# Fixup the source text
if not source.strip():
return '' # a blank unicode string
try:
source = util.text_type(source)
except UnicodeDecodeError as e:
# Customise error message while maintaining original traceback
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors.values():
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors.values():
newRoot = treeprocessor.run(root)
if newRoot:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = output.index('<%s>'%self.doc_tag)+len(self.doc_tag)+2
end = output.rindex('</%s>'%self.doc_tag)
output = output[start:end].strip()
except ValueError:
if output.strip().endswith('<%s />'%self.doc_tag):
# We have an empty document
output = ''
else:
# We have a serious problem
raise ValueError('Markdown failed to strip top-level tags. Document=%r' % output.strip())
# Run the text post-processors
for pp in self.postprocessors.values():
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
if input:
if isinstance(input, util.string_type):
input_file = codecs.open(input, mode="r", encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if not isinstance(text, util.text_type):
text = text.decode(encoding)
text = text.lstrip('\ufeff') # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if output:
if isinstance(output, util.string_type):
output_file = codecs.open(output, "w",
encoding=encoding,
errors="xmlcharrefreplace")
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors="xmlcharrefreplace")
output_file.write(html)
# Don't close here. User may want to write more.
else:
# Encode manually and write bytes to stdout.
html = html.encode(encoding, "xmlcharrefreplace")
try:
# Write bytes directly to buffer (Python 3).
sys.stdout.buffer.write(html)
except AttributeError:
# Probably Python 2, which works with bytes by default.
sys.stdout.write(html)
return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
"""Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string.
"""
md = Markdown(*args, **kwargs)
return md.convert(text)
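# A minimal usage sketch of the markdown() shortcut above (illustrative only;
# the input string and extension names are example values):
#
#     import markdown
#     html = markdown.markdown('# Title\n\nSome *emphasised* text.')
#     html_extra = markdown.markdown(some_text, extensions=['extra', 'codehilite'])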
def markdownFromFile(*args, **kwargs):
"""Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class.
"""
# For backward compatibility loop through positional args
pos = ['input', 'output', 'extensions', 'encoding']
c = 0
for arg in args:
if pos[c] not in kwargs:
kwargs[pos[c]] = arg
c += 1
if c == len(pos):
break
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None),
kwargs.get('output', None),
kwargs.get('encoding', None))
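# A minimal usage sketch of markdownFromFile() (illustrative; the file names
# are assumptions, not part of the library):
#
#     markdown.markdownFromFile(input='README.md', output='README.html',
#                               encoding='utf-8')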

View File

@@ -0,0 +1,87 @@
"""
COMMAND-LINE SPECIFIC STUFF
=============================================================================
"""
import markdown
import sys
import optparse
import logging
from logging import DEBUG, INFO, CRITICAL
logger = logging.getLogger('MARKDOWN')
def parse_options():
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [INPUTFILE]
(STDIN is assumed if no INPUTFILE is given)"""
desc = "A Python implementation of John Gruber's Markdown. " \
"http://packages.python.org/Markdown/"
ver = "%%prog %s" % markdown.version
parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
parser.add_option("-f", "--file", dest="filename", default=None,
help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="Encoding for input and output files.",)
parser.add_option("-q", "--quiet", default = CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=INFO, dest="verbose",
help="Print all warnings.")
parser.add_option("-s", "--safe", dest="safe", default=False,
metavar="SAFE_MODE",
help="'replace', 'remove' or 'escape' HTML tags in input")
parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml1', metavar="OUTPUT_FORMAT",
help="'xhtml1' (default), 'html4' or 'html5'.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help = "Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
action='store_false', default=True,
help="Observe number of first item of ordered lists.")
(options, args) = parser.parse_args()
if len(args) == 0:
input_file = None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
return {'input': input_file,
'output': options.filename,
'safe_mode': options.safe,
'extensions': options.extensions,
'encoding': options.encoding,
'output_format': options.output_format,
'lazy_ol': options.lazy_ol}, options.verbose
def run():
"""Run Markdown from the command line."""
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
if not options: sys.exit(2)
logger.setLevel(logging_level)
logger.addHandler(logging.StreamHandler())
# Run
markdown.markdownFromFile(**options)
if __name__ == '__main__':
# Support running module as a commandline command.
# Python 2.5 & 2.6 do: `python -m markdown.__main__ [options] [args]`.
# Python 2.7 & 3.x do: `python -m markdown [options] [args]`.
run()
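# Illustrative command-line invocations handled by the options defined above
# (file names are examples only):
#
#     python -m markdown README.md -f README.html
#     python -m markdown -x tables -x footnotes -o html5 input.md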

View File

@@ -0,0 +1,28 @@
#
# markdown/__version__.py
#
# version_info should conform to PEP 386
# (major, minor, micro, alpha/beta/rc/final, #)
# (1, 1, 2, 'alpha', 0) => "1.1.2.dev"
# (1, 2, 0, 'beta', 2) => "1.2b2"
version_info = (2, 3, 1, 'final', 0)
def _get_version():
" Returns a PEP 386-compliant version number from version_info. "
assert len(version_info) == 5
assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
main = '.'.join(map(str, version_info[:parts]))
sub = ''
if version_info[3] == 'alpha' and version_info[4] == 0:
# TODO: maybe append some sort of git info here??
sub = '.dev'
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version_info[3]] + str(version_info[4])
return str(main + sub)
version = _get_version()

View File

@@ -0,0 +1,99 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels of nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
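# Sketch of the State API described above (illustrative only):
#
#     state = State()
#     state.set('list')        # entering a nested list block
#     state.isstate('list')    # -> True while inside that block
#     state.reset()            # back out of the nesting level
#     state.isstate('list')    # -> False again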
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self, markdown):
self.blockprocessors = odict.OrderedDict()
self.state = State()
self.markdown = markdown
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent Element)
is created and the root element is passed to the parser as the parent.
The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = util.etree.Element(self.markdown.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return util.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used internally.
This is a public method as an extension may need to add/alter additional
BlockProcessors which call this method to recursively parse a nested
block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break

View File

@@ -0,0 +1,558 @@
"""
CORE MARKDOWN BLOCKPARSER
===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import re
from . import util
from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN')
def build_block_parser(md_instance, **kwargs):
""" Build the default block parser used by Markdown. """
parser = BlockParser(md_instance)
parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)
parser.blockprocessors['indent'] = ListIndentProcessor(parser)
parser.blockprocessors['code'] = CodeBlockProcessor(parser)
parser.blockprocessors['hashheader'] = HashHeaderProcessor(parser)
parser.blockprocessors['setextheader'] = SetextHeaderProcessor(parser)
parser.blockprocessors['hr'] = HRProcessor(parser)
parser.blockprocessors['olist'] = OListProcessor(parser)
parser.blockprocessors['ulist'] = UListProcessor(parser)
parser.blockprocessors['quote'] = BlockQuoteProcessor(parser)
parser.blockprocessors['paragraph'] = ParagraphProcessor(parser)
return parser
class BlockProcessor:
""" Base class for block processors.
Each subclass will provide the methods below to work with the source and
tree. Each processor will need to define its own ``test`` and ``run``
methods. The ``test`` method should return True or False, to indicate
whether the current block should be processed by this processor. If the
test passes, the parser will call the processor's ``run`` method.
"""
def __init__(self, parser):
self.parser = parser
self.tab_length = parser.markdown.tab_length
def lastChild(self, parent):
""" Return the last child of an etree element. """
if len(parent):
return parent[-1]
else:
return None
def detab(self, text):
""" Remove a tab from the front of each line of the given text. """
newtext = []
lines = text.split('\n')
for line in lines:
if line.startswith(' '*self.tab_length):
newtext.append(line[self.tab_length:])
elif not line.strip():
newtext.append('')
else:
break
return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
def looseDetab(self, text, level=1):
""" Remove a tab from front of lines but allowing dedented lines. """
lines = text.split('\n')
for i in range(len(lines)):
if lines[i].startswith(' '*self.tab_length*level):
lines[i] = lines[i][self.tab_length*level:]
return '\n'.join(lines)
def test(self, parent, block):
""" Test for block type. Must be overridden by subclasses.
As the parser loops through processors, it will call the ``test`` method
on each to determine if the given block of text is of that type. This
method must return a boolean ``True`` or ``False``. The actual method of
testing is left to the needs of that particular block type. It could
be as simple as ``block.startswith(some_string)`` or a complex regular
expression. As the block type may be different depending on the parent
of the block (i.e. inside a list), the parent etree element is also
provided and may be used as part of the test.
Keywords:
* ``parent``: An etree element which will be the parent of the block.
* ``block``: A block of text from the source which has been split at
blank lines.
"""
pass
def run(self, parent, blocks):
""" Run processor. Must be overridden by subclasses.
When the parser determines the appropriate type of a block, the parser
will call the corresponding processor's ``run`` method. This method
should parse the individual lines of the block and append them to
the etree.
Note that both the ``parent`` and ``etree`` keywords are pointers
to instances of the objects which should be edited in place. Each
processor must make changes to the existing objects as there is no
mechanism to return new/different objects to replace them.
This means that this method should be adding SubElements or adding text
to the parent, and should remove (``pop``) or add (``insert``) items to
the list of blocks.
Keywords:
* ``parent``: An etree element which is the parent of the current block.
* ``blocks``: A list of all remaining blocks of the document.
"""
pass
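# Minimal sketch of the test/run contract described above. 'BoxProcessor' and
# its "%%" syntax are hypothetical, purely to illustrate the pattern; the real
# processors follow below.
#
#     class BoxProcessor(BlockProcessor):
#         def test(self, parent, block):
#             return block.startswith('%%')
#         def run(self, parent, blocks):
#             block = blocks.pop(0)
#             div = util.etree.SubElement(parent, 'div')
#             div.set('class', 'box')
#             # parse the remainder of the block as normal markdown blocks
#             self.parser.parseChunk(div, block[2:])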
class ListIndentProcessor(BlockProcessor):
""" Process children of list items.
Example:
* a list item
process this part
or this part
"""
ITEM_TYPES = ['li']
LIST_TYPES = ['ul', 'ol']
def __init__(self, *args):
BlockProcessor.__init__(self, *args)
self.INDENT_RE = re.compile(r'^(([ ]{%s})+)'% self.tab_length)
def test(self, parent, block):
return block.startswith(' '*self.tab_length) and \
not self.parser.state.isstate('detabbed') and \
(parent.tag in self.ITEM_TYPES or \
(len(parent) and parent[-1] and \
(parent[-1].tag in self.LIST_TYPES)
)
)
def run(self, parent, blocks):
block = blocks.pop(0)
level, sibling = self.get_level(parent, block)
block = self.looseDetab(block, level)
self.parser.state.set('detabbed')
if parent.tag in self.ITEM_TYPES:
# It's possible that this parent has a 'ul' or 'ol' child list
# with a member. If that is the case, then that should be the
# parent. This is intended to catch the edge case of an indented
# list whose first member was parsed previous to this point
# see OListProcessor
if len(parent) and parent[-1].tag in self.LIST_TYPES:
self.parser.parseBlocks(parent[-1], [block])
else:
# The parent is already a li. Just parse the child block.
self.parser.parseBlocks(parent, [block])
elif sibling.tag in self.ITEM_TYPES:
# The sibling is a li. Use it as parent.
self.parser.parseBlocks(sibling, [block])
elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
# The parent is a list (``ol`` or ``ul``) which has children.
# Assume the last child li is the parent of this block.
if sibling[-1].text:
# If the parent li has text, that text needs to be moved to a p
# The p must be 'inserted' at beginning of list in the event
# that other children already exist i.e.; a nested sublist.
p = util.etree.Element('p')
p.text = sibling[-1].text
sibling[-1].text = ''
sibling[-1].insert(0, p)
self.parser.parseChunk(sibling[-1], block)
else:
self.create_item(sibling, block)
self.parser.state.reset()
def create_item(self, parent, block):
""" Create a new li and parse the block with it as the parent. """
li = util.etree.SubElement(parent, 'li')
self.parser.parseBlocks(li, [block])
def get_level(self, parent, block):
""" Get level of indent based on list level. """
# Get indent level
m = self.INDENT_RE.match(block)
if m:
indent_level = len(m.group(1))/self.tab_length
else:
indent_level = 0
if self.parser.state.isstate('list'):
# We're in a tightlist - so we already are at correct parent.
level = 1
else:
# We're in a looselist - so we need to find parent.
level = 0
# Step through children of tree to find matching indent level.
while indent_level > level:
child = self.lastChild(parent)
if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
if child.tag in self.LIST_TYPES:
level += 1
parent = child
else:
# No more child levels. If we're short of indent_level,
# we have a code block. So we stop here.
break
return level, parent
class CodeBlockProcessor(BlockProcessor):
""" Process code blocks. """
def test(self, parent, block):
return block.startswith(' '*self.tab_length)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
theRest = ''
if sibling and sibling.tag == "pre" and len(sibling) \
and sibling[0].tag == "code":
# The previous block was a code block. As blank lines do not start
# new code blocks, append this block to the previous, adding back
# linebreaks removed from the split into a list.
code = sibling[0]
block, theRest = self.detab(block)
code.text = util.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
else:
# This is a new codeblock. Create the elements and insert text.
pre = util.etree.SubElement(parent, 'pre')
code = util.etree.SubElement(pre, 'code')
block, theRest = self.detab(block)
code.text = util.AtomicString('%s\n' % block.rstrip())
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # Lines before blockquote
# Recursively parse the lines before the blockquote first.
self.parser.parseBlocks(parent, [before])
# Remove ``> `` from beginning of each line.
block = '\n'.join([self.clean(line) for line in
block[m.start():].split('\n')])
sibling = self.lastChild(parent)
if sibling and sibling.tag == "blockquote":
# Previous block was a blockquote, so set that as this block's parent
quote = sibling
else:
# This is a new blockquote. Create a new parent element.
quote = util.etree.SubElement(parent, 'blockquote')
# Recursively parse block with blockquote as parent.
# change parser state so blockquotes embedded in lists use p tags
self.parser.state.set('blockquote')
self.parser.parseChunk(quote, block)
self.parser.state.reset()
def clean(self, line):
""" Remove ``>`` from beginning of a line. """
m = self.RE.match(line)
if line.strip() == ">":
return ""
elif m:
return m.group(2)
else:
return line
class OListProcessor(BlockProcessor):
""" Process ordered list blocks. """
TAG = 'ol'
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
# Detect items on secondary lines. They can be of either list type.
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
# Detect indented (nested) items of either type
INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
# The integer (python string) with which the list starts (default=1)
# E.g.: if the list is initialized as:
# 3. Item
# the ol tag will get a start="3" attribute
STARTSWITH = '1'
# List of allowed sibling tags.
SIBLING_TAGS = ['ol', 'ul']
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
# Check for multiple items in one block.
items = self.get_items(blocks.pop(0))
sibling = self.lastChild(parent)
if sibling and sibling.tag in self.SIBLING_TAGS:
# Previous block was a list item, so set that as parent
lst = sibling
# make sure previous item is in a p - if the item has text, then it
# isn't in a p
if lst[-1].text:
# since it's possible there are other children for this sibling,
# we can't just SubElement the p, we need to insert it as the
# first item
p = util.etree.Element('p')
p.text = lst[-1].text
lst[-1].text = ''
lst[-1].insert(0, p)
# if the last item has a tail, then the tail needs to be put in a p
# likely only when a header is not followed by a blank line
lch = self.lastChild(lst[-1])
if lch is not None and lch.tail:
p = util.etree.SubElement(lst[-1], 'p')
p.text = lch.tail.lstrip()
lch.tail = ''
# parse first block differently as it gets wrapped in a p.
li = util.etree.SubElement(lst, 'li')
self.parser.state.set('looselist')
firstitem = items.pop(0)
self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset()
elif parent.tag in ['ol', 'ul']:
# this catches the edge case of a multi-item indented list whose
# first item is in a blank parent-list item:
# * * subitem1
# * subitem2
# see also ListIndentProcessor
lst = parent
else:
# This is a new list so create parent with appropriate tag.
lst = util.etree.SubElement(parent, self.TAG)
# Check if a custom start integer is set
if not self.parser.markdown.lazy_ol and self.STARTSWITH !='1':
lst.attrib['start'] = self.STARTSWITH
self.parser.state.set('list')
# Loop through items in block, recursively parsing each with the
# appropriate parent.
for item in items:
if item.startswith(' '*self.tab_length):
# Item is indented. Parse with last item as parent
self.parser.parseBlocks(lst[-1], [item])
else:
# New item. Create li and parse with it as parent
li = util.etree.SubElement(lst, 'li')
self.parser.parseBlocks(li, [item])
self.parser.state.reset()
def get_items(self, block):
""" Break a block into list items. """
items = []
for line in block.split('\n'):
m = self.CHILD_RE.match(line)
if m:
# This is a new list item
# Check first item for the start index
if not items and self.TAG=='ol':
# Detect the integer value of first list item
INTEGER_RE = re.compile('(\d+)')
self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
# Append to the list
items.append(m.group(3))
elif self.INDENT_RE.match(line):
# This is an indented (possibly nested) item.
if items[-1].startswith(' '*self.tab_length):
# Previous item was indented. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
else:
items.append(line)
else:
# This is another line of previous item. Append to that item.
items[-1] = '%s\n%s' % (items[-1], line)
return items
class UListProcessor(OListProcessor):
""" Process unordered list blocks. """
TAG = 'ul'
RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
""" Process Hash Headers. """
# Detect a header at start of any line in block
RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse these lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
h.text = m.group('header').strip()
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else:
# This should never happen, but just in case...
logger.warn("We've got a problem header: %r" % block)
class SetextHeaderProcessor(BlockProcessor):
""" Process Setext-style Headers. """
# Detect Setext-style header. Must be first 2 lines of block.
RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
lines = blocks.pop(0).split('\n')
# Determine level. ``=`` is 1 and ``-`` is 2.
if lines[1].startswith('='):
level = 1
else:
level = 2
h = util.etree.SubElement(parent, 'h%d' % level)
h.text = lines[0].strip()
if len(lines) > 2:
# Block contains additional lines. Add to master blocks for later.
blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
""" Process Horizontal Rules. """
RE = r'^[ ]{0,3}((-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,})[ ]*'
# Detect hr on any line of a block.
SEARCH_RE = re.compile(RE, re.MULTILINE)
def test(self, parent, block):
m = self.SEARCH_RE.search(block)
# No atomic grouping in python so we simulate it here for performance.
# The regex only matches what would be in the atomic group - the HR.
# Then check if we are at end of block or if next char is a newline.
if m and (m.end() == len(block) or block[m.end()] == '\n'):
# Save match object on class instance so we can use it later.
self.match = m
return True
return False
def run(self, parent, blocks):
block = blocks.pop(0)
# Check for lines in block before hr.
prelines = block[:self.match.start()].rstrip('\n')
if prelines:
# Recursively parse lines before hr so they get parsed first.
self.parser.parseBlocks(parent, [prelines])
# create hr
util.etree.SubElement(parent, 'hr')
# check for lines in block after hr.
postlines = block[self.match.end():].lstrip('\n')
if postlines:
# Add lines after hr to master blocks for later parsing.
blocks.insert(0, postlines)
class EmptyBlockProcessor(BlockProcessor):
""" Process blocks that are empty or start with an empty line. """
def test(self, parent, block):
return not block or block.startswith('\n')
def run(self, parent, blocks):
block = blocks.pop(0)
filler = '\n\n'
if block:
# Starts with empty line
# Only replace a single line.
filler = '\n'
# Save the rest for later.
theRest = block[1:]
if theRest:
# Add remaining lines to master blocks for later.
blocks.insert(0, theRest)
sibling = self.lastChild(parent)
if sibling and sibling.tag == 'pre' and len(sibling) and sibling[0].tag == 'code':
# Last block is a codeblock. Append to preserve whitespace.
sibling[0].text = util.AtomicString('%s%s' % (sibling[0].text, filler))
class ParagraphProcessor(BlockProcessor):
""" Process Paragraph blocks. """
def test(self, parent, block):
return True
def run(self, parent, blocks):
block = blocks.pop(0)
if block.strip():
# Not a blank block. Add to parent, otherwise throw it away.
if self.parser.state.isstate('list'):
# The parent is a tight-list.
#
# Check for any children. This will likely only happen in a
# tight-list when a header isn't followed by a blank line.
# For example:
#
# * # Header
# Line 2 of list item - not part of header.
sibling = self.lastChild(parent)
if sibling is not None:
# Insert after sibling.
if sibling.tail:
sibling.tail = '%s\n%s' % (sibling.tail, block)
else:
sibling.tail = '\n%s' % block
else:
# Append to parent.text
if parent.text:
parent.text = '%s\n%s' % (parent.text, block)
else:
parent.text = block.lstrip()
else:
# Create a regular paragraph
p = util.etree.SubElement(parent, 'p')
p.text = block.lstrip()

View File

@@ -0,0 +1,53 @@
"""
Extensions
-----------------------------------------------------------------------------
"""
from __future__ import unicode_literals
class Extension(object):
""" Base class for extensions to subclass. """
def __init__(self, configs = {}):
"""Create an instance of an Extention.
Keyword arguments:
* configs: A dict of configuration setting used by an Extension.
"""
self.config = configs
def getConfig(self, key, default=''):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def getConfigs(self):
""" Return all configs settings as a dict. """
return dict([(key, self.getConfig(key)) for key in self.config.keys()])
def getConfigInfo(self):
""" Return all config descriptions as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
self.config[key][0] = value
def extendMarkdown(self, md, md_globals):
"""
Add the various processors and patterns to the Markdown Instance.
This method must be overridden by every extension.
Keyword arguments:
* md: The Markdown instance.
* md_globals: Global variables in the markdown module namespace.
"""
raise NotImplementedError('Extension "%s.%s" must define an "extendMarkdown" ' \
'method.' % (self.__class__.__module__, self.__class__.__name__))
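# Minimal sketch of subclassing Extension (illustrative; 'MyExtension' and
# 'MyPreprocessor' are hypothetical names):
#
#     class MyExtension(Extension):
#         def extendMarkdown(self, md, md_globals):
#             md.registerExtension(self)
#             md.preprocessors.add('my_pre', MyPreprocessor(md), '<reference')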

View File

@@ -0,0 +1,96 @@
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> print markdown.markdown(text, ['abbr'])
<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)

View File

@@ -0,0 +1,118 @@
"""
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
The syntax is (followed by an indented block with the contents):
!!! [type] [optional explicit title]
Where `type` is used as a CSS class name of the div. If not present, `title`
defaults to the capitalized `type`, so "note" -> "Note".
rST suggests the following `types`, but you're free to use whatever you want:
attention, caution, danger, error, hint, important, note, tip, warning
A simple example:
!!! note
This is the first line inside the box.
Outputs:
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>This is the first line inside the box</p>
</div>
You can also specify the title and CSS class of the admonition:
!!! custom "Did you know?"
Another line here.
Outputs:
<div class="admonition custom">
<p class="admonition-title">Did you know?</p>
<p>Another line here.</p>
</div>
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
By [Tiago Serafim](http://www.tiagoserafim.com/).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from ..util import etree
import re
class AdmonitionExtension(Extension):
""" Admonition extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add Admonition to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.add('admonition',
AdmonitionProcessor(md.parser),
'_begin')
class AdmonitionProcessor(BlockProcessor):
CLASSNAME = 'admonition'
CLASSNAME_TITLE = 'admonition-title'
RE = re.compile(r'(?:^|\n)!!!\ ?([\w\-]+)(?:\ "(.*?)")?')
def test(self, parent, block):
sibling = self.lastChild(parent)
return self.RE.search(block) or \
(block.startswith(' ' * self.tab_length) and sibling and \
sibling.get('class', '').find(self.CLASSNAME) != -1)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
block = block[m.end() + 1:] # removes the first line
block, theRest = self.detab(block)
if m:
klass, title = self.get_class_and_title(m)
div = etree.SubElement(parent, 'div')
div.set('class', '%s %s' % (self.CLASSNAME, klass))
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
def get_class_and_title(self, match):
klass, title = match.group(1).lower(), match.group(2)
if title is None:
# no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render `<p class="admonition-title">Note</p>`
title = klass.capitalize()
elif title == '':
# an explicit blank title should not be rendered
# e.g.: `!!! warning ""` will *not* render `p` with a title
title = None
return klass, title
def makeExtension(configs={}):
return AdmonitionExtension(configs=configs)

View File

@@ -0,0 +1,140 @@
"""
Attribute List Extension for Python-Markdown
============================================
Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name.
Copyright 2011 [Waylan Limberg](http://achinghead.com/).
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import isBlockLevel
import re
try:
Scanner = re.Scanner
except AttributeError:
# must be on Python 2.4
from sre import Scanner
def _handle_double_quote(s, t):
k, v = t.split('=')
return k, v.strip('"')
def _handle_single_quote(s, t):
k, v = t.split('=')
return k, v.strip("'")
def _handle_key_value(s, t):
return t.split('=')
def _handle_word(s, t):
if t.startswith('.'):
return '.', t[1:]
if t.startswith('#'):
return 'id', t[1:]
return t, t
_scanner = Scanner([
(r'[^ ]+=".*?"', _handle_double_quote),
(r"[^ ]+='.*?'", _handle_single_quote),
(r'[^ ]+=[^ ]*', _handle_key_value),
(r'[^ ]+', _handle_word),
(r' ', None)
])
def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0]
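# Illustrative example of what the scanner above produces:
#
#     get_attrs('#intro .lead key="some value"')
#     # -> [('id', 'intro'), ('.', 'lead'), ('key', 'some value')]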
def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:?([^\}]*)\}'
HEADER_RE = re.compile(r'[ ]*%s[ ]*$' % BASE_RE)
BLOCK_RE = re.compile(r'\n[ ]*%s[ ]*$' % BASE_RE)
INLINE_RE = re.compile(r'^%s' % BASE_RE)
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff\u0370-\u037d'
r'\u037f-\u1fff\u200c-\u200d\u2070-\u218f\u2c00-\u2fef'
r'\u3001-\ud7ff\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc):
for elem in doc.getiterator():
if isBlockLevel(elem.tag):
# Block level: check for attrs on last line of text
RE = self.BLOCK_RE
if isheader(elem):
# header: check for attrs at end of line
RE = self.HEADER_RE
if len(elem) and elem[-1].tail:
# has children. Get from tail of last child
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
if isheader(elem):
# clean up trailing #s
elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
elif elem.text:
# no children. Get from text.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
if isheader(elem):
# clean up trailing #s
elem.text = elem.text.rstrip('#').rstrip()
else:
# inline: check for attrs at start of tail
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
def assign_attrs(self, elem, attrs):
""" Assign attrs to element. """
for k, v in get_attrs(attrs):
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '%s %s' % (cls, v))
else:
elem.set('class', v)
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v)
def sanitize_name(self, name):
"""
Sanitize name as 'an XML Name, minus the ":"'.
See http://www.w3.org/TR/REC-xml-names/#NT-NCName
"""
return self.NAME_RE.sub('_', name)
class AttrListExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.treeprocessors.add('attr_list', AttrListTreeprocessor(md), '>prettify')
def makeExtension(configs={}):
return AttrListExtension(configs=configs)

View File

@@ -0,0 +1,240 @@
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/code_hilite.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
Dependencies:
* [Python 2.3+](http://python.org/)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments](http://pygments.org/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
import warnings
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer
from pygments.formatters import HtmlFormatter
pygments = True
except ImportError:
pygments = False
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite(object):
"""
Determine language of source code, and pass it into the pygments highlighter.
Basic Usage:
>>> code = CodeHilite(src = 'some text')
>>> html = code.hilite()
* src: Source string or any object with a .readline attribute.
* linenums: (Boolean) Set line numbering to 'on' (True), 'off' (False) or 'auto' (None).
Set to 'auto' by default.
* guess_lang: (Boolean) Turn language auto-detection 'on' or 'off' (on by default).
* css_class: Set class name of wrapper div ('codehilite' by default).
Low Level Usage:
>>> code = CodeHilite()
>>> code.src = 'some text' # String or anything with a .readline attr.
>>> code.linenos = True # True or False; Turns line numbering on or of.
>>> html = code.hilite()
"""
def __init__(self, src=None, linenums=None, guess_lang=True,
css_class="codehilite", lang=None, style='default',
noclasses=False, tab_length=4):
self.src = src
self.lang = lang
self.linenums = linenums
self.guess_lang = guess_lang
self.css_class = css_class
self.style = style
self.noclasses = noclasses
self.tab_length = tab_length
def hilite(self):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None:
self._getLang()
if pygments:
try:
lexer = get_lexer_by_name(self.lang)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src)
else:
lexer = TextLexer()
except ValueError:
lexer = TextLexer()
formatter = HtmlFormatter(linenos=self.linenums,
cssclass=self.css_class,
style=self.style,
noclasses=self.noclasses)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
classes = []
if self.lang:
classes.append('language-%s' % self.lang)
if self.linenums:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="%s"' % ' '.join(classes)
return '<pre class="%s"><code%s>%s</code></pre>\n'% \
(self.css_class, class_str, txt)
def _getLang(self):
"""
Determines language of a code block from shebang line and whether said
line should be removed or left in place. If the shebang line contains a
path (even a single /) then it is assumed to be a real shebang line and
left alone. However, if no path is given (e.g.: #!python or :::python)
then it is assumed to be a mock shebang for language identification of a
code fragment and removed from the code block prior to processing for
code highlighting. When a mock shebang (e.g.: #!python) is found, line
numbering is turned on. When colons are found in place of a shebang
(e.g.: :::python), line numbering is left in the current state - off
by default.
"""
import re
#split text into lines
lines = self.src.split("\n")
#pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons.
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w+-]*) # The language
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError:
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.linenums is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.linenums = True
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
""" Hilight source code in code blocks. """
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.getiterator('pre')
for block in blocks:
children = block.getchildren()
if len(children) == 1 and children[0].tag == 'code':
code = CodeHilite(children[0].text,
linenums=self.config['linenums'],
guess_lang=self.config['guess_lang'],
css_class=self.config['css_class'],
style=self.config['pygments_style'],
noclasses=self.config['noclasses'],
tab_length=self.markdown.tab_length)
placeholder = self.markdown.htmlStash.store(code.hilite(),
safe=True)
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(Extension):
""" Add source code hilighting to markdown codeblocks. """
def __init__(self, configs):
# define default configs
self.config = {
'linenums': [None, "Use line numbers. True=yes, False=no, None=auto"],
'force_linenos' : [False, "Deprecated! Use 'linenums' instead. Force line numbers - Default: False"],
'guess_lang' : [True, "Automatic language detection - Default: True"],
'css_class' : ["codehilite",
"Set class name for wrapper <div> - Default: codehilite"],
'pygments_style' : ['default', 'Pygments HTML Formatter Style (Colorscheme) - Default: default'],
'noclasses': [False, 'Use inline styles instead of CSS classes - Default false']
}
# Override defaults with user settings
for key, value in configs:
# convert strings to booleans
if value == 'True': value = True
if value == 'False': value = False
if value == 'None': value = None
if key == 'force_linenos':
warnings.warn('The "force_linenos" config setting'
' to the CodeHilite extension is deprecated.'
' Use "linenums" instead.', PendingDeprecationWarning)
if value:
# Carry 'force_linenos' over to new 'linenos'.
self.setConfig('linenums', True)
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.add("hilite", hiliter, "<inline")
md.registerExtension(self)
def makeExtension(configs={}):
return CodeHiliteExtension(configs=configs)
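# Illustrative usage sketch (not part of this file): enabling the extension
# with config values passed in the extension-name string, as parsed by
# Markdown.build_extension.
#
#     import markdown
#     html = markdown.markdown(src, extensions=['codehilite(linenums=True,css_class=highlight)'])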

View File

@@ -0,0 +1,118 @@
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An American computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor, ListIndentProcessor
from ..util import etree
import re
class DefListProcessor(BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
raw_block = blocks.pop(0)
m = self.RE.search(raw_block)
terms = [l.strip() for l in raw_block[:m.start()].split('\n') if l.strip()]
block = raw_block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = (block, None)
else:
d, theRest = self.detab(block)
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling is None:
# This is not a definition item. Most likely a paragraph that
# starts with a colon at the beginning of a document or list.
blocks.insert(0, raw_block)
return False
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
# Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(ListIndentProcessor):
""" Process indented children of definition list items. """
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
""" Create a new dd and parse the block with it as the parent. """
dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser),
'>indent')
md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser),
'>ulist')
def makeExtension(configs={}):
return DefListExtension(configs=configs)

View File

@@ -0,0 +1,54 @@
"""
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still needs to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
In the event that one or more of the supported extensions are not
available for import, Markdown will issue a warning and simply continue
without that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a different name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
extensions = ['smart_strong',
'fenced_code',
'footnotes',
'attr_list',
'def_list',
'tables',
'abbr',
]
class ExtraExtension(Extension):
""" Add various extensions to Markdown class."""
def extendMarkdown(self, md, md_globals):
""" Register extension instances. """
md.registerExtensions(extensions, self.config)
if not md.safeMode:
# Turn on processing of markdown text within raw html
md.preprocessors['html_block'].markdown_in_raw = True
def makeExtension(configs={}):
return ExtraExtension(configs=dict(configs))
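# Illustrative usage sketch (not part of this file): the bundled extensions
# listed above are all enabled with the single name 'extra'.
#
#     import markdown
#     html = markdown.markdown(some_text, extensions=['extra'])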

View File

@@ -0,0 +1,161 @@
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
>>> import markdown
>>> text = '''
... A paragraph before a fenced code block:
...
... ~~~
... Fenced code block
... ~~~
... '''
>>> html = markdown.markdown(text, extensions=['fenced_code'])
>>> print html
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Works with safe_mode also (we check this because we are using the HtmlStash):
>>> print markdown.markdown(text, extensions=['fenced_code'], safe_mode='replace')
<p>A paragraph before a fenced code block:</p>
<pre><code>Fenced code block
</code></pre>
Include tildes in a code block and wrap with blank lines:
>>> text = '''
... ~~~~~~~~
...
... ~~~~
... ~~~~~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code>
~~~~
</code></pre>
Language tags:
>>> text = '''
... ~~~~{.python}
... # Some python code
... ~~~~'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code class="python"># Some python code
</code></pre>
Optionally backticks instead of tildes as per how github's code block markdown is identified:
>>> text = '''
... `````
... # Arbitrary code
... ~~~~~ # these tildes will not close the block
... `````'''
>>> print markdown.markdown(text, extensions=['fenced_code'])
<pre><code># Arbitrary code
~~~~~ # these tildes will not close the block
</code></pre>
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/fenced_code_blocks.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
* [Pygments (optional)](http://pygments.org)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension
import re
# Global vars
FENCED_BLOCK_RE = re.compile( \
r'(?P<fence>^(?:~{3,}|`{3,}))[ ]*(\{?\.?(?P<lang>[a-zA-Z0-9_+-]*)\}?)?[ ]*\n(?P<code>.*?)(?<=\n)(?P=fence)[ ]*$',
re.MULTILINE|re.DOTALL
)
CODE_WRAP = '<pre><code%s>%s</code></pre>'
LANG_TAG = ' class="%s"'
class FencedCodeExtension(Extension):
def extendMarkdown(self, md, md_globals):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.add('fenced_code_block',
FencedBlockPreprocessor(md),
">normalize_whitespace")
class FencedBlockPreprocessor(Preprocessor):
def __init__(self, md):
super(FencedBlockPreprocessor, self).__init__(md)
self.checked_for_codehilite = False
self.codehilite_conf = {}
def run(self, lines):
""" Match and store Fenced Code Blocks in the HtmlStash. """
# Check for code hilite extension
if not self.checked_for_codehilite:
for ext in self.markdown.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.config
break
self.checked_for_codehilite = True
text = "\n".join(lines)
while 1:
m = FENCED_BLOCK_RE.search(text)
if m:
lang = ''
if m.group('lang'):
lang = LANG_TAG % m.group('lang')
# If config is not empty, then the codehilite extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf:
highliter = CodeHilite(m.group('code'),
linenums=self.codehilite_conf['linenums'][0],
guess_lang=self.codehilite_conf['guess_lang'][0],
css_class=self.codehilite_conf['css_class'][0],
style=self.codehilite_conf['pygments_style'][0],
lang=(m.group('lang') or None),
noclasses=self.codehilite_conf['noclasses'][0])
code = highliter.hilite()
else:
code = CODE_WRAP % (lang, self._escape(m.group('code')))
placeholder = self.markdown.htmlStash.store(code, safe=True)
text = '%s\n%s\n%s'% (text[:m.start()], placeholder, text[m.end():])
else:
break
return text.split("\n")
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
return txt
def makeExtension(configs=None):
return FencedCodeExtension(configs=configs)

View File

@@ -0,0 +1,313 @@
"""
========================= FOOTNOTES =================================
This section adds footnote handling to markdown. It can be used as
an example for extending python-markdown with relatively complex
functionality. While in this case the extension is included inside
the module itself, it could just as easily be added from outside the
module. Note that all markdown classes above are ignorant of
footnotes. All footnote functionality is provided separately and
then added to the markdown instance at the run time.
Footnote functionality is attached by calling extendMarkdown()
method of FootnoteExtension. The method also registers the
extension to allow its state to be reset by a call to reset()
method.
Example:
Footnotes[^1] have a label[^label] and a definition[^!DEF].
[^1]: This is a footnote
[^label]: A footnote on "label"
[^!DEF]: The footnote for definition
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..treeprocessors import Treeprocessor
from ..postprocessors import Postprocessor
from ..util import etree, text_type
from ..odict import OrderedDict
import re
FN_BACKLINK_TEXT = "zz1337820767766393qq"
NBSP_PLACEHOLDER = "qq3936677670287331zz"
DEF_RE = re.compile(r'[ ]{0,3}\[\^([^\]]*)\]:\s*(.*)')
TABBED_RE = re.compile(r'((\t)|( ))(.*)')
class FootnoteExtension(Extension):
""" Footnote Extension. """
def __init__ (self, configs):
""" Setup configs. """
self.config = {'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"],
'UNIQUE_IDS':
[False,
"Avoid name collisions across "
"multiple calls to reset()."],
"BACKLINK_TEXT":
["&#8617;",
"The text string that links from the footnote to the reader's place."]
}
for key, value in configs:
self.config[key][0] = value
# In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0
self.reset()
def extendMarkdown(self, md, md_globals):
""" Add pieces to Markdown. """
md.registerExtension(self)
self.parser = md.parser
self.md = md
self.sep = ':'
if self.md.output_format in ['html5', 'xhtml5']:
self.sep = '-'
# Insert a preprocessor before ReferencePreprocessor
md.preprocessors.add("footnote", FootnotePreprocessor(self),
"<reference")
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.add("footnote", FootnotePattern(FOOTNOTE_RE, self),
"<reference")
# Insert a tree-processor that would actually add the footnote div
# This must be before all other treeprocessors (i.e., inline and
# codehilite) so they can run on the contents of the div.
md.treeprocessors.add("footnote", FootnoteTreeprocessor(self),
"_begin")
# Insert a postprocessor after the amp_substitute postprocessor
md.postprocessors.add("footnote", FootnotePostprocessor(self),
">amp_substitute")
def reset(self):
""" Clear the footnotes on reset, and prepare for a distinct document. """
self.footnotes = OrderedDict()
self.unique_prefix += 1
def findFootnotesPlaceholder(self, root):
""" Return ElementTree Element that contains Footnote placeholder. """
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
finder(child)
return None
res = finder(root)
return res
def setFootnote(self, id, text):
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
def makeFootnoteId(self, id):
""" Return footnote link id. """
if self.getConfig("UNIQUE_IDS"):
return 'fn%s%d-%s' % (self.sep, self.unique_prefix, id)
else:
return 'fn%s%s' % (self.sep, id)
def makeFootnoteRefId(self, id):
""" Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"):
return 'fnref%s%d-%s' % (self.sep, self.unique_prefix, id)
else:
return 'fnref%s%s' % (self.sep, id)
def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """
if not list(self.footnotes.keys()):
return None
div = etree.Element("div")
div.set('class', 'footnote')
etree.SubElement(div, "hr")
ol = etree.SubElement(div, "ol")
for id in self.footnotes.keys():
li = etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
self.parser.parseChunk(li, self.footnotes[id])
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
if self.md.output_format not in ['html5', 'xhtml5']:
backlink.set("rev", "footnote") # Invalid in HTML5
backlink.set("class", "footnote-backref")
backlink.set("title", "Jump back to footnote %d in the text" % \
(self.footnotes.index(id)+1))
backlink.text = FN_BACKLINK_TEXT
if li.getchildren():
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = etree.SubElement(li, "p")
p.append(backlink)
return div
class FootnotePreprocessor(Preprocessor):
""" Find all footnote references and store for later use. """
def __init__ (self, footnotes):
self.footnotes = footnotes
def run(self, lines):
"""
Loop through lines and find, set, and remove footnote definitions.
Keywords:
* lines: A list of lines of text
Return: A list of lines of text with footnote definitions removed.
"""
newlines = []
i = 0
while True:
m = DEF_RE.match(lines[i])
if m:
fn, _i = self.detectTabbed(lines[i+1:])
fn.insert(0, m.group(2))
i += _i-1 # skip past footnote
self.footnotes.setFootnote(m.group(1), "\n".join(fn))
else:
newlines.append(lines[i])
if len(lines) > i+1:
i += 1
else:
break
return newlines
def detectTabbed(self, lines):
""" Find indented text and remove indent before further proccesing.
Keyword arguments:
* lines: an array of strings
Returns: a list of post processed items and the index of last line.
"""
items = []
blank_line = False # have we encountered a blank line yet?
i = 0 # to keep track of where we are
def detab(line):
match = TABBED_RE.match(line)
if match:
return match.group(4)
for line in lines:
if line.strip(): # Non-blank line
detabbed_line = detab(line)
if detabbed_line:
items.append(detabbed_line)
i += 1
continue
elif not blank_line and not DEF_RE.match(line):
# not tabbed but still part of first par.
items.append(line)
i += 1
continue
else:
return items, i+1
else: # Blank line: _maybe_ we are done.
blank_line = True
i += 1 # advance
# Find the next non-blank line
for j in range(i, len(lines)):
if lines[j].strip():
next_line = lines[j]; break
else:
break # There is no more text; we are done.
# Check if the next non-blank line is tabbed
if detab(next_line): # Yes, more work to do.
items.append("")
continue
else:
break # No, we are done.
else:
i += 1
return items, i
class FootnotePattern(Pattern):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, footnotes):
super(FootnotePattern, self).__init__(pattern)
self.footnotes = footnotes
def handleMatch(self, m):
id = m.group(2)
if id in self.footnotes.footnotes.keys():
sup = etree.Element("sup")
a = etree.SubElement(sup, "a")
sup.set('id', self.footnotes.makeFootnoteRefId(id))
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
if self.footnotes.md.output_format not in ['html5', 'xhtml5']:
a.set('rel', 'footnote') # invalid in HTML5
a.set('class', 'footnote-ref')
a.text = text_type(self.footnotes.footnotes.index(id) + 1)
return sup
else:
return None
class FootnoteTreeprocessor(Treeprocessor):
""" Build and append footnote div to end of document. """
def __init__ (self, footnotes):
self.footnotes = footnotes
def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
if footnotesDiv:
result = self.footnotes.findFootnotesPlaceholder(root)
if result:
child, parent, isText = result
ind = parent.getchildren().index(child)
if isText:
parent.remove(child)
parent.insert(ind, footnotesDiv)
else:
parent.insert(ind + 1, footnotesDiv)
child.tail = None
else:
root.append(footnotesDiv)
class FootnotePostprocessor(Postprocessor):
""" Replace placeholders with html entities. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, text):
text = text.replace(FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT"))
return text.replace(NBSP_PLACEHOLDER, "&#160;")
def makeExtension(configs=[]):
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(configs=configs)
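A rough usage sketch of the extension above, reusing the footnote text from its docstring; the call assumes the bundled markdown package is importable as `markdown`:

import markdown

TEXT = (
    "Footnotes[^1] have a label[^label] and a definition[^!DEF].\n\n"
    "[^1]: This is a footnote\n"
    "[^label]: A footnote on \"label\"\n"
    "[^!DEF]: The footnote for definition\n"
)

# 'footnotes' wires the preprocessor, inline pattern and treeprocessor defined
# above into the pipeline; the output ends with the <div class="footnote">
# built by makeFootnotesDiv().
print(markdown.markdown(TEXT, extensions=['footnotes']))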

View File

@@ -0,0 +1,207 @@
"""
HeaderID Extension for Python-Markdown
======================================
Auto-generate id attributes for HTML headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header #"
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header">Some Header</h1>
All header IDs are unique:
>>> text = '''
... #Header
... #Header
... #Header'''
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="header">Header</h1>
<h1 id="header_1">Header</h1>
<h1 id="header_2">Header</h1>
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> print md
<h3 id="some-header">Some Header</h3>
<h4 id="next-level">Next Level</h4>
Works with inline markup.
>>> text = '#Some *Header* with [markup](http://example.com).'
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header-with-markup">Some <em>Header</em> with <a href="http://example.com">markup</a>.</h1>
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Another Header'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> print md
<h1>Some Header</h1>
<h1>Another Header</h1>
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> print md
<h2>A Header</h2>
Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/header_id.html>
Contact: markdown@freewisdom.org
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
import re
import logging
try:
import unicodedata
has_unicodedata = True
except:
has_unicodedata = False
DISALLOWED_RE = re.compile(r'[^a-z0-9]')
logger = logging.getLogger('MARKDOWN')
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
if has_unicodedata:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub('[%s\s]+' % separator, separator, value)
else:
return separator.join(filter(None, re.split(DISALLOWED_RE, value.lower())))
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
ids.add(id)
return id
def itertext(elem):
""" Loop through all children and return text only.
Reimplements method of same name added to ElementTree in Python 2.7
"""
if elem.text:
yield elem.text
for e in elem:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
class HeaderIdTreeprocessor(Treeprocessor):
""" Assign IDs to headers. """
IDs = set()
def run(self, doc):
start_level, force_id = self._get_meta()
slugify = self.config['slugify']
sep = self.config['separator']
for elem in doc.getiterator():
if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if force_id:
if "id" in elem.attrib:
id = elem.get('id')
else:
id = slugify(''.join(itertext(elem)), sep)
elem.set('id', unique(id, self.IDs))
if start_level:
level = int(elem.tag[-1]) + start_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level']) - 1
force = self._str2bool(self.config['forceid'])
if hasattr(self.md, 'Meta'):
if 'header_level' in self.md.Meta:
level = int(self.md.Meta['header_level'][0]) - 1
if 'header_forceid' in self.md.Meta:
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
class HeaderIdExtension(Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.'],
'separator' : ['-', 'Word separator.'],
'slugify' : [slugify, 'Callable to generate anchors'],
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdTreeprocessor()
self.processor.md = md
self.processor.config = self.getConfigs()
if 'attr_list' in md.treeprocessors.keys():
# insert after attr_list treeprocessor
md.treeprocessors.add('headerid', self.processor, '>attr_list')
else:
# insert after 'prettify' treeprocessor.
md.treeprocessors.add('headerid', self.processor, '>prettify')
def reset(self):
self.processor.IDs = set()
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)

View File

@@ -0,0 +1,93 @@
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
Basic Usage:
>>> import markdown
>>> text = '''Title: A Test Doc.
... Author: Waylan Limberg
... John Doe
... Blank_Data:
...
... The body. This is paragraph one.
... '''
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<p>The body. This is paragraph one.</p>
>>> print md.Meta
{u'blank_data': [u''], u'author': [u'Waylan Limberg', u'John Doe'], u'title': [u'A Test Doc.']}
Make sure text without Meta Data still works (markdown < 1.6b returns a <p>).
>>> text = ' Some Code - not extra lines of meta data.'
>>> md = markdown.Markdown(['meta'])
>>> print md.convert(text)
<pre><code>Some Code - not extra lines of meta data.
</code></pre>
>>> md.Meta
{}
Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
Project website: <http://packages.python.org/Markdown/meta_data.html>
Contact: markdown@freewisdom.org
License: BSD (see ../LICENSE.md for details)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta", MetaPreprocessor(md), "_begin")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
while 1:
line = lines.pop(0)
if line.strip() == '':
break # blank line - done
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(configs={}):
return MetaExtension(configs=configs)

View File

@@ -0,0 +1,38 @@
"""
NL2BR Extension
===============
A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does.
Usage:
>>> import markdown
>>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
<p>line 1<br />
line 2</p>
Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SubstituteTagPattern
BR_RE = r'\n'
class Nl2BrExtension(Extension):
def extendMarkdown(self, md, md_globals):
br_tag = SubstituteTagPattern(BR_RE, 'br')
md.inlinePatterns.add('nl', br_tag, '_end')
def makeExtension(configs=None):
return Nl2BrExtension(configs)

View File

@@ -0,0 +1,51 @@
"""
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of lists in Python-Markdown to act in a sane manner.
In standard Markdown syntax, the following would constitute a single
ordered list. However, with this extension, the output would include
two lists, the first an ordered list and the second an unordered list.
1. ordered
2. list
* unordered
* list
Copyright 2011 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import OListProcessor, UListProcessor
import re
class SaneOListProcessor(OListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
SIBLING_TAGS = ['ol']
class SaneUListProcessor(UListProcessor):
CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
SIBLING_TAGS = ['ul']
class SaneListExtension(Extension):
""" Add sane lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Override existing Processors. """
md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(configs={}):
return SaneListExtension(configs=configs)
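A small sketch of the difference this extension makes, using the list from the docstring above; the calls assume the bundled markdown package is importable as `markdown`:

import markdown

TEXT = "1. ordered\n2. list\n* unordered\n* list"

# Default rules treat the block as a single list, as the docstring above notes.
print(markdown.markdown(TEXT))

# With sane_lists, SaneOListProcessor and SaneUListProcessor keep the <ol>
# and <ul> separate.
print(markdown.markdown(TEXT, extensions=['sane_lists']))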

View File

@@ -0,0 +1,42 @@
'''
Smart_Strong Extension for Python-Markdown
==========================================
This extension adds smarter handling of double underscores within words.
Simple Usage:
>>> import markdown
>>> print markdown.markdown('Text with double__underscore__words.',
... extensions=['smart_strong'])
<p>Text with double__underscore__words.</p>
>>> print markdown.markdown('__Strong__ still works.',
... extensions=['smart_strong'])
<p><strong>Strong</strong> still works.</p>
>>> print markdown.markdown('__this__works__too__.',
... extensions=['smart_strong'])
<p><strong>this__works__too</strong>.</p>
Copyright 2011
[Waylan Limberg](http://achinghead.com)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
STRONG_RE = r'(\*{2})(.+?)\2'
class SmartEmphasisExtension(Extension):
""" Add smart_emphasis extension to Markdown class."""
def extendMarkdown(self, md, md_globals):
""" Modify inline patterns. """
md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
md.inlinePatterns.add('strong2', SimpleTagPattern(SMART_STRONG_RE, 'strong'), '>emphasis2')
def makeExtension(configs={}):
return SmartEmphasisExtension(configs=dict(configs))

View File

@@ -0,0 +1,100 @@
"""
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
A simple example:
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
Copyright 2009 - [Waylan Limberg](http://achinghead.com)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..blockprocessors import BlockProcessor
from ..util import etree
class TableProcessor(BlockProcessor):
""" Process Tables. """
def test(self, parent, block):
rows = block.split('\n')
return (len(rows) > 2 and '|' in rows[0] and
'|' in rows[1] and '-' in rows[1] and
rows[1].strip()[0] in ['|', ':', '-'])
def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip()
seperator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = True
# Get alignment of columns
align = []
for c in self._split_row(seperator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border)
def _build_row(self, row, parent, align, border):
""" Given a row of text, build table cells. """
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row, border)
# We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip()
except IndexError:
c.text = ""
if a:
c.set('align', a)
def _split_row(self, row, border):
""" split a row of text into list of cells. """
if border:
if row.startswith('|'):
row = row[1:]
if row.endswith('|'):
row = row[:-1]
return row.split('|')
class TableExtension(Extension):
""" Add tables to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('table',
TableProcessor(md.parser),
'<hashheader')
def makeExtension(configs={}):
return TableExtension(configs=configs)
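A brief usage sketch of the tables extension above; the table text mirrors the docstring example and the call assumes the bundled markdown package is importable as `markdown`:

import markdown

TABLE = (
    "First Header  | Second Header\n"
    "------------- | -------------\n"
    "Content Cell  | Content Cell\n"
    "Content Cell  | Content Cell"
)

# TableProcessor.test() requires a header row, a separator row containing '-',
# and at least one body row; run() then emits a <table> with <thead> and <tbody>.
print(markdown.markdown(TABLE, extensions=['tables']))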

View File

@@ -0,0 +1,221 @@
"""
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree
from .headerid import slugify, unique, itertext
import re
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
if not remaining_list:
return [], []
current = remaining_list.pop(0)
if not 'children' in current.keys():
current['children'] = []
if not prev_elements:
# This happens for instance with [8, 1, 1], ie. when some
# header level is outside a scope. We treat it as a
# top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list
class TocTreeprocessor(Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment
if self.use_anchors:
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
# Get a list of id attributes
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
# would cause an endless loop of placing a new TOC
# inside previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = unique(self.config["slugify"](text, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({'level': tag_level,
'id': elem_id,
'name': text})
self.add_anchor(c, elem_id)
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div)
if not marker_found:
# serialize and attach to markdown instance.
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>prettify'. With this set to '_end',
# it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs={}):
return TocExtension(configs=configs)
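A minimal sketch of the [TOC] marker handling described above; the heading text is illustrative and the call assumes the bundled markdown package is importable as `markdown`:

import markdown

TEXT = "[TOC]\n\n# Section One\n\n## Subsection\n\n# Section Two"

# The marker (configurable via the 'marker' setting above) is replaced by a
# <div class="toc">; heading ids come from the headerid slugify()/unique() helpers.
print(markdown.markdown(TEXT, extensions=['toc']))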

View File

@@ -0,0 +1,151 @@
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links. Requires Python-Markdown 2.0+
Basic usage:
>>> import markdown
>>> text = "Some text with a [[WikiLink]]."
>>> html = markdown.markdown(text, ['wikilinks'])
>>> print html
<p>Some text with a <a class="wikilink" href="/WikiLink/">WikiLink</a>.</p>
Whitespace behavior:
>>> print markdown.markdown('[[ foo bar_baz ]]', ['wikilinks'])
<p><a class="wikilink" href="/foo_bar_baz/">foo bar_baz</a></p>
>>> print markdown.markdown('foo [[ ]] bar', ['wikilinks'])
<p>foo bar</p>
To define custom settings the simple way:
>>> print markdown.markdown(text,
... ['wikilinks(base_url=/wiki/,end_url=.html,html_class=foo)']
... )
<p>Some text with a <a class="foo" href="/wiki/WikiLink.html">WikiLink</a>.</p>
Custom settings the complex way:
>>> md = markdown.Markdown(
... extensions = ['wikilinks'],
... extension_configs = {'wikilinks': [
... ('base_url', 'http://example.com/'),
... ('end_url', '.html'),
... ('html_class', '') ]},
... safe_mode = True)
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
Use MetaData with mdx_meta.py (Note the blank html_class in MetaData):
>>> text = """wiki_base_url: http://example.com/
... wiki_end_url: .html
... wiki_html_class:
...
... Some text with a [[WikiLink]]."""
>>> md = markdown.Markdown(extensions=['meta', 'wikilinks'])
>>> print md.convert(text)
<p>Some text with a <a href="http://example.com/WikiLink.html">WikiLink</a>.</p>
MetaData should not carry over to next document:
>>> print md.convert("No [[MetaData]] here.")
<p>No <a class="wikilink" href="/MetaData/">MetaData</a> here.</p>
Define a custom URL builder:
>>> def my_url_builder(label, base, end):
... return '/bar/'
>>> md = markdown.Markdown(extensions=['wikilinks'],
... extension_configs={'wikilinks' : [('build_url', my_url_builder)]})
>>> print md.convert('[[foo]]')
<p><a class="wikilink" href="/bar/">foo</a></p>
From the command line:
python markdown.py -x wikilinks(base_url=http://example.com/,end_url=.html,html_class=foo) src.txt
By [Waylan Limberg](http://achinghead.com/).
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import Pattern
from ..util import etree
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '%s%s%s'% (base, clean_label, end)
class WikiLinkExtension(Extension):
def __init__(self, configs):
# set extension defaults
self.config = {
'base_url' : ['/', 'String to append to beginning of URL.'],
'end_url' : ['/', 'String to append to end of URL.'],
'html_class' : ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url' : [build_url, 'Callable formats URL from label.'],
}
# Override defaults with user settings
for key, value in configs :
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinks(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.add('wikilink', wikilinkPattern, "<not_strong")
class WikiLinks(Pattern):
def __init__(self, pattern, config):
super(WikiLinks, self).__init__(pattern)
self.config = config
def handleMatch(self, m):
if m.group(2).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(2).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(configs=None) :
return WikiLinkExtension(configs=configs)

View File

@@ -0,0 +1,483 @@
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)!'. In the case of the built-in expressions,
Pattern takes care of adding the "^(.*)" and "(.*)!".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from . import odict
import re
try:
from urllib.parse import urlparse, urlunparse
except ImportError:
from urlparse import urlparse, urlunparse
try:
from html import entities
except ImportError:
import htmlentitydefs as entities
def build_inlinepatterns(md_instance, **kwargs):
""" Build the default set of inline patterns for Markdown. """
inlinePatterns = odict.OrderedDict()
inlinePatterns["backtick"] = BacktickPattern(BACKTICK_RE)
inlinePatterns["escape"] = EscapePattern(ESCAPE_RE, md_instance)
inlinePatterns["reference"] = ReferencePattern(REFERENCE_RE, md_instance)
inlinePatterns["link"] = LinkPattern(LINK_RE, md_instance)
inlinePatterns["image_link"] = ImagePattern(IMAGE_LINK_RE, md_instance)
inlinePatterns["image_reference"] = \
ImageReferencePattern(IMAGE_REFERENCE_RE, md_instance)
inlinePatterns["short_reference"] = \
ReferencePattern(SHORT_REF_RE, md_instance)
inlinePatterns["autolink"] = AutolinkPattern(AUTOLINK_RE, md_instance)
inlinePatterns["automail"] = AutomailPattern(AUTOMAIL_RE, md_instance)
inlinePatterns["linebreak"] = SubstituteTagPattern(LINE_BREAK_RE, 'br')
if md_instance.safeMode != 'escape':
inlinePatterns["html"] = HtmlPattern(HTML_RE, md_instance)
inlinePatterns["entity"] = HtmlPattern(ENTITY_RE, md_instance)
inlinePatterns["not_strong"] = SimpleTextPattern(NOT_STRONG_RE)
inlinePatterns["strong_em"] = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
inlinePatterns["strong"] = SimpleTagPattern(STRONG_RE, 'strong')
inlinePatterns["emphasis"] = SimpleTagPattern(EMPHASIS_RE, 'em')
if md_instance.smart_emphasis:
inlinePatterns["emphasis2"] = SimpleTagPattern(SMART_EMPHASIS_RE, 'em')
else:
inlinePatterns["emphasis2"] = SimpleTagPattern(EMPHASIS_2_RE, 'em')
return inlinePatterns
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong***
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\2(?!\w)' # _smart_emphasis_
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12\s*)?\)'''
# [text](url) or [text](<url>) or [text](url "title")
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
REFERENCE_RE = NOIMG + BRK+ r'\s?\[([^\]]*)\]' # [Google][3]
SHORT_REF_RE = NOIMG + r'\[([^\]]+)\]' # [Google]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s?\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp;
LINE_BREAK_RE = r' \n' # two spaces at end of line
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern(object):
"""Base class that inline patterns subclass. """
def __init__(self, pattern, markdown_instance=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern,
re.DOTALL | re.UNICODE)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp(self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError:
return text
def itertext(el):
' Reimplement Element.itertext for older python versions '
tag = el.tag
if not isinstance(tag, util.string_type) and tag is not None:
return
if el.text:
yield el.text
for e in el:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, util.string_type):
return value
else:
# An etree Element - return text content only
return ''.join(itertext(value))
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class SimpleTextPattern(Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == util.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class EscapePattern(Pattern):
""" Return an escaped character. """
def handleMatch(self, m):
char = m.group(2)
if char in self.markdown.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX)
else:
return '\\%s' % char
class SimpleTagPattern(Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern(SimpleTagPattern):
""" Return an element of type `tag` with no children. """
def handleMatch (self, m):
return util.etree.Element(self.tag)
class BacktickPattern(Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = util.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern(SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern(Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = self.unescape(m.group(2))
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.markdown.treeprocessors['inline'].stashed_nodes
except KeyError:
return text
def get_stash(m):
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.markdown.serializer(value)
except:
return '\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class LinkPattern(Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("a")
el.text = m.group(2)
title = m.group(13)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(self.unescape(href.strip())))
else:
el.set("href", "")
if title:
title = dequote(self.unescape(title))
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
url = url.replace(' ', '%20')
if not self.markdown.safeMode:
# Return immediately, bypassing parsing.
return url
try:
scheme, netloc, path, params, query, fragment = url = urlparse(url)
except ValueError:
# Bad url - so bad it couldn't be parsed.
return ''
locless_schemes = ['', 'mailto', 'news']
allowed_schemes = locless_schemes + ['http', 'https', 'ftp', 'ftps']
if scheme not in allowed_schemes:
# Not a known (allowed) scheme. Not safe.
return ''
if netloc == '' and scheme not in locless_schemes:
# This should not happen. Treat as suspect.
return ''
for part in url[2:]:
if ":" in part:
# A colon in "path", "parameters", "query" or "fragment" is suspect.
return ''
# Url passes all tests. Return url as-is.
return urlunparse(url)
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = util.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(self.unescape(src)))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(self.unescape(" ".join(src_parts[1:]))))
if self.markdown.enable_attributes:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', self.unescape(truealt))
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
NEWLINE_CLEANUP_RE = re.compile(r'[ ]?\n', re.MULTILINE)
def handleMatch(self, m):
try:
id = m.group(9).lower()
except IndexError:
id = None
if not id:
# if we got something like "[Google][]" or "[Goggle]"
# we'll use "google" as the id
id = m.group(2).lower()
# Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if not id in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = util.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern(ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = util.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
if self.markdown.enable_attributes:
text = handleAttributes(text, el)
el.set("alt", self.unescape(text))
return el
class AutolinkPattern(Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = util.etree.Element("a")
el.set('href', self.unescape(m.group(2)))
el.text = util.AtomicString(m.group(2))
return el
class AutomailPattern(Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = util.etree.Element('a')
email = self.unescape(m.group(2))
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = entities.codepoint2name.get(code)
if entity:
return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = util.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
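To illustrate the Pattern contract described in the module docstring (a raw regex that Pattern wraps in '^(.*?)...(.*?)$', plus handleMatch() returning an ElementTree element), here is a hypothetical extension that renders ++text++ as <ins>; the INS_RE constant and InsExtension class are made-up names, and the backreference numbering mirrors STRONG_RE above:

import markdown
from markdown.extensions import Extension
from markdown.inlinepatterns import SimpleTagPattern

INS_RE = r'(\+{2})(.+?)\2'  # ++inserted text++ (numbering mirrors STRONG_RE above)

class InsExtension(Extension):
    """ Hypothetical extension wiring a SimpleTagPattern into the inline pass. """
    def extendMarkdown(self, md, md_globals):
        # Register after 'emphasis' so the usual ordering rules still apply.
        md.inlinePatterns.add('ins', SimpleTagPattern(INS_RE, 'ins'), '>emphasis')

print(markdown.Markdown(extensions=[InsExtension()]).convert('some ++new++ text'))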

View File

@@ -0,0 +1,194 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from copy import deepcopy
def iteritems_compat(d):
"""Return an iterator over the (key, value) pairs of a dictionary.
Copied from `six` module."""
return iter(getattr(d, _iteritems)())
class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications.
"""
def __new__(cls, *args, **kwargs):
instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None or isinstance(data, dict):
data = data or []
super(OrderedDict, self).__init__(data)
self.keyOrder = list(data) if data else []
else:
super(OrderedDict, self).__init__()
super_set = super(OrderedDict, self).__setitem__
for key, value in data:
# Take the ordering from first key
if key not in self:
self.keyOrder.append(key)
# But override with last value in data (dict() does this)
super_set(key, value)
def __deepcopy__(self, memo):
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.items()])
def __copy__(self):
# Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(OrderedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(OrderedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def __reversed__(self):
return reversed(self.keyOrder)
def pop(self, k, *args):
result = super(OrderedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(OrderedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def _iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def _iterkeys(self):
for key in self.keyOrder:
yield key
def _itervalues(self):
for key in self.keyOrder:
yield self[key]
if util.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return [(k, self[k]) for k in self.keyOrder]
def keys(self):
return self.keyOrder[:]
def values(self):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
for k, v in iteritems_compat(dict_):
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(OrderedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(OrderedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their insertion order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in iteritems_compat(self)])
def clear(self):
super(OrderedDict, self).clear()
self.keyOrder = []
def index(self, key):
""" Return the index of a given key. """
try:
return self.keyOrder.index(key)
except ValueError:
raise ValueError("Element '%s' was not found in OrderedDict" % key)
def index_for_location(self, location):
""" Return index or None for a given location. """
if location == '_begin':
i = 0
elif location == '_end':
i = None
elif location.startswith('<') or location.startswith('>'):
i = self.index(location[1:])
if location.startswith('>'):
if i >= len(self):
# last item
i = None
else:
i += 1
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
return i
def add(self, key, value, location):
""" Insert by key location. """
i = self.index_for_location(location)
if i is not None:
self.insert(i, key, value)
else:
self.__setitem__(key, value)
def link(self, key, location):
""" Change location of an existing item. """
n = self.keyOrder.index(key)
del self.keyOrder[n]
try:
i = self.index_for_location(location)
if i is not None:
self.keyOrder.insert(i, key)
else:
self.keyOrder.append(key)
except Exception as e:
# restore to prevent data loss and reraise
self.keyOrder.insert(n, key)
raise e
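A quick sketch of the location strings understood by index_for_location() and add(); the keys and values below are placeholders:

from markdown.odict import OrderedDict

od = OrderedDict()
od['first'] = 1
od['last'] = 3

# '<key' inserts before an existing key, '>key' inserts after it, and
# '_begin'/'_end' pin the new key to either end of keyOrder.
od.add('middle', 2, '>first')
od.add('zero', 0, '_begin')

print(list(od))  # ['zero', 'first', 'middle', 'last']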

View File

@@ -0,0 +1,104 @@
"""
POST-PROCESSORS
=============================================================================
Markdown also allows post-processors, which are similar to preprocessors in
that they need to implement a "run" method. However, they are run after core
processing.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from . import odict
import re
def build_postprocessors(md_instance, **kwargs):
""" Build the default postprocessors for Markdown. """
postprocessors = odict.OrderedDict()
postprocessors["raw_html"] = RawHtmlPostprocessor(md_instance)
postprocessors["amp_substitute"] = AndSubstitutePostprocessor()
postprocessors["unescape"] = UnescapePostprocessor()
return postprocessors
class Postprocessor(util.Processor):
"""
Postprocessors are run after the ElementTree is converted back into text.
Each Postprocessor implements a "run" method that takes a pointer to a
text string, modifies it as necessary and returns a text string.
Postprocessors must extend markdown.Postprocessor.
"""
def run(self, text):
"""
Subclasses of Postprocessor should implement a `run` method, which
takes the html document as a single text string and returns a
(possibly modified) string.
"""
pass
class RawHtmlPostprocessor(Postprocessor):
""" Restore raw html to the document. """
def run(self, text):
""" Iterate over html stash and restore "safe" html. """
for i in range(self.markdown.htmlStash.html_counter):
html, safe = self.markdown.htmlStash.rawHtmlBlocks[i]
if self.markdown.safeMode and not safe:
if str(self.markdown.safeMode).lower() == 'escape':
html = self.escape(html)
elif str(self.markdown.safeMode).lower() == 'remove':
html = ''
else:
html = self.markdown.html_replacement_text
if self.isblocklevel(html) and (safe or not self.markdown.safeMode):
text = text.replace("<p>%s</p>" %
(self.markdown.htmlStash.get_placeholder(i)),
html + "\n")
text = text.replace(self.markdown.htmlStash.get_placeholder(i),
html)
return text
def escape(self, html):
""" Basic html escaping """
html = html.replace('&', '&amp;')
html = html.replace('<', '&lt;')
html = html.replace('>', '&gt;')
return html.replace('"', '&quot;')
def isblocklevel(self, html):
m = re.match(r'^\<\/?([^ >]+)', html)
if m:
if m.group(1)[0] in ('!', '?', '@', '%'):
# Comment, php etc...
return True
return util.isBlockLevel(m.group(1))
return False
class AndSubstitutePostprocessor(Postprocessor):
""" Restore valid entities """
def run(self, text):
text = text.replace(util.AMP_SUBSTITUTE, "&")
return text
class UnescapePostprocessor(Postprocessor):
""" Restore escaped chars """
RE = re.compile('%s(\d+)%s' % (util.STX, util.ETX))
def unescape(self, m):
return util.int2str(int(m.group(1)))
def run(self, text):
return self.RE.sub(self.unescape, text)
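As a hypothetical illustration of the Postprocessor contract above (take the serialized HTML string, return a possibly modified string); the FooterPostprocessor and FooterExtension names are made up:

import markdown
from markdown.extensions import Extension
from markdown.postprocessors import Postprocessor

class FooterPostprocessor(Postprocessor):
    """ Append a marker comment to the rendered document. """
    def run(self, text):
        return text + '\n<!-- rendered by markdown -->'

class FooterExtension(Extension):
    def extendMarkdown(self, md, md_globals):
        # '_end' places it after the default raw_html/amp_substitute/unescape chain.
        md.postprocessors.add('footer', FooterPostprocessor(md), '_end')

print(markdown.Markdown(extensions=[FooterExtension()]).convert('hello'))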

View File

@@ -0,0 +1,298 @@
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from . import odict
import re
def build_preprocessors(md_instance, **kwargs):
""" Build the default set of preprocessors used by Markdown. """
preprocessors = odict.OrderedDict()
preprocessors['normalize_whitespace'] = NormalizeWhitespace(md_instance)
if md_instance.safeMode != 'escape':
preprocessors["html_block"] = HtmlBlockPreprocessor(md_instance)
preprocessors["reference"] = ReferencePreprocessor(md_instance)
return preprocessors
class Preprocessor(util.Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
"""
def run(self, lines):
"""
Each subclass of Preprocessor should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass
class NormalizeWhitespace(Preprocessor):
""" Normalize whitespace for consistant parsing. """
def run(self, lines):
source = '\n'.join(lines)
source = source.replace(util.STX, "").replace(util.ETX, "")
source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
source = source.expandtabs(self.markdown.tab_length)
source = re.sub(r'(?<=\n) +\n', '\n', source)
return source.split('\n')
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
right_tag_patterns = ["</%s>", "%s>"]
attrs_pattern = r"""
\s+(?P<attr>[^>"'/= ]+)=(?P<q>['"])(?P<value>.*?)(?P=q) # attr="value"
| # OR
\s+(?P<attr1>[^>"'/= ]+)=(?P<value1>[^> ]+) # attr=value
| # OR
\s+(?P<attr2>[^>"'/= ]+) # attr
"""
left_tag_pattern = r'^\<(?P<tag>[^> ]+)(?P<attrs>(%s)*)\s*\/?\>?' % attrs_pattern
attrs_re = re.compile(attrs_pattern, re.VERBOSE)
left_tag_re = re.compile(left_tag_pattern, re.VERBOSE)
markdown_in_raw = False
def _get_left_tag(self, block):
m = self.left_tag_re.match(block)
if m:
tag = m.group('tag')
raw_attrs = m.group('attrs')
attrs = {}
if raw_attrs:
for ma in self.attrs_re.finditer(raw_attrs):
if ma.group('attr'):
if ma.group('value'):
attrs[ma.group('attr').strip()] = ma.group('value')
else:
attrs[ma.group('attr').strip()] = ""
elif ma.group('attr1'):
if ma.group('value1'):
attrs[ma.group('attr1').strip()] = ma.group('value1')
else:
attrs[ma.group('attr1').strip()] = ""
elif ma.group('attr2'):
attrs[ma.group('attr2').strip()] = ""
return tag, len(m.group(0)), attrs
else:
tag = block[1:].split(">", 1)[0].lower()
return tag, len(tag)+2, {}
def _recursive_tagfind(self, ltag, rtag, start_index, block):
while 1:
i = block.find(rtag, start_index)
if i == -1:
return -1
j = block.find(ltag, start_index)
# if no ltag, or rtag found before another ltag, return index
if (j > i or j == -1):
return i + len(rtag)
# another ltag found before rtag, use end of ltag as starting
# point and search again
j = block.find('>', j)
start_index = self._recursive_tagfind(ltag, rtag, j + 1, block)
if start_index == -1:
# HTML potentially malformed- ltag has no corresponding
# rtag
return -1
def _get_right_tag(self, left_tag, left_index, block):
for p in self.right_tag_patterns:
tag = p % left_tag
i = self._recursive_tagfind("<%s" % left_tag, tag, left_index, block)
if i > 2:
return tag.lstrip("<").rstrip(">"), i
return block.rstrip()[-left_index:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag):
if left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] == "/":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
text = text.rsplit("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
while text:
block = text[0]
if block.startswith("\n"):
block = block[1:]
text = text[1:]
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<") and len(block.strip()) > 1:
if block[1] == "!":
# is a comment block
left_tag, left_index, attrs = "--", 2, {}
else:
left_tag, left_index, attrs = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag,
left_index,
block)
# keep checking conditions below and maybe just append
if data_index < len(block) \
and (util.isBlockLevel(left_tag)
or left_tag == '--'):
text.insert(0, block[data_index:])
block = block[:data_index]
if not (util.isBlockLevel(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', block[:left_index])
end = block[-len(right_tag)-2:]
block = block[left_index:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.append(block)
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
else:
# if is block level tag and is not complete
if util.isBlockLevel(left_tag) or left_tag == "--" \
and not block.rstrip().endswith(">"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
new_blocks.append(block)
else:
items.append(block)
right_tag, data_index = self._get_right_tag(left_tag, 0, block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
if data_index < len(block):
# we have more text after right_tag
items[-1] = block[:data_index]
text.insert(0, block[data_index:])
in_tag = False
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:]
items[-1] = items[-1][:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.extend(items)
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
items = []
if items:
if self.markdown_in_raw and 'markdown' in attrs.keys():
start = re.sub(r'\smarkdown(=[\'"]?[^> ]*[\'"]?)?',
'', items[0][:left_index])
items[0] = items[0][left_index:]
end = items[-1][-len(right_tag)-2:]
items[-1] = items[-1][:-len(right_tag)-2]
new_blocks.append(
self.markdown.htmlStash.store(start))
new_blocks.extend(items)
if end.strip():
new_blocks.append(
self.markdown.htmlStash.store(end))
else:
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
#new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
TITLE = r'[ ]*(\"(.*)\"|\'(.*)\'|\((.*)\))[ ]*'
RE = re.compile(r'^[ ]{0,3}\[([^\]]*)\]:\s*([^ ]*)[ ]*(%s)?$' % TITLE, re.DOTALL)
TITLE_RE = re.compile(r'^%s$' % TITLE)
def run (self, lines):
new_text = [];
while lines:
line = lines.pop(0)
m = self.RE.match(line)
if m:
id = m.group(1).strip().lower()
link = m.group(2).lstrip('<').rstrip('>')
t = m.group(5) or m.group(6) or m.group(7)
if not t:
# Check next line for title
tm = self.TITLE_RE.match(lines[0])
if tm:
lines.pop(0)
t = tm.group(2) or tm.group(3) or tm.group(4)
self.markdown.references[id] = (link, t)
else:
new_text.append(line)
return new_text #+ "\n"
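# Editor's sketch (illustration only, not part of Python-Markdown): what the
# reference regex captures for a typical reference-definition line.
def _example_reference_match():
    m = ReferencePreprocessor.RE.match('[python]: http://python.org "Python home"')
    # m.group(1) -> 'python' (id), m.group(2) -> 'http://python.org' (link),
    # m.group(5) -> 'Python home' (double-quoted title)
    return m.group(1).strip().lower(), m.group(2), m.group(5)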

View File

@@ -0,0 +1,277 @@
# markdown/serializers.py
#
# Add x/html serialization to ElementTree
# Taken from ElementTree 1.3 preview with slight modifications
#
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
ElementTree = util.etree.ElementTree
QName = util.etree.QName
if hasattr(util.etree, 'test_comment'):
Comment = util.etree.test_comment
else:
Comment = util.etree.Comment
PI = util.etree.PI
ProcessingInstruction = util.etree.ProcessingInstruction
__all__ = ['to_html_string', 'to_xhtml_string']
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta" "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# Dublin Core
"http://purl.org/dc/elements/1.1/": "dc",
}
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _encode(text, encoding):
try:
return text.encode(encoding, "xmlcharrefreplace")
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&amp;")
if "<" in text:
text = text.replace("<", "&lt;")
if ">" in text:
text = text.replace(">", "&gt;")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&amp;")
if "<" in text:
text = text.replace("<", "&lt;")
if ">" in text:
text = text.replace(">", "&gt;")
if "\"" in text:
text = text.replace("\"", "&quot;")
if "\n" in text:
text = text.replace("\n", "&#10;")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&amp;")
if "<" in text:
text = text.replace("<", "&lt;")
if ">" in text:
text = text.replace(">", "&gt;")
if "\"" in text:
text = text.replace("\"", "&quot;")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _serialize_html(write, elem, qnames, namespaces, format):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
else:
write("<" + tag)
items = elem.items()
if items or namespaces:
items.sort() # lexical order
for k, v in items:
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
if qnames[k] == v and format == 'html':
# handle boolean attributes
write(" %s" % v)
else:
write(" %s=\"%s\"" % (qnames[k], v))
if namespaces:
items = namespaces.items()
items.sort(key=lambda x: x[1]) # sort on prefix
for v, k in items:
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (k, _escape_attrib(v)))
if format == "xhtml" and tag in HTML_EMPTY:
write(" />")
else:
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None, format)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _write_html(root,
encoding=None,
default_namespace=None,
format="html"):
assert root is not None
data = []
write = data.append
qnames, namespaces = _namespaces(root, default_namespace)
_serialize_html(write, root, qnames, namespaces, format)
if encoding is None:
return "".join(data)
else:
return _encode("".join(data))
# --------------------------------------------------------------------
# serialization support
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].split("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
try:
iterate = elem.iter
except AttributeError:
iterate = elem.getiterator # cET compatibility
for elem in iterate():
tag = elem.tag
if isinstance(tag, QName) and tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, util.string_type):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def to_html_string(element):
return _write_html(ElementTree(element).getroot(), format="html")
def to_xhtml_string(element):
return _write_html(ElementTree(element).getroot(), format="xhtml")
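# Editor's sketch (illustration only, not part of the module): build a tiny tree
# with the bundled ElementTree wrapper and serialize it in both output modes.
def _example_serialization():
    root = util.etree.Element('p')
    br = util.etree.SubElement(root, 'br')
    br.tail = 'after the break'
    # to_html_string(root)  -> '<p><br>after the break</p>'
    # to_xhtml_string(root) -> '<p><br />after the break</p>'
    return to_html_string(root), to_xhtml_string(root)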

View File

@@ -0,0 +1,360 @@
from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
""" Build the default treeprocessors for Markdown. """
treeprocessors = odict.OrderedDict()
treeprocessors["inline"] = InlineProcessor(md_instance)
treeprocessors["prettify"] = PrettifyTreeprocessor(md_instance)
return treeprocessors
def isString(s):
""" Check if it's string """
if not isinstance(s, util.AtomicString):
return isinstance(s, util.string_type)
return False
class Treeprocessor(util.Processor):
"""
Treeprocessors are run on the ElementTree object before serialization.
Each Treeprocessor implements a "run" method that takes a pointer to an
ElementTree, modifies it as necessary and returns an ElementTree
object.
Treeprocessors must extend markdown.Treeprocessor.
"""
def run(self, root):
"""
Subclasses of Treeprocessor should implement a `run` method, which
takes a root ElementTree. This method can return another ElementTree
object, and the existing root ElementTree will be replaced, or it can
modify the current tree and return None.
"""
pass
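# Editor's sketch (not part of Python-Markdown): a minimal custom treeprocessor.
# The class name and behaviour are hypothetical; it simply decorates every <a>
# element in the parsed tree.
class ExampleLinkTargetTreeprocessor(Treeprocessor):
    """ Add target="_blank" to every link element (example only). """
    def run(self, root):
        for el in root.getiterator('a'):
            el.set('target', '_blank')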
class InlineProcessor(Treeprocessor):
"""
A Treeprocessor that traverses a tree, applying inline patterns.
"""
def __init__(self, md):
self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
self.__placeholder_suffix = util.ETX
self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
+ len(self.__placeholder_suffix)
self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
self.markdown = md
def __makePlaceholder(self, type):
""" Generate a placeholder """
id = "%04d" % len(self.stashed_nodes)
hash = util.INLINE_PLACEHOLDER % id
return hash, id
def __findPlaceholder(self, data, index):
"""
Extract id from data string, start from index
Keyword arguments:
* data: string
* index: index, from which we start search
Returns: placeholder id and string index, after the found placeholder.
"""
m = self.__placeholder_re.search(data, index)
if m:
return m.group(1), m.end()
else:
return None, index + 1
def __stashNode(self, node, type):
""" Add node to stash """
placeholder, id = self.__makePlaceholder(type)
self.stashed_nodes[id] = node
return placeholder
def __handleInline(self, data, patternIndex=0):
"""
Process string with inline patterns and replace it
with placeholders
Keyword arguments:
* data: A line of Markdown text
* patternIndex: The index of the inlinePattern to start with
Returns: String with placeholders.
"""
if not isinstance(data, util.AtomicString):
startIndex = 0
while patternIndex < len(self.markdown.inlinePatterns):
data, matched, startIndex = self.__applyPattern(
self.markdown.inlinePatterns.value_for_index(patternIndex),
data, patternIndex, startIndex)
if not matched:
patternIndex += 1
return data
def __processElementText(self, node, subnode, isText=True):
"""
Process placeholders in Element.text or Element.tail
of Elements popped from self.stashed_nodes.
Keyword arguments:
* node: parent node
* subnode: processing node
* isText: bool variable, True - it's text, False - it's tail
Returns: None
"""
if isText:
text = subnode.text
subnode.text = None
else:
text = subnode.tail
subnode.tail = None
childResult = self.__processPlaceholders(text, subnode)
if not isText and node is not subnode:
pos = node.getchildren().index(subnode)
node.remove(subnode)
else:
pos = 0
childResult.reverse()
for newChild in childResult:
node.insert(pos, newChild)
def __processPlaceholders(self, data, parent):
"""
Process string with placeholders and generate ElementTree tree.
Keyword arguments:
* data: string with placeholders instead of ElementTree elements.
* parent: Element, which contains processing inline data
Returns: list with ElementTree elements with applied inline patterns.
"""
def linkText(text):
if text:
if result:
if result[-1].tail:
result[-1].tail += text
else:
result[-1].tail = text
else:
if parent.text:
parent.text += text
else:
parent.text = text
result = []
strartIndex = 0
while data:
index = data.find(self.__placeholder_prefix, strartIndex)
if index != -1:
id, phEndIndex = self.__findPlaceholder(data, index)
if id in self.stashed_nodes:
node = self.stashed_nodes.get(id)
if index > 0:
text = data[strartIndex:index]
linkText(text)
if not isString(node): # it's Element
for child in [node] + node.getchildren():
if child.tail:
if child.tail.strip():
self.__processElementText(node, child,False)
if child.text:
if child.text.strip():
self.__processElementText(child, child)
else: # it's just a string
linkText(node)
strartIndex = phEndIndex
continue
strartIndex = phEndIndex
result.append(node)
else: # wrong placeholder
end = index + len(self.__placeholder_prefix)
linkText(data[strartIndex:end])
strartIndex = end
else:
text = data[strartIndex:]
if isinstance(data, util.AtomicString):
# We don't want to lose the AtomicString
text = util.AtomicString(text)
linkText(text)
data = ""
return result
def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
"""
Check if the line fits the pattern, create the necessary
elements, add it to stashed_nodes.
Keyword arguments:
* data: the text to be processed
* pattern: the pattern to be checked
* patternIndex: index of current pattern
* startIndex: string index, from which we start searching
Returns: String with placeholders instead of ElementTree elements.
"""
match = pattern.getCompiledRegExp().match(data[startIndex:])
leftData = data[:startIndex]
if not match:
return data, False, 0
node = pattern.handleMatch(match)
if node is None:
return data, True, len(leftData)+match.span(len(match.groups()))[0]
if not isString(node):
if not isinstance(node.text, util.AtomicString):
# We need to process current node too
for child in [node] + node.getchildren():
if not isString(node):
if child.text:
child.text = self.__handleInline(child.text,
patternIndex + 1)
if child.tail:
child.tail = self.__handleInline(child.tail,
patternIndex)
placeholder = self.__stashNode(node, pattern.type())
return "%s%s%s%s" % (leftData,
match.group(1),
placeholder, match.groups()[-1]), True, 0
def run(self, tree):
"""Apply inline patterns to a parsed Markdown tree.
Iterate over ElementTree, find elements with inline tag, apply inline
patterns and append newly created Elements to tree. If you don't
want to process your data with inline patterns, instead of a normal string,
use subclass AtomicString:
node.text = markdown.AtomicString("This will not be processed.")
Arguments:
* tree: ElementTree object, representing Markdown tree.
Returns: ElementTree object with applied inline patterns.
"""
self.stashed_nodes = {}
stack = [tree]
while stack:
currElement = stack.pop()
insertQueue = []
for child in currElement.getchildren():
if child.text and not isinstance(child.text, util.AtomicString):
text = child.text
child.text = None
lst = self.__processPlaceholders(self.__handleInline(
text), child)
stack += lst
insertQueue.append((child, lst))
if child.tail:
tail = self.__handleInline(child.tail)
dumby = util.etree.Element('d')
tailResult = self.__processPlaceholders(tail, dumby)
if dumby.text:
child.tail = dumby.text
else:
child.tail = None
pos = currElement.getchildren().index(child) + 1
tailResult.reverse()
for newChild in tailResult:
currElement.insert(pos, newChild)
if child.getchildren():
stack.append(child)
for element, lst in insertQueue:
if self.markdown.enable_attributes:
if element.text and isString(element.text):
element.text = \
inlinepatterns.handleAttributes(element.text,
element)
i = 0
for newChild in lst:
if self.markdown.enable_attributes:
# Processing attributes
if newChild.tail and isString(newChild.tail):
newChild.tail = \
inlinepatterns.handleAttributes(newChild.tail,
element)
if newChild.text and isString(newChild.text):
newChild.text = \
inlinepatterns.handleAttributes(newChild.text,
newChild)
element.insert(i, newChild)
i += 1
return tree
class PrettifyTreeprocessor(Treeprocessor):
""" Add linebreaks to the html document. """
def _prettifyETree(self, elem):
""" Recursively add linebreaks to ElementTree children. """
i = "\n"
if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
if (not elem.text or not elem.text.strip()) \
and len(elem) and util.isBlockLevel(elem[0].tag):
elem.text = i
for e in elem:
if util.isBlockLevel(e.tag):
self._prettifyETree(e)
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
def run(self, root):
""" Add linebreaks to ElementTree root object. """
self._prettifyETree(root)
# Do <br />'s separately as they are often in the middle of
# inline content and missed by _prettifyETree.
brs = root.getiterator('br')
for br in brs:
if not br.tail or not br.tail.strip():
br.tail = '\n'
else:
br.tail = '\n%s' % br.tail
# Clean up extra empty lines at end of code blocks.
pres = root.getiterator('pre')
for pre in pres:
if len(pre) and pre[0].tag == 'code':
pre[0].text = pre[0].text.rstrip() + '\n'

View File

@@ -0,0 +1,136 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import sys
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3:
string_type = str
text_type = str
int2str = chr
else:
string_type = basestring
text_type = unicode
int2str = unichr
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video)$", re.IGNORECASE)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]{4})')
AMP_SUBSTITUTE = STX+"amp"+ETX
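# Editor's sketch (illustration only): how the placeholder constants combine.
def _example_placeholder_roundtrip():
    marker = INLINE_PLACEHOLDER % '0007'          # '\x02klzzwxh:0007\x03'
    m = INLINE_PLACEHOLDER_RE.search('text %s text' % marker)
    return m.group(1)                             # -> '0007'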
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F'), # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
try: # Is the C implementation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError):
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
if isinstance(tag, string_type):
return BLOCK_LEVEL_ELEMENTS.match(tag)
# Some ElementTree tags are not strings, so return False.
return False
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
"""A string which should not be further processed."""
pass
class Processor(object):
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class HtmlStash(object):
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks = []
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
def get_placeholder(self, key):
return "%swzxhzdk:%d%s" % (STX, key, ETX)

View File

@@ -0,0 +1,26 @@
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script>
MathJax.Hub.Config({
config: ["MMLorHTML.js"],
extensions: ["tex2jax.js"],
jax: ["input/TeX"],
tex2jax: {
inlineMath: [ ['$','$'], ["\\(","\\)"] ],
displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
processEscapes: false
},
TeX: {
extensions: ["AMSmath.js", "AMSsymbols.js"],
TagSide: "right",
TagIndent: ".8em",
MultLineWidth: "85%",
equationNumbers: {
autoNumber: "AMS",
},
unicode: {
fonts: "STIXGeneral,'Arial Unicode MS'"
}
},
showProcessingMessages: false
});
</script>

View File

@@ -0,0 +1,4 @@
{
"install": "README.md",
"1.0.2": "CHANGES.md"
}

View File

@@ -1 +1 @@
{"url": "https://github.com/revolunet/sublimetext-markdown-preview", "version": "2013.04.01.18.50.45", "description": "markdown preview plugin for sublime text 2"}
{"url": "https://github.com/revolunet/sublimetext-markdown-preview", "version": "1.0.3", "description": "markdown preview and build plugin for sublime text 2/3"}

View File

@@ -1,108 +1,80 @@
<h1 id="sample-markdown-cheat-sheet">Sample Markdown Cheat Sheet</h1>
<p>This is a sample markdown file to help you write Markdown quickly :)</p>
<p>If you use the fabulous <a href="http://sublimetext.com">Sublime Text 2 editor</a> along with the <a href="https://github.com/revolunet/sublimetext-markdown-preview">Markdown Preview plugin</a>, open your ST2 Palette with <code>CMD+P</code> then choose <code>Markdown Preview in browser</code> to see the result in your browser.</p>
<p>If you use the fabulous <a href="http://sublimetext.com">Sublime Text 2/3 editor</a> along with the <a href="https://github.com/revolunet/sublimetext-markdown-preview">Markdown Preview plugin</a>, open your ST2 Palette with <code>CMD+P</code> then choose <code>Markdown Preview in browser</code> to see the result in your browser.</p>
<h2 id="text-basics">Text basics</h2>
<p>this is <em>italic</em> and this is <strong>bold</strong> . another <em>italic</em> and another <strong>bold</strong></p>
<p>this is <code>important</code> text. and percentage signs : % and <code>%</code></p>
<p>This is a paragraph with a footnote (builtin parser only). <sup class="footnote-ref" id="fnref-note-id"><a href="#fn-note-id">1</a></sup> </p>
<p>Insert <code>[ toc ]</code> without spaces to generate a table of contents (builtin parser only).</p>
<p>This is a paragraph with a footnote (builtin parser only). <sup id="fnref:note-id"><a class="footnote-ref" href="#fn:note-id" rel="footnote">1</a></sup></p>
<p>Insert <code>[ toc ]</code> without spaces to generate a table of contents (builtin parsers only).</p>
<h2 id="indentation">Indentation</h2>
<blockquote>
<p>Here is some indented text</p>
<blockquote>
<p>even more indented</p>
</blockquote>
<p>Here is some indented text</p>
<blockquote>
<p>even more indented</p>
</blockquote>
</blockquote>
<h2 id="titles">Titles</h2>
<h1 id="big-title-h1">Big title (h1)</h1>
<h2 id="middle-title-h2">Middle title (h2)</h2>
<h3 id="smaller-title-h3">Smaller title (h3)</h3>
<h4 id="and-so-on-hx">and so on (hX)</h4>
<h5 id="and-so-on-hx-2">and so on (hX)</h5>
<h6 id="and-so-on-hx-3">and so on (hX)</h6>
<h5 id="and-so-on-hx_1">and so on (hX)</h5>
<h6 id="and-so-on-hx_2">and so on (hX)</h6>
<h2 id="example-lists-1">Example lists (1)</h2>
<ul>
<li>bullets can be <code>-</code>, <code>+</code>, or <code>*</code></li>
<li>bullet list 1</li>
<li><p>bullet list 2</p>
<li>
<p>bullet list 2</p>
<ul>
<li>sub item 1</li>
<li><p>sub item 2</p>
<p>with indented text inside</p></li>
</ul></li>
<li><p>bullet list 3</p></li>
<li>
<p>sub item 2</p>
<p>with indented text inside</p>
</li>
</ul>
</li>
<li>
<p>bullet list 3</p>
</li>
<li>bullet list 4</li>
<li>bullet list 5</li>
</ul>
<h2 id="links">Links</h2>
<p>This is an <a href="http://lmgtfy.com/">example inline link</a> and <a href="http://lmgtfy.com/" title="Hello, world">another one with a title</a>.</p>
<p>Links can also be reference based : <a href="http://revolunet.com">reference 1</a> or <a href="http://revolunet.com" title="rich web apps">reference 2 with title</a>.</p>
<p>References are usually placed at the bottom of the document</p>
<h2 id="images">Images</h2>
<p>A sample image :</p>
<p><img src="http://www.revolunet.com/static/parisjs8/img/logo-revolunet-carre.jpg" alt="revolunet logo" title="revolunet logo" /></p>
<p><img alt="revolunet logo" src="http://www.revolunet.com/static/parisjs8/img/logo-revolunet-carre.jpg" title="revolunet logo" /></p>
<p>As links, images can also use references instead of inline links :</p>
<p><img src="http://www.revolunet.com/static/parisjs8/img/logo-revolunet-carre.jpg" alt="revolunet logo" title="revolunet logo" /></p>
<p><img alt="revolunet logo" src="http://www.revolunet.com/static/parisjs8/img/logo-revolunet-carre.jpg" title="revolunet logo" /></p>
<h2 id="code">Code</h2>
<p>It's quite easy to show code in markdown files.</p>
<p>Backticks can be used to <code>highlight</code> some words.</p>
<p>Also, any indented block is considered a code block.</p>
<p>Also, any indented block is considered a code block. If <code>enable_highlight</code> is <code>true</code>, syntax highlighting will be included (for the builtin parser - the github parser does this automatically).</p>
<pre><code>&lt;script&gt;
document.location = 'http://lmgtfy.com/?q=markdown+cheat+sheet';
&lt;/script&gt;
</code></pre>
<h2 id="math">Math</h2>
<p>When <code>enable_mathjax</code> is <code>true</code>, inline math can be included \(\frac{\pi}{2}\) $\pi$</p>
<p>Alternatively, math can be written on its own line:</p>
<p>$$F(\omega) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} f(t) \, e^{ - i \omega t}dt$$</p>
<p>\[\int_0^1 f(t) \mathrm{d}t\]</p>
<p>\[\sum_j \gamma_j^2/d_j\]</p>
<h2 id="github-flavored-markdown">GitHub Flavored Markdown</h2>
<p>If you use the Github parser, you can use some of <a href="http://github.github.com/github-flavored-markdown/">Github Flavored Markdown</a> syntax :</p>
<ul>
<li>User/Project@SHA: revolunet/sublimetext-markdown-preview@7da61badeda468b5019869d11000307e07e07401</li>
<li>User/Project#Issue: revolunet/sublimetext-markdown-preview#1</li>
<li>User : @revolunet</li>
</ul>
<p>Some Python code :</p>
<pre><code>import random
<pre><code class="python">import random
class CardGame(object):
""" a sample python class """
&quot;&quot;&quot; a sample python class &quot;&quot;&quot;
NB_CARDS = 32
def __init__(self, cards=5):
self.cards = random.sample(range(self.NB_CARDS), 5)
@@ -110,8 +82,7 @@ class CardGame(object):
</code></pre>
<p>Some Javascript code :</p>
<pre><code>var config = {
<pre><code class="js">var config = {
duration: 5,
comment: 'WTF'
}
@@ -127,15 +98,175 @@ async_call('/path/to/api', function(json) {
})
</code></pre>
<p>The Github Markdown also brings some <a href="http://www.emoji-cheat-sheet.com/">nice Emoji support</a> : :+1: :heart: :beer:</p>
<h2 id="parsers-and-extensions">Parsers and Extensions</h2>
<p>Markdown Preview comes with <strong>Python-Markdown</strong> and <strong>Markdown2</strong> preloaded.</p>
<h3 id="python-markdown"><em>Python-Markdown</em></h3>
<p>The <a href="https://github.com/waylan/Python-Markdown">Python-Markdown Parser</a> provides support for several extensions.</p>
<h4 id="extra-extensions">Extra Extensions</h4>
<ul>
<li><code>abbr</code> -- <a href="http://pythonhosted.org/Markdown/extensions/abbreviations.html">Abbreviations</a></li>
<li><code>attr_list</code> -- <a href="http://pythonhosted.org/Markdown/extensions/attr_list.html">Attribute Lists</a></li>
<li><code>def_list</code> -- <a href="http://pythonhosted.org/Markdown/extensions/definition_lists.html">Definition Lists</a></li>
<li><code>fenced_code</code> -- <a href="http://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html">Fenced Code Blocks</a></li>
<li><code>footnotes</code> -- <a href="http://pythonhosted.org/Markdown/extensions/footnotes.html">Footnotes</a></li>
<li><code>tables</code> -- <a href="http://pythonhosted.org/Markdown/extensions/tables.html">Tables</a></li>
<li><code>smart_strong</code> -- <a href="http://pythonhosted.org/Markdown/extensions/smart_strong.html">Smart Strong</a></li>
</ul>
<p>You can enable them all at once using the <code>extra</code> keyword.</p>
<pre><code>extensions: [ 'extra' ]
</code></pre>
<p>If you want all the extras plus the <code>toc</code> extension,
your settings would look like this:</p>
<pre><code>{
...
parser: 'markdown',
extensions: ['extra', 'toc'],
...
}
</code></pre>
<h4 id="other-extensions">Other Extensions</h4>
<p>There are also some extensions that are not included in Markdown Extra
but come in the standard Python-Markdown library.</p>
<ul>
<li><code>code-hilite</code> -- <a href="http://pythonhosted.org/Markdown/extensions/code_hilite.html">CodeHilite</a></li>
<li><code>html-tidy</code> -- <a href="http://pythonhosted.org/Markdown/extensions/html_tidy.html">HTML Tidy</a></li>
<li><code>header-id</code> -- <a href="http://pythonhosted.org/Markdown/extensions/header_id.html">HeaderId</a></li>
<li><code>meta_data</code> -- <a href="http://pythonhosted.org/Markdown/extensions/meta_data.html">Meta-Data</a></li>
<li><code>nl2br</code> -- <a href="http://pythonhosted.org/Markdown/extensions/nl2br.html">New Line to Break</a></li>
<li><code>rss</code> -- <a href="http://pythonhosted.org/Markdown/extensions/rss.html">RSS</a></li>
<li><code>sane_lists</code> -- <a href="http://pythonhosted.org/Markdown/extensions/sane_lists.html">Sane Lists</a></li>
<li><code>toc</code> -- <a href="http://pythonhosted.org/Markdown/extensions/toc.html">Table of Contents</a></li>
<li><code>wikilinks</code> -- <a href="http://pythonhosted.org/Markdown/extensions/wikilinks.html">WikiLinks</a></li>
</ul>
<h4 id="3rd-party-extensions">3rd Party Extensions</h4>
<p><em>Python-Markdown</em> is designed to be extended.
Just fork this repo and add your extensions inside the <code>.../Packages/Markdown Preview/markdown/extensions/</code> folder.</p>
<p>Check out the list of <a href="https://github.com/waylan/Python-Markdown/wiki/Third-Party-Extensions">3rd Party extensions</a>.</p>
<h4 id="default-extensions">Default Extensions</h4>
<p>The default extensions are:</p>
<ul>
<li><code>footnotes</code> -- <a href="http://pythonhosted.org/Markdown/extensions/footnotes.html">Footnotes</a></li>
<li><code>toc</code> -- <a href="http://pythonhosted.org/Markdown/extensions/toc.html">Table of Contents</a></li>
<li><code>fenced_code</code> -- <a href="http://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html">Fenced Code Blocks</a> </li>
<li><code>tables</code> -- <a href="http://pythonhosted.org/Markdown/extensions/tables.html">Tables</a></li>
</ul>
<p>Use the <code>default</code> keyword, to select them all.
If you want all the defaults plus the <code>definition_lists</code> extension,
your settings would look like this:</p>
<pre><code>{
...
parser: 'markdown',
extensions: ['default', 'definition_lists'],
...
}
</code></pre>
<h3 id="markdown2"><em>Markdown2</em></h3>
<p>The <a href="https://github.com/trentm/python-markdown2">Markdown2 Parser</a> also provides support for extensions, known as <a href="https://github.com/trentm/python-markdown2/wiki/Extras">Extras</a>. <br />
You can configure the list of extras you want to use inside the package settings.</p>
<h4 id="default-extras">Default Extras</h4>
<p>The default extras are:</p>
<ul>
<li><code>footnotes</code> -- <a href="https://github.com/trentm/python-markdown2/wiki/footnotes">Footnotes</a></li>
<li><code>toc</code> -- Table of Contents</li>
<li><code>fenced-code-blocks</code> -- <a href="https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks">Fenced CodeBlocks</a></li>
<li><code>cuddled-lists</code> -- <a href="https://github.com/trentm/python-markdown2/wiki/cuddled-lists">Cuddled Lists</a></li>
</ul>
<p>You can enable all default extras at once using the <code>default</code> keyword.
If you want all the default extras plus the 'wiki-table' extra,
your settings would look like this:</p>
<pre><code>{
...
parser: 'markdown2',
extensions: ['default', 'wiki-table'],
...
}
</code></pre>
<h4 id="other-extras">Other Extras</h4>
<p>For a complete list of extras please check out the <a href="https://github.com/trentm/python-markdown2/wiki/Extras">Extras Wiki Page</a>.</p>
<h2 id="examples">Examples</h2>
<h3 id="tables">Tables</h3>
<p>The <code>tables</code> extension of the <em>Python-Markdown</em> parser is activated by default,
but is currently <strong>not</strong> available in <em>Markdown2</em>.</p>
<p>The syntax was adopted from the <a href="http://michelf.ca/projects/php-markdown/extra/#table">php markdown project</a>,
and is also used in github flavoured markdown.</p>
<table>
<thead>
<tr>
<th>Year</th>
<th>Temperature (low)</th>
<th>Temperature (high)</th>
</tr>
</thead>
<tbody>
<tr>
<td>1900</td>
<td>-10</td>
<td>25</td>
</tr>
<tr>
<td>1910</td>
<td>-15</td>
<td>30</td>
</tr>
<tr>
<td>1920</td>
<td>-10</td>
<td>32</td>
</tr>
</tbody>
</table>
<h3 id="wiki-tables">Wiki Tables</h3>
<p>If you are using <em>Markdown2</em> with the <code>wiki-tables</code> extra activated you should see a table below:</p>
<table>
<thead>
<tr>
<th></th>
<th><em>Year</em></th>
<th></th>
<th><em>Temperature (low)</em></th>
<th></th>
<th><em>Temperature (high)</em></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
<td>1910</td>
<td></td>
<td>-15</td>
<td></td>
<td>30</td>
<td></td>
</tr>
<tr>
<td></td>
<td>1920</td>
<td></td>
<td>-10</td>
<td></td>
<td>32</td>
<td></td>
</tr>
</tbody>
</table>
<h3 id="definition-lists">Definition Lists</h3>
<p>This example requires <em>Python Markdown</em>'s <code>def_list</code> extension.</p>
<dl>
<dt>Apple</dt>
<dd>Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.</dd>
<dt>Orange</dt>
<dd>The fruit of an evergreen tree of the genus Citrus.</dd>
</dl>
<h2 id="about">About</h2>
<p>This plugin and this sample file is proudly brought to you by the <a href="http://revolunet.com">revolunet team</a></p>
<div class="footnotes">
<div class="footnote">
<hr />
<ol>
<li id="fn-note-id">
<p>This is the text of the note.&nbsp;<a href="#fnref-note-id" class="footnoteBackLink" title="Jump back to footnote 1 in the text.">&#8617;</a></p>
<li id="fn:note-id">
<p>This is the text of the note. &#160;<a class="footnote-backref" href="#fnref:note-id" rev="footnote" title="Jump back to footnote 1 in the text">&#8617;</a></p>
</li>
</ol>
</div>
</div>

View File

@@ -3,16 +3,16 @@ Sample Markdown Cheat Sheet
This is a sample markdown file to help you write Markdown quickly :)
If you use the fabulous [Sublime Text 2 editor][ST2] along with the [Markdown Preview plugin][MarkdownPreview], open your ST2 Palette with `CMD+P` then choose `Markdown Preview in browser` to see the result in your browser.
If you use the fabulous [Sublime Text 2/3 editor][ST] along with the [Markdown Preview plugin][MarkdownPreview], open your ST2 Palette with `CMD+P` then choose `Markdown Preview in browser` to see the result in your browser.
## Text basics
this is *italic* and this is **bold** . another _italic_ and another __bold__
this is `important` text. and percentage signs : % and `%`
This is a paragraph with a footnote (builtin parser only). [^note-id]
This is a paragraph with a footnote (builtin parser only). [^note-id]
Insert `[ toc ]` without spaces to generate a table of contents (builtin parser only).
Insert `[ toc ]` without spaces to generate a table of contents (builtin parsers only).
## Indentation
> Here is some indented text
@@ -65,12 +65,26 @@ It's quite easy to show code in markdown files.
Backticks can be used to `highlight` some words.
Also, any indented block is considered a code block.
Also, any indented block is considered a code block. If `enable_highlight` is `true`, syntax highlighting will be included (for the builtin parser - the github parser does this automatically).
<script>
document.location = 'http://lmgtfy.com/?q=markdown+cheat+sheet';
</script>
## Math
When `enable_mathjax` is `true`, inline math can be included \\(\frac{\pi}{2}\\) $\pi$
Alternatively, math can be written on its own line:
$$F(\omega) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty} f(t) \, e^{ - i \omega t}dt$$
\\[\int_0^1 f(t) \mathrm{d}t\\]
\\[\sum_j \gamma_j^2/d_j\\]
## GitHub Flavored Markdown
If you use the Github parser, you can use some of [Github Flavored Markdown][gfm] syntax :
@@ -115,6 +129,187 @@ The Github Markdown also brings some [nice Emoji support][emoji] : :+1: :heart:
[^note-id]: This is the text of the note.
## Parsers and Extensions
Markdown Preview comes with **Python-Markdown** and **Markdown2** preloaded.
### *Python-Markdown*
The [Python-Markdown Parser][] provides support for several extensions.
[Python-Markdown Parser]: https://github.com/waylan/Python-Markdown
#### Extra Extensions
* `abbr` -- [Abbreviations][]
* `attr_list` -- [Attribute Lists][]
* `def_list` -- [Definition Lists][]
* `fenced_code` -- [Fenced Code Blocks][]
* `footnotes` -- [Footnotes][]
* `tables` -- [Tables][]
* `smart_strong` -- [Smart Strong][]
[Abbreviations]: http://pythonhosted.org/Markdown/extensions/abbreviations.html
[Attribute Lists]: http://pythonhosted.org/Markdown/extensions/attr_list.html
[Definition Lists]: http://pythonhosted.org/Markdown/extensions/definition_lists.html
[Fenced Code Blocks]: http://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html
[Footnotes]: http://pythonhosted.org/Markdown/extensions/footnotes.html
[Tables]: http://pythonhosted.org/Markdown/extensions/tables.html
[Smart Strong]: http://pythonhosted.org/Markdown/extensions/smart_strong.html
You can enable them all at once using the `extra` keyword.
extensions: [ 'extra' ]
If you want all the extras plus the `toc` extension,
your settings would look like this:
{
...
parser: 'markdown',
extensions: ['extra', 'toc'],
...
}
#### Other Extensions
There are also some extensions that are not included in Markdown Extra
but come in the standard Python-Markdown library.
* `code-hilite` -- [CodeHilite][]
* `html-tidy` -- [HTML Tidy][]
* `header-id` -- [HeaderId][]
* `meta_data` -- [Meta-Data][]
* `nl2br` -- [New Line to Break][]
* `rss` -- [RSS][]
* `sane_lists` -- [Sane Lists][]
* `toc` -- [Table of Contents][]
* `wikilinks` -- [WikiLinks][]
[CodeHilite]: http://pythonhosted.org/Markdown/extensions/code_hilite.html
[HTML Tidy]: http://pythonhosted.org/Markdown/extensions/html_tidy.html
[HeaderId]: http://pythonhosted.org/Markdown/extensions/header_id.html
[Meta-Data]: http://pythonhosted.org/Markdown/extensions/meta_data.html
[New Line to Break]: http://pythonhosted.org/Markdown/extensions/nl2br.html
[RSS]: http://pythonhosted.org/Markdown/extensions/rss.html
[Sane Lists]: http://pythonhosted.org/Markdown/extensions/sane_lists.html
[Table of Contents]: http://pythonhosted.org/Markdown/extensions/toc.html
[WikiLinks]: http://pythonhosted.org/Markdown/extensions/wikilinks.html
#### 3rd Party Extensions
*Python-Markdown* is designed to be extended.
Just fork this repo and add your extensions inside the `.../Packages/Markdown Preview/markdown/extensions/` folder.
Check out the list of [3rd Party extensions](
https://github.com/waylan/Python-Markdown/wiki/Third-Party-Extensions).
#### Default Extensions
The default extensions are:
* `footnotes` -- [Footnotes]
* `toc` -- [Table of Contents]
* `fenced_code` -- [Fenced Code Blocks]
* `tables` -- [Tables]
Use the `default` keyword, to select them all.
If you want all the defaults plus the `definition_lists` extension,
your settings would look like this:
{
...
parser: 'markdown',
extensions: ['default', 'definition_lists'],
...
}
### *Markdown2*
The [Markdown2 Parser][] also provides support for extensions, known as [Extras][].
You can configure the list of extras you want to use inside the package settings.
[Markdown2 Parser]: https://github.com/trentm/python-markdown2
#### Default Extras
The default extras are:
* `footnotes` -- [Footnotes][Footnotes Extra]
* `toc` -- Table of Contents
* `fenced-code-blocks` -- [Fenced CodeBlocks][]
* `cuddled-lists` -- [Cuddled Lists][]
[Footnotes Extra]: https://github.com/trentm/python-markdown2/wiki/footnotes
[Fenced CodeBlocks]: https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks
[Cuddled Lists]: https://github.com/trentm/python-markdown2/wiki/cuddled-lists
You can enable all default extras at once using the `default` keyword.
If you want all the default extras plus the 'wiki-table' extra,
your settings would look like this:
{
...
parser: 'markdown2',
extensions: ['default', 'wiki-table'],
...
}
#### Other Extras
For a complete list of extras please check out the [Extras Wiki Page][Extras].
[Extras]: https://github.com/trentm/python-markdown2/wiki/Extras
## Examples
### Tables
The `tables` extension of the *Python-Markdown* parser is activated by default,
but is currently **not** available in *Markdown2*.
The syntax was adopted from the [php markdown project](http://michelf.ca/projects/php-markdown/extra/#table),
and is also used in github flavoured markdown.
| Year | Temperature (low) | Temperature (high) |
| ---- | ----------------- | -------------------|
| 1900 | -10 | 25 |
| 1910 | -15 | 30 |
| 1920 | -10 | 32 |
### Wiki Tables
If you are using *Markdown2* with the `wiki-tables` extra activated you should see a table below:
|| *Year* || *Temperature (low)* || *Temperature (high)* ||
|| 1900 || -10 || 25 ||
|| 1910 || -15 || 30 ||
|| 1920 || -10 || 32 ||
### Definition Lists
This example requires *Python Markdown*'s `def_list` extension.
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
Orange
: The fruit of an evergreen tree of the genus Citrus.
## About
This plugin and this sample file is proudly brought to you by the [revolunet team][revolunet]
@@ -123,7 +318,7 @@ This plugin and this sample file is proudly brought to you by the [revolunet tea
[ref2]: http://revolunet.com "rich web apps"
[MarkdownREF]: http://daringfireball.net/projects/markdown/basics
[MarkdownPreview]: https://github.com/revolunet/sublimetext-markdown-preview
[ST2]: http://sublimetext.com
[ST]: http://sublimetext.com
[revolunet]: http://revolunet.com
[revolunet-logo]: http://www.revolunet.com/static/parisjs8/img/logo-revolunet-carre.jpg "revolunet logo"
[gfm]: http://github.github.com/github-flavored-markdown/