diff --git a/go.mod b/go.mod
index e1bbd9ac89..02f0c46f22 100644
--- a/go.mod
+++ b/go.mod
@@ -22,7 +22,6 @@ require (
github.com/blevesearch/go-porterstemmer v0.0.0-20141230013033-23a2c8e5cf1f // indirect
github.com/blevesearch/segment v0.0.0-20160105220820-db70c57796cc // indirect
github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
- github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f
github.com/couchbase/vellum v0.0.0-20190111184608-e91b68ff3efe // indirect
github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
@@ -73,6 +72,7 @@ require (
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect
github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5
+ github.com/niklasfasching/go-org v0.1.7
github.com/oliamb/cutter v0.2.2
github.com/philhofer/fwd v1.0.0 // indirect
github.com/pkg/errors v0.8.1
@@ -80,12 +80,13 @@ require (
github.com/prometheus/client_golang v1.1.0
github.com/prometheus/procfs v0.0.4 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
- github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff
+ github.com/russross/blackfriday v2.0.0+incompatible // indirect
+ github.com/russross/blackfriday/v2 v2.0.1
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect
github.com/satori/go.uuid v1.2.0
github.com/sergi/go-diff v1.0.0
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
- github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc // indirect
+ github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
github.com/stretchr/testify v1.4.0
@@ -100,7 +101,7 @@ require (
github.com/willf/bitset v0.0.0-20180426185212-8ce1146b8621 // indirect
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad
- golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
+ golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
golang.org/x/text v0.3.2
diff --git a/go.sum b/go.sum
index 2eeaa79810..7445469d7e 100644
--- a/go.sum
+++ b/go.sum
@@ -86,8 +86,6 @@ github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26/go.mod h1:paBWMc
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f h1:REH9VH5ubNR0skLaOxK7TRJeRbE2dDfvaouQo8FsRcA=
-github.com/chaseadamsio/goorgeous v0.0.0-20170901132237-098da33fde5f/go.mod h1:6QaC0vFoKWYDth94dHFNgRT2YkT5FHdQp/Yx15aAAi0=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/corbym/gocrest v1.0.3 h1:gwEdq6RkTmq+09CTuM29DfKOCtZ7G7bcyxs3IZ6EVdU=
github.com/corbym/gocrest v1.0.3/go.mod h1:maVFL5lbdS2PgfOQgGRWDYTeunSWQeiEgoNdTABShCs=
@@ -425,6 +423,10 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5 h1:BvoENQQU+fZ9uukda/RzCAL/191HHwJA5b13R6diVlY=
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
+github.com/niklasfasching/go-org v0.1.6 h1:F521WcqRNl8OJumlgAnekZgERaTA2HpfOYYfVEKOeI8=
+github.com/niklasfasching/go-org v0.1.6/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU=
+github.com/niklasfasching/go-org v0.1.7 h1:t3V+3XnS/7BhKv/7SlMUa8FvAiq577/a1T3D7mLIRXE=
+github.com/niklasfasching/go-org v0.1.7/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oliamb/cutter v0.2.2 h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k=
github.com/oliamb/cutter v0.2.2/go.mod h1:4BenG2/4GuRBDbVm/OPahDVqbrOemzpPiG5mi1iryBU=
@@ -487,8 +489,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff h1:g9ZlAHmkc/h5So+OjNCkZWh+FjuKEOOOoyRkqlGA8+c=
-github.com/russross/blackfriday v0.0.0-20180428102519-11635eb403ff/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
+github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=
github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca/go.mod h1:uugorj2VCxiV1x+LzaIdVa9b4S4qGAcH6cbhh4qVxOU=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
@@ -499,6 +503,8 @@ github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnP
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc h1:3wIrJvFb3Pf6B/2mDBnN1G5IfUVev4X5apadQlWOczE=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20160918041101-1dba4b3954bc/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
@@ -650,6 +656,8 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc=
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss=
+golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180620175406-ef147856a6dd/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
diff --git a/modules/markup/html_test.go b/modules/markup/html_test.go
index 91ef320b40..07747e97e1 100644
--- a/modules/markup/html_test.go
+++ b/modules/markup/html_test.go
@@ -323,6 +323,6 @@ func TestRender_ShortLinks(t *testing.T) {
`
0 {
- out.WriteByte('\n')
- }
-
- if flags&blackfriday.LIST_TYPE_DEFINITION != 0 {
- out.WriteString("")
- } else if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
- out.WriteString("")
- } else {
- out.WriteString("")
- }
- if !text() {
- out.Truncate(marker)
- return
- }
- if flags&blackfriday.LIST_TYPE_DEFINITION != 0 {
- out.WriteString(" \n")
- } else if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
- out.WriteString("\n")
- } else {
- out.WriteString("\n")
- }
-}
-
-// ListItem defines how list items should be processed to produce corresponding HTML elements.
-func (r *Renderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
- // Detect procedures to draw checkboxes.
- prefix := ""
- if bytes.HasPrefix(text, []byte("")) {
- prefix = "
"
- }
- switch {
- case bytes.HasPrefix(text, []byte(prefix+"[ ] ")):
- text = append([]byte(` `), text[3+len(prefix):]...)
- if prefix != "" {
- text = bytes.Replace(text, []byte(prefix), []byte{}, 1)
+ prefix = strings.Replace(prefix, "/src/", "/media/", 1)
+ link := node.LinkData.Destination
+ if len(link) > 0 && !markup.IsLink(link) {
+ lnk := string(link)
+ lnk = util.URLJoin(prefix, lnk)
+ lnk = strings.Replace(lnk, " ", "+", -1)
+ link = []byte(lnk)
}
- case bytes.HasPrefix(text, []byte(prefix+"[x] ")):
- text = append([]byte(` `), text[3+len(prefix):]...)
- if prefix != "" {
- text = bytes.Replace(text, []byte(prefix), []byte{}, 1)
+ node.LinkData.Destination = link
+ // Render link around image only if parent is not link already
+ if node.Parent != nil && node.Parent.Type != blackfriday.Link {
+ if entering {
+ _, _ = w.Write([]byte(``))
+ return r.Renderer.RenderNode(w, node, entering)
+ }
+ s := r.Renderer.RenderNode(w, node, entering)
+ _, _ = w.Write([]byte(` `))
+ return s
+ }
+ return r.Renderer.RenderNode(w, node, entering)
+ case blackfriday.Link:
+ // special case: this is not a link, a hash link or a mailto:, so it's a
+ // relative URL
+ link := node.LinkData.Destination
+ if len(link) > 0 && !markup.IsLink(link) &&
+ link[0] != '#' && !bytes.HasPrefix(link, byteMailto) &&
+ node.LinkData.Footnote == nil {
+ lnk := string(link)
+ if r.IsWiki {
+ lnk = util.URLJoin("wiki", lnk)
+ }
+ link = []byte(util.URLJoin(r.URLPrefix, lnk))
+ }
+ node.LinkData.Destination = link
+ return r.Renderer.RenderNode(w, node, entering)
+ case blackfriday.Text:
+ isListItem := false
+ for n := node.Parent; n != nil; n = n.Parent {
+ if n.Type == blackfriday.Item {
+ isListItem = true
+ break
+ }
+ }
+ if isListItem {
+ text := node.Literal
+ switch {
+ case bytes.HasPrefix(text, []byte("[ ] ")):
+ _, _ = w.Write([]byte(` `))
+ text = text[3:]
+ case bytes.HasPrefix(text, []byte("[x] ")):
+ _, _ = w.Write([]byte(` `))
+ text = text[3:]
+ }
+ node.Literal = text
}
}
- r.Renderer.ListItem(out, text, flags)
-}
-
-// Image defines how images should be processed to produce corresponding HTML elements.
-func (r *Renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
- prefix := r.URLPrefix
- if r.IsWiki {
- prefix = util.URLJoin(prefix, "wiki", "raw")
- }
- prefix = strings.Replace(prefix, "/src/", "/media/", 1)
- if len(link) > 0 && !markup.IsLink(link) {
- lnk := string(link)
- lnk = util.URLJoin(prefix, lnk)
- lnk = strings.Replace(lnk, " ", "+", -1)
- link = []byte(lnk)
- }
-
- // Put a link around it pointing to itself by default
- out.WriteString(``)
- r.Renderer.Image(out, link, title, alt)
- out.WriteString(" ")
+ return r.Renderer.RenderNode(w, node, entering)
}
const (
blackfridayExtensions = 0 |
- blackfriday.EXTENSION_NO_INTRA_EMPHASIS |
- blackfriday.EXTENSION_TABLES |
- blackfriday.EXTENSION_FENCED_CODE |
- blackfriday.EXTENSION_STRIKETHROUGH |
- blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK |
- blackfriday.EXTENSION_DEFINITION_LISTS |
- blackfriday.EXTENSION_FOOTNOTES |
- blackfriday.EXTENSION_HEADER_IDS |
- blackfriday.EXTENSION_AUTO_HEADER_IDS
+ blackfriday.NoIntraEmphasis |
+ blackfriday.Tables |
+ blackfriday.FencedCode |
+ blackfriday.Strikethrough |
+ blackfriday.NoEmptyLineBeforeBlock |
+ blackfriday.DefinitionLists |
+ blackfriday.Footnotes |
+ blackfriday.HeadingIDs |
+ blackfriday.AutoHeadingIDs
blackfridayHTMLFlags = 0 |
- blackfriday.HTML_SKIP_STYLE |
- blackfriday.HTML_OMIT_CONTENTS |
- blackfriday.HTML_USE_SMARTYPANTS
+ blackfriday.Smartypants
)
// RenderRaw renders Markdown to HTML without handling special links.
func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
renderer := &Renderer{
- Renderer: blackfriday.HtmlRenderer(blackfridayHTMLFlags, "", ""),
+ Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
+ Flags: blackfridayHTMLFlags,
+ }),
URLPrefix: urlPrefix,
IsWiki: wikiMarkdown,
}
exts := blackfridayExtensions
if setting.Markdown.EnableHardLineBreak {
- exts |= blackfriday.EXTENSION_HARD_LINE_BREAK
+ exts |= blackfriday.HardLineBreak
}
- body = blackfriday.Markdown(body, renderer, exts)
+ body = blackfriday.Run(body, blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts))
return markup.SanitizeBytes(body)
}
diff --git a/modules/markup/markdown/markdown_test.go b/modules/markup/markdown/markdown_test.go
index 669b49367e..b29f870ce5 100644
--- a/modules/markup/markdown/markdown_test.go
+++ b/modules/markup/markdown/markdown_test.go
@@ -166,13 +166,13 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
Here is a simple footnote,1 and here is a longer one.2
+
-This is the first footnote.
-
+This is the first footnote.
Here is one with multiple paragraphs and code.
@@ -180,9 +180,9 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
{ my code }
-Add as many paragraphs as you like.
-
+Add as many paragraphs as you like.
+
`,
}
diff --git a/modules/markup/mdstripper/mdstripper.go b/modules/markup/mdstripper/mdstripper.go
index 7a901b17a9..d248944b68 100644
--- a/modules/markup/mdstripper/mdstripper.go
+++ b/modules/markup/mdstripper/mdstripper.go
@@ -6,43 +6,39 @@ package mdstripper
import (
"bytes"
+ "io"
- "github.com/russross/blackfriday"
+ "github.com/russross/blackfriday/v2"
)
// MarkdownStripper extends blackfriday.Renderer
type MarkdownStripper struct {
- blackfriday.Renderer
links []string
coallesce bool
+ empty bool
}
const (
blackfridayExtensions = 0 |
- blackfriday.EXTENSION_NO_INTRA_EMPHASIS |
- blackfriday.EXTENSION_TABLES |
- blackfriday.EXTENSION_FENCED_CODE |
- blackfriday.EXTENSION_STRIKETHROUGH |
- blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK |
- blackfriday.EXTENSION_DEFINITION_LISTS |
- blackfriday.EXTENSION_FOOTNOTES |
- blackfriday.EXTENSION_HEADER_IDS |
- blackfriday.EXTENSION_AUTO_HEADER_IDS |
+ blackfriday.NoIntraEmphasis |
+ blackfriday.Tables |
+ blackfriday.FencedCode |
+ blackfriday.Strikethrough |
+ blackfriday.NoEmptyLineBeforeBlock |
+ blackfriday.DefinitionLists |
+ blackfriday.Footnotes |
+ blackfriday.HeadingIDs |
+ blackfriday.AutoHeadingIDs |
// Not included in modules/markup/markdown/markdown.go;
// required here to process inline links
- blackfriday.EXTENSION_AUTOLINK
+ blackfriday.Autolink
)
-//revive:disable:var-naming Implementing the Rendering interface requires breaking some linting rules
-
// StripMarkdown parses markdown content by removing all markup and code blocks
// in order to extract links and other references
func StripMarkdown(rawBytes []byte) (string, []string) {
- stripper := &MarkdownStripper{
- links: make([]string, 0, 10),
- }
- body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions)
- return string(body), stripper.GetLinks()
+ buf, links := StripMarkdownBytes(rawBytes)
+ return string(buf), links
}
// StripMarkdownBytes parses markdown content by removing all markup and code blocks
@@ -50,205 +46,67 @@ func StripMarkdown(rawBytes []byte) (string, []string) {
func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) {
stripper := &MarkdownStripper{
links: make([]string, 0, 10),
+ empty: true,
}
- body := blackfriday.Markdown(rawBytes, stripper, blackfridayExtensions)
- return body, stripper.GetLinks()
+
+ parser := blackfriday.New(blackfriday.WithRenderer(stripper), blackfriday.WithExtensions(blackfridayExtensions))
+ ast := parser.Parse(rawBytes)
+ var buf bytes.Buffer
+ stripper.RenderHeader(&buf, ast)
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ return stripper.RenderNode(&buf, node, entering)
+ })
+ stripper.RenderFooter(&buf, ast)
+ return buf.Bytes(), stripper.GetLinks()
}
-// block-level callbacks
-
-// BlockCode dummy function to proceed with rendering
-func (r *MarkdownStripper) BlockCode(out *bytes.Buffer, text []byte, infoString string) {
- // Not rendered
+// RenderNode is the main rendering method. It will be called once for
+// every leaf node and twice for every non-leaf node (first with
+// entering=true, then with entering=false). The method should write its
+// rendition of the node to the supplied writer w.
+func (r *MarkdownStripper) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ if !entering {
+ return blackfriday.GoToNext
+ }
+ switch node.Type {
+ case blackfriday.Text:
+ r.processString(w, node.Literal, node.Parent == nil)
+ return blackfriday.GoToNext
+ case blackfriday.Link:
+ r.processLink(w, node.LinkData.Destination)
+ r.coallesce = false
+ return blackfriday.SkipChildren
+ }
r.coallesce = false
+ return blackfriday.GoToNext
}
-// BlockQuote dummy function to proceed with rendering
-func (r *MarkdownStripper) BlockQuote(out *bytes.Buffer, text []byte) {
- // FIXME: perhaps it's better to leave out block quote for this?
- r.processString(out, text, false)
+// RenderHeader is a method that allows the renderer to produce some
+// content preceding the main body of the output document.
+func (r *MarkdownStripper) RenderHeader(w io.Writer, ast *blackfriday.Node) {
}
-// BlockHtml dummy function to proceed with rendering
-func (r *MarkdownStripper) BlockHtml(out *bytes.Buffer, text []byte) { //nolint
- // Not rendered
- r.coallesce = false
+// RenderFooter is a symmetric counterpart of RenderHeader.
+func (r *MarkdownStripper) RenderFooter(w io.Writer, ast *blackfriday.Node) {
}
-// Header dummy function to proceed with rendering
-func (r *MarkdownStripper) Header(out *bytes.Buffer, text func() bool, level int, id string) {
- text()
- r.coallesce = false
-}
-
-// HRule dummy function to proceed with rendering
-func (r *MarkdownStripper) HRule(out *bytes.Buffer) {
- // Not rendered
- r.coallesce = false
-}
-
-// List dummy function to proceed with rendering
-func (r *MarkdownStripper) List(out *bytes.Buffer, text func() bool, flags int) {
- text()
- r.coallesce = false
-}
-
-// ListItem dummy function to proceed with rendering
-func (r *MarkdownStripper) ListItem(out *bytes.Buffer, text []byte, flags int) {
- r.processString(out, text, false)
-}
-
-// Paragraph dummy function to proceed with rendering
-func (r *MarkdownStripper) Paragraph(out *bytes.Buffer, text func() bool) {
- text()
- r.coallesce = false
-}
-
-// Table dummy function to proceed with rendering
-func (r *MarkdownStripper) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
- r.processString(out, header, false)
- r.processString(out, body, false)
-}
-
-// TableRow dummy function to proceed with rendering
-func (r *MarkdownStripper) TableRow(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// TableHeaderCell dummy function to proceed with rendering
-func (r *MarkdownStripper) TableHeaderCell(out *bytes.Buffer, text []byte, flags int) {
- r.processString(out, text, false)
-}
-
-// TableCell dummy function to proceed with rendering
-func (r *MarkdownStripper) TableCell(out *bytes.Buffer, text []byte, flags int) {
- r.processString(out, text, false)
-}
-
-// Footnotes dummy function to proceed with rendering
-func (r *MarkdownStripper) Footnotes(out *bytes.Buffer, text func() bool) {
- text()
-}
-
-// FootnoteItem dummy function to proceed with rendering
-func (r *MarkdownStripper) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
- r.processString(out, text, false)
-}
-
-// TitleBlock dummy function to proceed with rendering
-func (r *MarkdownStripper) TitleBlock(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// Span-level callbacks
-
-// AutoLink dummy function to proceed with rendering
-func (r *MarkdownStripper) AutoLink(out *bytes.Buffer, link []byte, kind int) {
- r.processLink(out, link, []byte{})
-}
-
-// CodeSpan dummy function to proceed with rendering
-func (r *MarkdownStripper) CodeSpan(out *bytes.Buffer, text []byte) {
- // Not rendered
- r.coallesce = false
-}
-
-// DoubleEmphasis dummy function to proceed with rendering
-func (r *MarkdownStripper) DoubleEmphasis(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// Emphasis dummy function to proceed with rendering
-func (r *MarkdownStripper) Emphasis(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// Image dummy function to proceed with rendering
-func (r *MarkdownStripper) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
- // Not rendered
- r.coallesce = false
-}
-
-// LineBreak dummy function to proceed with rendering
-func (r *MarkdownStripper) LineBreak(out *bytes.Buffer) {
- // Not rendered
- r.coallesce = false
-}
-
-// Link dummy function to proceed with rendering
-func (r *MarkdownStripper) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
- r.processLink(out, link, content)
-}
-
-// RawHtmlTag dummy function to proceed with rendering
-func (r *MarkdownStripper) RawHtmlTag(out *bytes.Buffer, tag []byte) { //nolint
- // Not rendered
- r.coallesce = false
-}
-
-// TripleEmphasis dummy function to proceed with rendering
-func (r *MarkdownStripper) TripleEmphasis(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// StrikeThrough dummy function to proceed with rendering
-func (r *MarkdownStripper) StrikeThrough(out *bytes.Buffer, text []byte) {
- r.processString(out, text, false)
-}
-
-// FootnoteRef dummy function to proceed with rendering
-func (r *MarkdownStripper) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
- // Not rendered
- r.coallesce = false
-}
-
-// Low-level callbacks
-
-// Entity dummy function to proceed with rendering
-func (r *MarkdownStripper) Entity(out *bytes.Buffer, entity []byte) {
- // FIXME: literal entities are not parsed; perhaps they should
- r.coallesce = false
-}
-
-// NormalText dummy function to proceed with rendering
-func (r *MarkdownStripper) NormalText(out *bytes.Buffer, text []byte) {
- r.processString(out, text, true)
-}
-
-// Header and footer
-
-// DocumentHeader dummy function to proceed with rendering
-func (r *MarkdownStripper) DocumentHeader(out *bytes.Buffer) {
- r.coallesce = false
-}
-
-// DocumentFooter dummy function to proceed with rendering
-func (r *MarkdownStripper) DocumentFooter(out *bytes.Buffer) {
- r.coallesce = false
-}
-
-// GetFlags returns rendering flags
-func (r *MarkdownStripper) GetFlags() int {
- return 0
-}
-
-//revive:enable:var-naming
-
-func doubleSpace(out *bytes.Buffer) {
- if out.Len() > 0 {
- out.WriteByte('\n')
+func (r *MarkdownStripper) doubleSpace(w io.Writer) {
+ if !r.empty {
+ _, _ = w.Write([]byte{'\n'})
}
}
-func (r *MarkdownStripper) processString(out *bytes.Buffer, text []byte, coallesce bool) {
+func (r *MarkdownStripper) processString(w io.Writer, text []byte, coallesce bool) {
// Always break-up words
if !coallesce || !r.coallesce {
- doubleSpace(out)
+ r.doubleSpace(w)
}
- out.Write(text)
+ _, _ = w.Write(text)
r.coallesce = coallesce
+ r.empty = false
}
-func (r *MarkdownStripper) processLink(out *bytes.Buffer, link []byte, content []byte) {
+
+func (r *MarkdownStripper) processLink(w io.Writer, link []byte) {
// Links are processed out of band
r.links = append(r.links, string(link))
r.coallesce = false
diff --git a/modules/markup/orgmode/orgmode.go b/modules/markup/orgmode/orgmode.go
index f63155201e..54188d2734 100644
--- a/modules/markup/orgmode/orgmode.go
+++ b/modules/markup/orgmode/orgmode.go
@@ -5,12 +5,16 @@
package markup
import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
- "code.gitea.io/gitea/modules/markup/markdown"
+ "code.gitea.io/gitea/modules/util"
- "github.com/chaseadamsio/goorgeous"
- "github.com/russross/blackfriday"
+ "github.com/niklasfasching/go-org/org"
)
func init() {
@@ -32,23 +36,23 @@ func (Parser) Extensions() []string {
}
// Render renders orgmode rawbytes to HTML
-func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) (result []byte) {
- defer func() {
- if err := recover(); err != nil {
- log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err)
- result = rawBytes
- }
- }()
- htmlFlags := blackfriday.HTML_USE_XHTML
- htmlFlags |= blackfriday.HTML_SKIP_STYLE
- htmlFlags |= blackfriday.HTML_OMIT_CONTENTS
- renderer := &markdown.Renderer{
- Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""),
- URLPrefix: urlPrefix,
- IsWiki: isWiki,
+func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {
+ htmlWriter := org.NewHTMLWriter()
+
+ renderer := &Renderer{
+ HTMLWriter: htmlWriter,
+ URLPrefix: urlPrefix,
+ IsWiki: isWiki,
}
- result = goorgeous.Org(rawBytes, renderer)
- return
+
+ htmlWriter.ExtendingWriter = renderer
+
+ res, err := org.New().Silent().Parse(bytes.NewReader(rawBytes), "").Write(renderer)
+ if err != nil {
+ log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err)
+ return rawBytes
+ }
+ return []byte(res)
}
// RenderString reners orgmode string to HTML string
@@ -56,7 +60,63 @@ func RenderString(rawContent string, urlPrefix string, metas map[string]string,
return string(Render([]byte(rawContent), urlPrefix, metas, isWiki))
}
-// Render implements markup.Parser
+// Render reners orgmode string to HTML string
func (Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {
return Render(rawBytes, urlPrefix, metas, isWiki)
}
+
+// Renderer implements org.Writer
+type Renderer struct {
+ *org.HTMLWriter
+ URLPrefix string
+ IsWiki bool
+}
+
+var byteMailto = []byte("mailto:")
+
+// WriteRegularLink renders images, links or videos
+func (r *Renderer) WriteRegularLink(l org.RegularLink) {
+ link := []byte(html.EscapeString(l.URL))
+ if l.Protocol == "file" {
+ link = link[len("file:"):]
+ }
+ if len(link) > 0 && !markup.IsLink(link) &&
+ link[0] != '#' && !bytes.HasPrefix(link, byteMailto) {
+ lnk := string(link)
+ if r.IsWiki {
+ lnk = util.URLJoin("wiki", lnk)
+ }
+ link = []byte(util.URLJoin(r.URLPrefix, lnk))
+ }
+
+ description := string(link)
+ if l.Description != nil {
+ description = r.nodesAsString(l.Description...)
+ }
+ switch l.Kind() {
+ case "image":
+ r.WriteString(fmt.Sprintf(` `, link, description, description))
+ case "video":
+ r.WriteString(fmt.Sprintf(`%s `, link, description, description))
+ default:
+ r.WriteString(fmt.Sprintf(` \n"))
- p.inline(&tmpBuf, data)
- tmpBuf.WriteByte('\n')
- tmpBuf.Write([]byte("
\n"))
- tmpBlock.Write(tmpBuf.Bytes())
-
- } else {
- tmpBlock.WriteByte('\n')
- tmpBlock.Write(data)
- }
-
- } else {
- marker = string(matches[2])
- syntax = string(matches[3])
- }
- case isFootnoteDef(data):
- matches := reFootnoteDef.FindSubmatch(data)
- for i := range p.notes {
- if p.notes[i].id == string(matches[1]) {
- p.notes[i].def = string(matches[2])
- }
- }
- case isTable(data):
- if inTable != true {
- inTable = true
- }
- tmpBlock.Write(data)
- tmpBlock.WriteByte('\n')
- case IsKeyword(data):
- continue
- case isComment(data):
- p.generateComment(&output, data)
- case isHeadline(data):
- p.generateHeadline(&output, data)
- case isDefinitionList(data):
- if inList != true {
- listType = "dl"
- inList = true
- }
- var work bytes.Buffer
- flags := blackfriday.LIST_TYPE_DEFINITION
- matches := reDefinitionList.FindSubmatch(data)
- flags |= blackfriday.LIST_TYPE_TERM
- p.inline(&work, matches[1])
- p.r.ListItem(&tmpBlock, work.Bytes(), flags)
- work.Reset()
- flags &= ^blackfriday.LIST_TYPE_TERM
- p.inline(&work, matches[2])
- p.r.ListItem(&tmpBlock, work.Bytes(), flags)
- case isUnorderedList(data):
- if inList != true {
- listType = "ul"
- inList = true
- }
- matches := reUnorderedList.FindSubmatch(data)
- var work bytes.Buffer
- p.inline(&work, matches[2])
- p.r.ListItem(&tmpBlock, work.Bytes(), 0)
- case isOrderedList(data):
- if inList != true {
- listType = "ol"
- inList = true
- }
- matches := reOrderedList.FindSubmatch(data)
- var work bytes.Buffer
- tmpBlock.WriteString("
+Copyright (c) 2018 Niklas Fasching
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/niklasfasching/go-org/org/block.go b/vendor/github.com/niklasfasching/go-org/org/block.go
new file mode 100644
index 0000000000..0e7a526243
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/block.go
@@ -0,0 +1,84 @@
+package org
+
+import (
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+type Block struct {
+ Name string
+ Parameters []string
+ Children []Node
+}
+
+type Example struct {
+ Children []Node
+}
+
+var exampleLineRegexp = regexp.MustCompile(`^(\s*):(\s(.*)|\s*$)`)
+var beginBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+BEGIN_(\w+)(.*)`)
+var endBlockRegexp = regexp.MustCompile(`(?i)^(\s*)#\+END_(\w+)`)
+
+func lexBlock(line string) (token, bool) {
+ if m := beginBlockRegexp.FindStringSubmatch(line); m != nil {
+ return token{"beginBlock", len(m[1]), strings.ToUpper(m[2]), m}, true
+ } else if m := endBlockRegexp.FindStringSubmatch(line); m != nil {
+ return token{"endBlock", len(m[1]), strings.ToUpper(m[2]), m}, true
+ }
+ return nilToken, false
+}
+
+func lexExample(line string) (token, bool) {
+ if m := exampleLineRegexp.FindStringSubmatch(line); m != nil {
+ return token{"example", len(m[1]), m[3], m}, true
+ }
+ return nilToken, false
+}
+
+func isRawTextBlock(name string) bool { return name == "SRC" || name == "EXAMPLE" || name == "EXPORT" }
+
+func (d *Document) parseBlock(i int, parentStop stopFn) (int, Node) {
+ t, start := d.tokens[i], i
+ name, parameters := t.content, strings.Fields(t.matches[3])
+ trim := trimIndentUpTo(d.tokens[i].lvl)
+ stop := func(d *Document, i int) bool {
+ return i >= len(d.tokens) || (d.tokens[i].kind == "endBlock" && d.tokens[i].content == name)
+ }
+ block, i := Block{name, parameters, nil}, i+1
+ if isRawTextBlock(name) {
+ rawText := ""
+ for ; !stop(d, i); i++ {
+ rawText += trim(d.tokens[i].matches[0]) + "\n"
+ }
+ block.Children = d.parseRawInline(rawText)
+ } else {
+ consumed, nodes := d.parseMany(i, stop)
+ block.Children = nodes
+ i += consumed
+ }
+ if i < len(d.tokens) && d.tokens[i].kind == "endBlock" && d.tokens[i].content == name {
+ return i + 1 - start, block
+ }
+ return 0, nil
+}
+
+func (d *Document) parseExample(i int, parentStop stopFn) (int, Node) {
+ example, start := Example{}, i
+ for ; !parentStop(d, i) && d.tokens[i].kind == "example"; i++ {
+ example.Children = append(example.Children, Text{d.tokens[i].content, true})
+ }
+ return i - start, example
+}
+
+func trimIndentUpTo(max int) func(string) string {
+ return func(line string) string {
+ i := 0
+ for ; i < len(line) && i < max && unicode.IsSpace(rune(line[i])); i++ {
+ }
+ return line[i:]
+ }
+}
+
+func (n Example) String() string { return orgWriter.nodesAsString(n) }
+func (n Block) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/document.go b/vendor/github.com/niklasfasching/go-org/org/document.go
new file mode 100644
index 0000000000..e43eb626db
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/document.go
@@ -0,0 +1,260 @@
+// Package org is an Org mode syntax processor.
+//
+// It parses plain text into an AST and can export it as HTML or pretty printed Org mode syntax.
+// Further export formats can be defined using the Writer interface.
+//
+// You probably want to start with something like this:
+// input := strings.NewReader("Your Org mode input")
+// html, err := org.New().Parse(input, "./").Write(org.NewHTMLWriter())
+// if err != nil {
+// log.Fatalf("Something went wrong: %s", err)
+// }
+// log.Print(html)
+package org
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+)
+
+type Configuration struct {
+ MaxEmphasisNewLines int // Maximum number of newlines inside an emphasis. See org-emphasis-regexp-components newline.
+ AutoLink bool // Try to convert text passages that look like hyperlinks into hyperlinks.
+ DefaultSettings map[string]string // Default values for settings that are overriden by setting the same key in BufferSettings.
+ Log *log.Logger // Log is used to print warnings during parsing.
+ ReadFile func(filename string) ([]byte, error) // ReadFile is used to read e.g. #+INCLUDE files.
+}
+
+// Document contains the parsing results and a pointer to the Configuration.
+type Document struct {
+ *Configuration
+ Path string // Path of the file containing the parse input - used to resolve relative paths during parsing (e.g. INCLUDE).
+ tokens []token
+ Nodes []Node
+ NamedNodes map[string]Node
+ Outline Outline // Outline is a Table Of Contents for the document and contains all sections (headline + content).
+ BufferSettings map[string]string // Settings contains all settings that were parsed from keywords.
+ Error error
+}
+
+// Node represents a parsed node of the document.
+type Node interface {
+ String() string // String returns the pretty printed Org mode string for the node (see OrgWriter).
+}
+
+type lexFn = func(line string) (t token, ok bool)
+type parseFn = func(*Document, int, stopFn) (int, Node)
+type stopFn = func(*Document, int) bool
+
+type token struct {
+ kind string
+ lvl int
+ content string
+ matches []string
+}
+
+var lexFns = []lexFn{
+ lexHeadline,
+ lexDrawer,
+ lexBlock,
+ lexList,
+ lexTable,
+ lexHorizontalRule,
+ lexKeywordOrComment,
+ lexFootnoteDefinition,
+ lexExample,
+ lexText,
+}
+
+var nilToken = token{"nil", -1, "", nil}
+var orgWriter = NewOrgWriter()
+
+// New returns a new Configuration with (hopefully) sane defaults.
+func New() *Configuration {
+ return &Configuration{
+ AutoLink: true,
+ MaxEmphasisNewLines: 1,
+ DefaultSettings: map[string]string{
+ "TODO": "TODO | DONE",
+ "EXCLUDE_TAGS": "noexport",
+ "OPTIONS": "toc:t <:t e:t f:t pri:t todo:t tags:t",
+ },
+ Log: log.New(os.Stderr, "go-org: ", 0),
+ ReadFile: ioutil.ReadFile,
+ }
+}
+
+// String returns the pretty printed Org mode string for the given nodes (see OrgWriter).
+func String(nodes []Node) string { return orgWriter.nodesAsString(nodes...) }
+
+// Write is called after with an instance of the Writer interface to export a parsed Document into another format.
+func (d *Document) Write(w Writer) (out string, err error) {
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ err = fmt.Errorf("could not write output: %s", recovered)
+ }
+ }()
+ if d.Error != nil {
+ return "", d.Error
+ } else if d.Nodes == nil {
+ return "", fmt.Errorf("could not write output: parse was not called")
+ }
+ w.Before(d)
+ WriteNodes(w, d.Nodes...)
+ w.After(d)
+ return w.String(), err
+}
+
+// Parse parses the input into an AST (and some other helpful fields like Outline).
+// To allow method chaining, errors are stored in document.Error rather than being returned.
+func (c *Configuration) Parse(input io.Reader, path string) (d *Document) {
+ outlineSection := &Section{}
+ d = &Document{
+ Configuration: c,
+ Outline: Outline{outlineSection, outlineSection, 0},
+ BufferSettings: map[string]string{},
+ NamedNodes: map[string]Node{},
+ Path: path,
+ }
+ defer func() {
+ if recovered := recover(); recovered != nil {
+ d.Error = fmt.Errorf("could not parse input: %v", recovered)
+ }
+ }()
+ if d.tokens != nil {
+ d.Error = fmt.Errorf("parse was called multiple times")
+ }
+ d.tokenize(input)
+ _, nodes := d.parseMany(0, func(d *Document, i int) bool { return i >= len(d.tokens) })
+ d.Nodes = nodes
+ return d
+}
+
+// Silent disables all logging of warnings during parsing.
+func (c *Configuration) Silent() *Configuration {
+ c.Log = log.New(ioutil.Discard, "", 0)
+ return c
+}
+
+func (d *Document) tokenize(input io.Reader) {
+ d.tokens = []token{}
+ scanner := bufio.NewScanner(input)
+ for scanner.Scan() {
+ d.tokens = append(d.tokens, tokenize(scanner.Text()))
+ }
+ if err := scanner.Err(); err != nil {
+ d.Error = fmt.Errorf("could not tokenize input: %s", err)
+ }
+}
+
+// Get returns the value for key in BufferSettings or DefaultSettings if key does not exist in the former
+func (d *Document) Get(key string) string {
+ if v, ok := d.BufferSettings[key]; ok {
+ return v
+ }
+ if v, ok := d.DefaultSettings[key]; ok {
+ return v
+ }
+ return ""
+}
+
+// GetOption returns the value associated to the export option key
+// Currently supported options:
+// - < (export timestamps)
+// - e (export org entities)
+// - f (export footnotes)
+// - toc (export table of content)
+// - todo (export headline todo status)
+// - pri (export headline priority)
+// - tags (export headline tags)
+// see https://orgmode.org/manual/Export-settings.html for more information
+func (d *Document) GetOption(key string) bool {
+ get := func(settings map[string]string) string {
+ for _, field := range strings.Fields(settings["OPTIONS"]) {
+ if strings.HasPrefix(field, key+":") {
+ return field[len(key)+1:]
+ }
+ }
+ return ""
+ }
+ value := get(d.BufferSettings)
+ if value == "" {
+ value = get(d.DefaultSettings)
+ }
+ switch value {
+ case "t":
+ return true
+ case "nil":
+ return false
+ default:
+ d.Log.Printf("Bad value for export option %s (%s)", key, value)
+ return false
+ }
+}
+
+func (d *Document) parseOne(i int, stop stopFn) (consumed int, node Node) {
+ switch d.tokens[i].kind {
+ case "unorderedList", "orderedList":
+ consumed, node = d.parseList(i, stop)
+ case "tableRow", "tableSeparator":
+ consumed, node = d.parseTable(i, stop)
+ case "beginBlock":
+ consumed, node = d.parseBlock(i, stop)
+ case "beginDrawer":
+ consumed, node = d.parseDrawer(i, stop)
+ case "text":
+ consumed, node = d.parseParagraph(i, stop)
+ case "example":
+ consumed, node = d.parseExample(i, stop)
+ case "horizontalRule":
+ consumed, node = d.parseHorizontalRule(i, stop)
+ case "comment":
+ consumed, node = d.parseComment(i, stop)
+ case "keyword":
+ consumed, node = d.parseKeyword(i, stop)
+ case "headline":
+ consumed, node = d.parseHeadline(i, stop)
+ case "footnoteDefinition":
+ consumed, node = d.parseFootnoteDefinition(i, stop)
+ }
+
+ if consumed != 0 {
+ return consumed, node
+ }
+ d.Log.Printf("Could not parse token %#v: Falling back to treating it as plain text.", d.tokens[i])
+ m := plainTextRegexp.FindStringSubmatch(d.tokens[i].matches[0])
+ d.tokens[i] = token{"text", len(m[1]), m[2], m}
+ return d.parseOne(i, stop)
+}
+
+func (d *Document) parseMany(i int, stop stopFn) (int, []Node) {
+ start, nodes := i, []Node{}
+ for i < len(d.tokens) && !stop(d, i) {
+ consumed, node := d.parseOne(i, stop)
+ i += consumed
+ nodes = append(nodes, node)
+ }
+ return i - start, nodes
+}
+
+func (d *Document) addHeadline(headline *Headline) int {
+ current := &Section{Headline: headline}
+ d.Outline.last.add(current)
+ d.Outline.count++
+ d.Outline.last = current
+ return d.Outline.count
+}
+
+func tokenize(line string) token {
+ for _, lexFn := range lexFns {
+ if token, ok := lexFn(line); ok {
+ return token
+ }
+ }
+ panic(fmt.Sprintf("could not lex line: %s", line))
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/drawer.go b/vendor/github.com/niklasfasching/go-org/org/drawer.go
new file mode 100644
index 0000000000..8bb9974380
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/drawer.go
@@ -0,0 +1,97 @@
+package org
+
+import (
+ "regexp"
+ "strings"
+)
+
+type Drawer struct {
+ Name string
+ Children []Node
+}
+
+type PropertyDrawer struct {
+ Properties [][]string
+}
+
+var beginDrawerRegexp = regexp.MustCompile(`^(\s*):(\S+):\s*$`)
+var endDrawerRegexp = regexp.MustCompile(`^(\s*):END:\s*$`)
+var propertyRegexp = regexp.MustCompile(`^(\s*):(\S+):(\s+(.*)$|$)`)
+
+func lexDrawer(line string) (token, bool) {
+ if m := endDrawerRegexp.FindStringSubmatch(line); m != nil {
+ return token{"endDrawer", len(m[1]), "", m}, true
+ } else if m := beginDrawerRegexp.FindStringSubmatch(line); m != nil {
+ return token{"beginDrawer", len(m[1]), strings.ToUpper(m[2]), m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseDrawer(i int, parentStop stopFn) (int, Node) {
+ name := strings.ToUpper(d.tokens[i].content)
+ if name == "PROPERTIES" {
+ return d.parsePropertyDrawer(i, parentStop)
+ }
+ drawer, start := Drawer{Name: name}, i
+ i++
+ stop := func(d *Document, i int) bool {
+ if parentStop(d, i) {
+ return true
+ }
+ kind := d.tokens[i].kind
+ return kind == "beginDrawer" || kind == "endDrawer" || kind == "headline"
+ }
+ for {
+ consumed, nodes := d.parseMany(i, stop)
+ i += consumed
+ drawer.Children = append(drawer.Children, nodes...)
+ if i < len(d.tokens) && d.tokens[i].kind == "beginDrawer" {
+ p := Paragraph{[]Node{Text{":" + d.tokens[i].content + ":", false}}}
+ drawer.Children = append(drawer.Children, p)
+ i++
+ } else {
+ break
+ }
+ }
+ if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" {
+ i++
+ }
+ return i - start, drawer
+}
+
+func (d *Document) parsePropertyDrawer(i int, parentStop stopFn) (int, Node) {
+ drawer, start := PropertyDrawer{}, i
+ i++
+ stop := func(d *Document, i int) bool {
+ return parentStop(d, i) || (d.tokens[i].kind != "text" && d.tokens[i].kind != "beginDrawer")
+ }
+ for ; !stop(d, i); i++ {
+ m := propertyRegexp.FindStringSubmatch(d.tokens[i].matches[0])
+ if m == nil {
+ return 0, nil
+ }
+ k, v := strings.ToUpper(m[2]), strings.TrimSpace(m[4])
+ drawer.Properties = append(drawer.Properties, []string{k, v})
+ }
+ if i < len(d.tokens) && d.tokens[i].kind == "endDrawer" {
+ i++
+ } else {
+ return 0, nil
+ }
+ return i - start, drawer
+}
+
+func (d *PropertyDrawer) Get(key string) (string, bool) {
+ if d == nil {
+ return "", false
+ }
+ for _, kvPair := range d.Properties {
+ if kvPair[0] == key {
+ return kvPair[1], true
+ }
+ }
+ return "", false
+}
+
+func (n Drawer) String() string { return orgWriter.nodesAsString(n) }
+func (n PropertyDrawer) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/footnote.go b/vendor/github.com/niklasfasching/go-org/org/footnote.go
new file mode 100644
index 0000000000..660e244386
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/footnote.go
@@ -0,0 +1,35 @@
+package org
+
+import (
+ "regexp"
+)
+
+type FootnoteDefinition struct {
+ Name string
+ Children []Node
+ Inline bool
+}
+
+var footnoteDefinitionRegexp = regexp.MustCompile(`^\[fn:([\w-]+)\](\s+(.+)|\s*$)`)
+
+func lexFootnoteDefinition(line string) (token, bool) {
+ if m := footnoteDefinitionRegexp.FindStringSubmatch(line); m != nil {
+ return token{"footnoteDefinition", 0, m[1], m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseFootnoteDefinition(i int, parentStop stopFn) (int, Node) {
+ start, name := i, d.tokens[i].content
+ d.tokens[i] = tokenize(d.tokens[i].matches[2])
+ stop := func(d *Document, i int) bool {
+ return parentStop(d, i) ||
+ (isSecondBlankLine(d, i) && i > start+1) ||
+ d.tokens[i].kind == "headline" || d.tokens[i].kind == "footnoteDefinition"
+ }
+ consumed, nodes := d.parseMany(i, stop)
+ definition := FootnoteDefinition{name, nodes, false}
+ return consumed, definition
+}
+
+func (n FootnoteDefinition) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/fuzz.go b/vendor/github.com/niklasfasching/go-org/org/fuzz.go
new file mode 100644
index 0000000000..1e72b5ad92
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/fuzz.go
@@ -0,0 +1,27 @@
+// +build gofuzz
+
+package org
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Fuzz function to be used by https://github.com/dvyukov/go-fuzz
+func Fuzz(input []byte) int {
+ conf := New().Silent()
+ d := conf.Parse(bytes.NewReader(input), "")
+ orgOutput, err := d.Write(NewOrgWriter())
+ if err != nil {
+ panic(err)
+ }
+ htmlOutputA, err := d.Write(NewHTMLWriter())
+ if err != nil {
+ panic(err)
+ }
+ htmlOutputB, err := conf.Parse(strings.NewReader(orgOutput), "").Write(NewHTMLWriter())
+ if htmlOutputA != htmlOutputB {
+ panic("rendered org results in different html than original input")
+ }
+ return 0
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/headline.go b/vendor/github.com/niklasfasching/go-org/org/headline.go
new file mode 100644
index 0000000000..23b986fbc8
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/headline.go
@@ -0,0 +1,101 @@
+package org
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+type Outline struct {
+ *Section
+ last *Section
+ count int
+}
+
+type Section struct {
+ Headline *Headline
+ Parent *Section
+ Children []*Section
+}
+
+type Headline struct {
+ Index int
+ Lvl int
+ Status string
+ Priority string
+ Properties *PropertyDrawer
+ Title []Node
+ Tags []string
+ Children []Node
+}
+
+var headlineRegexp = regexp.MustCompile(`^([*]+)\s+(.*)`)
+var tagRegexp = regexp.MustCompile(`(.*?)\s+(:[A-Za-z0-9_@#%:]+:\s*$)`)
+
+func lexHeadline(line string) (token, bool) {
+ if m := headlineRegexp.FindStringSubmatch(line); m != nil {
+ return token{"headline", len(m[1]), m[2], m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseHeadline(i int, parentStop stopFn) (int, Node) {
+ t, headline := d.tokens[i], Headline{}
+ headline.Lvl = t.lvl
+
+ headline.Index = d.addHeadline(&headline)
+
+ text := t.content
+ todoKeywords := strings.FieldsFunc(d.Get("TODO"), func(r rune) bool { return unicode.IsSpace(r) || r == '|' })
+ for _, k := range todoKeywords {
+ if strings.HasPrefix(text, k) && len(text) > len(k) && unicode.IsSpace(rune(text[len(k)])) {
+ headline.Status = k
+ text = text[len(k)+1:]
+ break
+ }
+ }
+
+ if len(text) >= 4 && text[0:2] == "[#" && strings.Contains("ABC", text[2:3]) && text[3] == ']' {
+ headline.Priority = text[2:3]
+ text = strings.TrimSpace(text[4:])
+ }
+
+ if m := tagRegexp.FindStringSubmatch(text); m != nil {
+ text = m[1]
+ headline.Tags = strings.FieldsFunc(m[2], func(r rune) bool { return r == ':' })
+ }
+
+ headline.Title = d.parseInline(text)
+
+ stop := func(d *Document, i int) bool {
+ return parentStop(d, i) || d.tokens[i].kind == "headline" && d.tokens[i].lvl <= headline.Lvl
+ }
+ consumed, nodes := d.parseMany(i+1, stop)
+ if len(nodes) > 0 {
+ if d, ok := nodes[0].(PropertyDrawer); ok {
+ headline.Properties = &d
+ nodes = nodes[1:]
+ }
+ }
+ headline.Children = nodes
+ return consumed + 1, headline
+}
+
+func (h Headline) ID() string {
+ if customID, ok := h.Properties.Get("CUSTOM_ID"); ok {
+ return customID
+ }
+ return fmt.Sprintf("headline-%d", h.Index)
+}
+
+func (parent *Section) add(current *Section) {
+ if parent.Headline == nil || parent.Headline.Lvl < current.Headline.Lvl {
+ parent.Children = append(parent.Children, current)
+ current.Parent = parent
+ } else {
+ parent.Parent.add(current)
+ }
+}
+
+func (n Headline) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/html_entity.go b/vendor/github.com/niklasfasching/go-org/org/html_entity.go
new file mode 100644
index 0000000000..484059b28d
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/html_entity.go
@@ -0,0 +1,437 @@
+package org
+
+import "strings"
+
+var htmlEntityReplacer *strings.Replacer
+
+func init() {
+ htmlEntities = append(htmlEntities,
+ "---", "—",
+ "--", "–",
+ "...", "…",
+ )
+ htmlEntityReplacer = strings.NewReplacer(htmlEntities...)
+}
+
+/*
+Generated & copied over using the following elisp
+(Setting up go generate seems like a waste for now - I call YAGNI on that one)
+
+(insert (mapconcat
+ (lambda (entity) (concat "`\\" (car entity) "`, `" (nth 6 entity) "`")) ; entity -> utf8
+ (remove-if-not 'listp org-entities)
+ ",\n"))
+*/
+var htmlEntities = []string{
+ `\Agrave`, `À`,
+ `\agrave`, `à`,
+ `\Aacute`, `Á`,
+ `\aacute`, `á`,
+ `\Acirc`, `Â`,
+ `\acirc`, `â`,
+ `\Amacr`, `Ã`,
+ `\amacr`, `ã`,
+ `\Atilde`, `Ã`,
+ `\atilde`, `ã`,
+ `\Auml`, `Ä`,
+ `\auml`, `ä`,
+ `\Aring`, `Å`,
+ `\AA`, `Å`,
+ `\aring`, `å`,
+ `\AElig`, `Æ`,
+ `\aelig`, `æ`,
+ `\Ccedil`, `Ç`,
+ `\ccedil`, `ç`,
+ `\Egrave`, `È`,
+ `\egrave`, `è`,
+ `\Eacute`, `É`,
+ `\eacute`, `é`,
+ `\Ecirc`, `Ê`,
+ `\ecirc`, `ê`,
+ `\Euml`, `Ë`,
+ `\euml`, `ë`,
+ `\Igrave`, `Ì`,
+ `\igrave`, `ì`,
+ `\Iacute`, `Í`,
+ `\iacute`, `í`,
+ `\Icirc`, `Î`,
+ `\icirc`, `î`,
+ `\Iuml`, `Ï`,
+ `\iuml`, `ï`,
+ `\Ntilde`, `Ñ`,
+ `\ntilde`, `ñ`,
+ `\Ograve`, `Ò`,
+ `\ograve`, `ò`,
+ `\Oacute`, `Ó`,
+ `\oacute`, `ó`,
+ `\Ocirc`, `Ô`,
+ `\ocirc`, `ô`,
+ `\Otilde`, `Õ`,
+ `\otilde`, `õ`,
+ `\Ouml`, `Ö`,
+ `\ouml`, `ö`,
+ `\Oslash`, `Ø`,
+ `\oslash`, `ø`,
+ `\OElig`, `Œ`,
+ `\oelig`, `œ`,
+ `\Scaron`, `Š`,
+ `\scaron`, `š`,
+ `\szlig`, `ß`,
+ `\Ugrave`, `Ù`,
+ `\ugrave`, `ù`,
+ `\Uacute`, `Ú`,
+ `\uacute`, `ú`,
+ `\Ucirc`, `Û`,
+ `\ucirc`, `û`,
+ `\Uuml`, `Ü`,
+ `\uuml`, `ü`,
+ `\Yacute`, `Ý`,
+ `\yacute`, `ý`,
+ `\Yuml`, `Ÿ`,
+ `\yuml`, `ÿ`,
+ `\fnof`, `ƒ`,
+ `\real`, `ℜ`,
+ `\image`, `ℑ`,
+ `\weierp`, `℘`,
+ `\ell`, `ℓ`,
+ `\imath`, `ı`,
+ `\jmath`, `ȷ`,
+ `\Alpha`, `Α`,
+ `\alpha`, `α`,
+ `\Beta`, `Β`,
+ `\beta`, `β`,
+ `\Gamma`, `Γ`,
+ `\gamma`, `γ`,
+ `\Delta`, `Δ`,
+ `\delta`, `δ`,
+ `\Epsilon`, `Ε`,
+ `\epsilon`, `ε`,
+ `\varepsilon`, `ε`,
+ `\Zeta`, `Ζ`,
+ `\zeta`, `ζ`,
+ `\Eta`, `Η`,
+ `\eta`, `η`,
+ `\Theta`, `Θ`,
+ `\theta`, `θ`,
+ `\thetasym`, `ϑ`,
+ `\vartheta`, `ϑ`,
+ `\Iota`, `Ι`,
+ `\iota`, `ι`,
+ `\Kappa`, `Κ`,
+ `\kappa`, `κ`,
+ `\Lambda`, `Λ`,
+ `\lambda`, `λ`,
+ `\Mu`, `Μ`,
+ `\mu`, `μ`,
+ `\nu`, `ν`,
+ `\Nu`, `Ν`,
+ `\Xi`, `Ξ`,
+ `\xi`, `ξ`,
+ `\Omicron`, `Ο`,
+ `\omicron`, `ο`,
+ `\Pi`, `Π`,
+ `\pi`, `π`,
+ `\Rho`, `Ρ`,
+ `\rho`, `ρ`,
+ `\Sigma`, `Σ`,
+ `\sigma`, `σ`,
+ `\sigmaf`, `ς`,
+ `\varsigma`, `ς`,
+ `\Tau`, `Τ`,
+ `\Upsilon`, `Υ`,
+ `\upsih`, `ϒ`,
+ `\upsilon`, `υ`,
+ `\Phi`, `Φ`,
+ `\phi`, `ɸ`,
+ `\varphi`, `φ`,
+ `\Chi`, `Χ`,
+ `\chi`, `χ`,
+ `\acutex`, `𝑥́`,
+ `\Psi`, `Ψ`,
+ `\psi`, `ψ`,
+ `\tau`, `τ`,
+ `\Omega`, `Ω`,
+ `\omega`, `ω`,
+ `\piv`, `ϖ`,
+ `\varpi`, `ϖ`,
+ `\partial`, `∂`,
+ `\alefsym`, `ℵ`,
+ `\aleph`, `ℵ`,
+ `\gimel`, `ℷ`,
+ `\beth`, `ב`,
+ `\dalet`, `ד`,
+ `\ETH`, `Ð`,
+ `\eth`, `ð`,
+ `\THORN`, `Þ`,
+ `\thorn`, `þ`,
+ `\dots`, `…`,
+ `\cdots`, `⋯`,
+ `\hellip`, `…`,
+ `\middot`, `·`,
+ `\iexcl`, `¡`,
+ `\iquest`, `¿`,
+ `\shy`, ``,
+ `\ndash`, `–`,
+ `\mdash`, `—`,
+ `\quot`, `"`,
+ `\acute`, `´`,
+ `\ldquo`, `“`,
+ `\rdquo`, `”`,
+ `\bdquo`, `„`,
+ `\lsquo`, `‘`,
+ `\rsquo`, `’`,
+ `\sbquo`, `‚`,
+ `\laquo`, `«`,
+ `\raquo`, `»`,
+ `\lsaquo`, `‹`,
+ `\rsaquo`, `›`,
+ `\circ`, `∘`,
+ `\vert`, `|`,
+ `\vbar`, `|`,
+ `\brvbar`, `¦`,
+ `\S`, `§`,
+ `\sect`, `§`,
+ `\amp`, `&`,
+ `\lt`, `<`,
+ `\gt`, `>`,
+ `\tilde`, `~`,
+ `\slash`, `/`,
+ `\plus`, `+`,
+ `\under`, `_`,
+ `\equal`, `=`,
+ `\asciicirc`, `^`,
+ `\dagger`, `†`,
+ `\dag`, `†`,
+ `\Dagger`, `‡`,
+ `\ddag`, `‡`,
+ `\nbsp`, ` `,
+ `\ensp`, ` `,
+ `\emsp`, ` `,
+ `\thinsp`, ` `,
+ `\curren`, `¤`,
+ `\cent`, `¢`,
+ `\pound`, `£`,
+ `\yen`, `¥`,
+ `\euro`, `€`,
+ `\EUR`, `€`,
+ `\dollar`, `$`,
+ `\USD`, `$`,
+ `\copy`, `©`,
+ `\reg`, `®`,
+ `\trade`, `™`,
+ `\minus`, `−`,
+ `\pm`, `±`,
+ `\plusmn`, `±`,
+ `\times`, `×`,
+ `\frasl`, `⁄`,
+ `\colon`, `:`,
+ `\div`, `÷`,
+ `\frac12`, `½`,
+ `\frac14`, `¼`,
+ `\frac34`, `¾`,
+ `\permil`, `‰`,
+ `\sup1`, `¹`,
+ `\sup2`, `²`,
+ `\sup3`, `³`,
+ `\radic`, `√`,
+ `\sum`, `∑`,
+ `\prod`, `∏`,
+ `\micro`, `µ`,
+ `\macr`, `¯`,
+ `\deg`, `°`,
+ `\prime`, `′`,
+ `\Prime`, `″`,
+ `\infin`, `∞`,
+ `\infty`, `∞`,
+ `\prop`, `∝`,
+ `\propto`, `∝`,
+ `\not`, `¬`,
+ `\neg`, `¬`,
+ `\land`, `∧`,
+ `\wedge`, `∧`,
+ `\lor`, `∨`,
+ `\vee`, `∨`,
+ `\cap`, `∩`,
+ `\cup`, `∪`,
+ `\smile`, `⌣`,
+ `\frown`, `⌢`,
+ `\int`, `∫`,
+ `\therefore`, `∴`,
+ `\there4`, `∴`,
+ `\because`, `∵`,
+ `\sim`, `∼`,
+ `\cong`, `≅`,
+ `\simeq`, `≅`,
+ `\asymp`, `≈`,
+ `\approx`, `≈`,
+ `\ne`, `≠`,
+ `\neq`, `≠`,
+ `\equiv`, `≡`,
+ `\triangleq`, `≜`,
+ `\le`, `≤`,
+ `\leq`, `≤`,
+ `\ge`, `≥`,
+ `\geq`, `≥`,
+ `\lessgtr`, `≶`,
+ `\lesseqgtr`, `⋚`,
+ `\ll`, `≪`,
+ `\Ll`, `⋘`,
+ `\lll`, `⋘`,
+ `\gg`, `≫`,
+ `\Gg`, `⋙`,
+ `\ggg`, `⋙`,
+ `\prec`, `≺`,
+ `\preceq`, `≼`,
+ `\preccurlyeq`, `≼`,
+ `\succ`, `≻`,
+ `\succeq`, `≽`,
+ `\succcurlyeq`, `≽`,
+ `\sub`, `⊂`,
+ `\subset`, `⊂`,
+ `\sup`, `⊃`,
+ `\supset`, `⊃`,
+ `\nsub`, `⊄`,
+ `\sube`, `⊆`,
+ `\nsup`, `⊅`,
+ `\supe`, `⊇`,
+ `\setminus`, `⧵`,
+ `\forall`, `∀`,
+ `\exist`, `∃`,
+ `\exists`, `∃`,
+ `\nexist`, `∄`,
+ `\nexists`, `∄`,
+ `\empty`, `∅`,
+ `\emptyset`, `∅`,
+ `\isin`, `∈`,
+ `\in`, `∈`,
+ `\notin`, `∉`,
+ `\ni`, `∋`,
+ `\nabla`, `∇`,
+ `\ang`, `∠`,
+ `\angle`, `∠`,
+ `\perp`, `⊥`,
+ `\parallel`, `∥`,
+ `\sdot`, `⋅`,
+ `\cdot`, `⋅`,
+ `\lceil`, `⌈`,
+ `\rceil`, `⌉`,
+ `\lfloor`, `⌊`,
+ `\rfloor`, `⌋`,
+ `\lang`, `⟨`,
+ `\rang`, `⟩`,
+ `\langle`, `⟨`,
+ `\rangle`, `⟩`,
+ `\hbar`, `ℏ`,
+ `\mho`, `℧`,
+ `\larr`, `←`,
+ `\leftarrow`, `←`,
+ `\gets`, `←`,
+ `\lArr`, `⇐`,
+ `\Leftarrow`, `⇐`,
+ `\uarr`, `↑`,
+ `\uparrow`, `↑`,
+ `\uArr`, `⇑`,
+ `\Uparrow`, `⇑`,
+ `\rarr`, `→`,
+ `\to`, `→`,
+ `\rightarrow`, `→`,
+ `\rArr`, `⇒`,
+ `\Rightarrow`, `⇒`,
+ `\darr`, `↓`,
+ `\downarrow`, `↓`,
+ `\dArr`, `⇓`,
+ `\Downarrow`, `⇓`,
+ `\harr`, `↔`,
+ `\leftrightarrow`, `↔`,
+ `\hArr`, `⇔`,
+ `\Leftrightarrow`, `⇔`,
+ `\crarr`, `↵`,
+ `\hookleftarrow`, `↵`,
+ `\arccos`, `arccos`,
+ `\arcsin`, `arcsin`,
+ `\arctan`, `arctan`,
+ `\arg`, `arg`,
+ `\cos`, `cos`,
+ `\cosh`, `cosh`,
+ `\cot`, `cot`,
+ `\coth`, `coth`,
+ `\csc`, `csc`,
+ `\deg`, `deg`,
+ `\det`, `det`,
+ `\dim`, `dim`,
+ `\exp`, `exp`,
+ `\gcd`, `gcd`,
+ `\hom`, `hom`,
+ `\inf`, `inf`,
+ `\ker`, `ker`,
+ `\lg`, `lg`,
+ `\lim`, `lim`,
+ `\liminf`, `liminf`,
+ `\limsup`, `limsup`,
+ `\ln`, `ln`,
+ `\log`, `log`,
+ `\max`, `max`,
+ `\min`, `min`,
+ `\Pr`, `Pr`,
+ `\sec`, `sec`,
+ `\sin`, `sin`,
+ `\sinh`, `sinh`,
+ `\sup`, `sup`,
+ `\tan`, `tan`,
+ `\tanh`, `tanh`,
+ `\bull`, `•`,
+ `\bullet`, `•`,
+ `\star`, `⋆`,
+ `\lowast`, `∗`,
+ `\ast`, `*`,
+ `\odot`, `ʘ`,
+ `\oplus`, `⊕`,
+ `\otimes`, `⊗`,
+ `\check`, `✓`,
+ `\checkmark`, `✓`,
+ `\para`, `¶`,
+ `\ordf`, `ª`,
+ `\ordm`, `º`,
+ `\cedil`, `¸`,
+ `\oline`, `‾`,
+ `\uml`, `¨`,
+ `\zwnj`, ``,
+ `\zwj`, ``,
+ `\lrm`, ``,
+ `\rlm`, ``,
+ `\smiley`, `☺`,
+ `\blacksmile`, `☻`,
+ `\sad`, `☹`,
+ `\frowny`, `☹`,
+ `\clubs`, `♣`,
+ `\clubsuit`, `♣`,
+ `\spades`, `♠`,
+ `\spadesuit`, `♠`,
+ `\hearts`, `♥`,
+ `\heartsuit`, `♥`,
+ `\diams`, `◆`,
+ `\diamondsuit`, `◆`,
+ `\diamond`, `◆`,
+ `\Diamond`, `◆`,
+ `\loz`, `⧫`,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+ `\_ `, ` `,
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/html_writer.go b/vendor/github.com/niklasfasching/go-org/org/html_writer.go
new file mode 100644
index 0000000000..90a48c6b4b
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/html_writer.go
@@ -0,0 +1,504 @@
+package org
+
+import (
+ "fmt"
+ "html"
+ "log"
+ "regexp"
+ "strings"
+ "unicode"
+
+ h "golang.org/x/net/html"
+ "golang.org/x/net/html/atom"
+)
+
+// HTMLWriter exports an org document into a html document.
+type HTMLWriter struct {
+ ExtendingWriter Writer
+ HighlightCodeBlock func(source, lang string) string
+
+ strings.Builder
+ document *Document
+ htmlEscape bool
+ log *log.Logger
+ footnotes *footnotes
+}
+
+type footnotes struct {
+ mapping map[string]int
+ list []*FootnoteDefinition
+}
+
+var emphasisTags = map[string][]string{
+	"/":   []string{"<em>", "</em>"},
+	"*":   []string{"<strong>", "</strong>"},
+	"+":   []string{"<del>", "</del>"},
+	"~":   []string{"<code>", "</code>"},
+	"=":   []string{`<code class="verbatim">`, "</code>"},
+	"_":   []string{`<span style="text-decoration: underline;">`, "</span>"},
+	"_{}": []string{"<sub>", "</sub>"},
+	"^{}": []string{"<sup>", "</sup>"},
+}
+
+var listTags = map[string][]string{
+	"unordered":   []string{"<ul>", "</ul>"},
+	"ordered":     []string{"<ol>", "</ol>"},
+	"descriptive": []string{"<dl>", "</dl>"},
+}
+
+var listItemStatuses = map[string]string{
+ " ": "unchecked",
+ "-": "indeterminate",
+ "X": "checked",
+}
+
+var cleanHeadlineTitleForHTMLAnchorRegexp = regexp.MustCompile(`</?a[^>]*>`) // nested a tags are not valid HTML
+
+func NewHTMLWriter() *HTMLWriter {
+ defaultConfig := New()
+ return &HTMLWriter{
+ document: &Document{Configuration: defaultConfig},
+ log: defaultConfig.Log,
+ htmlEscape: true,
+		HighlightCodeBlock: func(source, lang string) string {
+			return fmt.Sprintf("<div class=\"highlight\">\n<pre>\n%s\n</pre>\n</div>", html.EscapeString(source))
+		},
+ footnotes: &footnotes{
+ mapping: map[string]int{},
+ },
+ }
+}
+
+func (w *HTMLWriter) emptyClone() *HTMLWriter {
+ wcopy := *w
+ wcopy.Builder = strings.Builder{}
+ return &wcopy
+}
+
+func (w *HTMLWriter) nodesAsString(nodes ...Node) string {
+ tmp := w.emptyClone()
+ WriteNodes(tmp, nodes...)
+ return tmp.String()
+}
+
+func (w *HTMLWriter) WriterWithExtensions() Writer {
+ if w.ExtendingWriter != nil {
+ return w.ExtendingWriter
+ }
+ return w
+}
+
+func (w *HTMLWriter) Before(d *Document) {
+ w.document = d
+ w.log = d.Log
+ w.WriteOutline(d)
+}
+
+func (w *HTMLWriter) After(d *Document) {
+ w.WriteFootnotes(d)
+}
+
+func (w *HTMLWriter) WriteComment(Comment) {}
+func (w *HTMLWriter) WritePropertyDrawer(PropertyDrawer) {}
+
+func (w *HTMLWriter) WriteBlock(b Block) {
+ content := ""
+ if isRawTextBlock(b.Name) {
+ exportWriter := w.emptyClone()
+ exportWriter.htmlEscape = false
+ WriteNodes(exportWriter, b.Children...)
+ content = strings.TrimRightFunc(exportWriter.String(), unicode.IsSpace)
+ } else {
+ content = w.nodesAsString(b.Children...)
+ }
+ switch name := b.Name; {
+ case name == "SRC":
+ lang := "text"
+ if len(b.Parameters) >= 1 {
+ lang = strings.ToLower(b.Parameters[0])
+ }
+ content = w.HighlightCodeBlock(content, lang)
+ w.WriteString(fmt.Sprintf("\n%s\n
\n", lang, content))
+ case name == "EXAMPLE":
+ w.WriteString(`` + "\n" + content + "\n \n")
+ case name == "EXPORT" && len(b.Parameters) >= 1 && strings.ToLower(b.Parameters[0]) == "html":
+ w.WriteString(content + "\n")
+ case name == "QUOTE":
+ w.WriteString("\n" + content + " \n")
+ case name == "CENTER":
+ w.WriteString(`` + "\n")
+ w.WriteString(content + "
\n")
+ default:
+ w.WriteString(fmt.Sprintf(``, strings.ToLower(b.Name)) + "\n")
+ w.WriteString(content + "
\n")
+ }
+}
+
+func (w *HTMLWriter) WriteDrawer(d Drawer) {
+ WriteNodes(w, d.Children...)
+}
+
+func (w *HTMLWriter) WriteKeyword(k Keyword) {
+ if k.Key == "HTML" {
+ w.WriteString(k.Value + "\n")
+ }
+}
+
+func (w *HTMLWriter) WriteInclude(i Include) {
+ WriteNodes(w, i.Resolve())
+}
+
+func (w *HTMLWriter) WriteFootnoteDefinition(f FootnoteDefinition) {
+ w.footnotes.updateDefinition(f)
+}
+
+func (w *HTMLWriter) WriteFootnotes(d *Document) {
+ if !w.document.GetOption("f") || len(w.footnotes.list) == 0 {
+ return
+ }
+ w.WriteString(`\n")
+}
+
+func (w *HTMLWriter) WriteOutline(d *Document) {
+ if w.document.GetOption("toc") && len(d.Outline.Children) != 0 {
+ w.WriteString("\n\n")
+ for _, section := range d.Outline.Children {
+ w.writeSection(section)
+ }
+ w.WriteString(" \n \n")
+ }
+}
+
+func (w *HTMLWriter) writeSection(section *Section) {
+ // NOTE: To satisfy hugo ExtractTOC() check we cannot use `\n` here. Doesn't really matter, just a note.
+ w.WriteString(" ")
+ h := section.Headline
+ title := cleanHeadlineTitleForHTMLAnchorRegexp.ReplaceAllString(w.nodesAsString(h.Title...), "")
+	w.WriteString(fmt.Sprintf("<a href=\"#%s\">%s</a>\n", h.ID(), title))
+ if len(section.Children) != 0 {
+ w.WriteString("\n")
+ for _, section := range section.Children {
+ w.writeSection(section)
+ }
+ w.WriteString(" \n")
+ }
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) WriteHeadline(h Headline) {
+ for _, excludeTag := range strings.Fields(w.document.Get("EXCLUDE_TAGS")) {
+ for _, tag := range h.Tags {
+ if excludeTag == tag {
+ return
+ }
+ }
+ }
+
+	w.WriteString(fmt.Sprintf(`<h%d id="%s">`, h.Lvl, h.ID()) + "\n")
+ if w.document.GetOption("todo") && h.Status != "" {
+ w.WriteString(fmt.Sprintf(`%s `, h.Status) + "\n")
+ }
+ if w.document.GetOption("pri") && h.Priority != "" {
+ w.WriteString(fmt.Sprintf(`[%s] `, h.Priority) + "\n")
+ }
+
+ WriteNodes(w, h.Title...)
+ if w.document.GetOption("tags") && len(h.Tags) != 0 {
+ tags := make([]string, len(h.Tags))
+ for i, tag := range h.Tags {
+ tags[i] = fmt.Sprintf(`%s `, tag)
+ }
+ w.WriteString(" ")
+ w.WriteString(fmt.Sprintf(`%s `, strings.Join(tags, " ")))
+ }
+ w.WriteString(fmt.Sprintf("\n \n", h.Lvl))
+ WriteNodes(w, h.Children...)
+}
+
+func (w *HTMLWriter) WriteText(t Text) {
+ if !w.htmlEscape {
+ w.WriteString(t.Content)
+ } else if !w.document.GetOption("e") || t.IsRaw {
+ w.WriteString(html.EscapeString(t.Content))
+ } else {
+ w.WriteString(html.EscapeString(htmlEntityReplacer.Replace(t.Content)))
+ }
+}
+
+func (w *HTMLWriter) WriteEmphasis(e Emphasis) {
+ tags, ok := emphasisTags[e.Kind]
+ if !ok {
+ panic(fmt.Sprintf("bad emphasis %#v", e))
+ }
+ w.WriteString(tags[0])
+ WriteNodes(w, e.Content...)
+ w.WriteString(tags[1])
+}
+
+func (w *HTMLWriter) WriteLatexFragment(l LatexFragment) {
+ w.WriteString(l.OpeningPair)
+ WriteNodes(w, l.Content...)
+ w.WriteString(l.ClosingPair)
+}
+
+func (w *HTMLWriter) WriteStatisticToken(s StatisticToken) {
+	w.WriteString(fmt.Sprintf(`<code class="statistic">[%s]</code>`, s.Content))
+}
+
+func (w *HTMLWriter) WriteLineBreak(l LineBreak) {
+ w.WriteString(strings.Repeat("\n", l.Count))
+}
+
+func (w *HTMLWriter) WriteExplicitLineBreak(l ExplicitLineBreak) {
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) WriteFootnoteLink(l FootnoteLink) {
+ if !w.document.GetOption("f") {
+ return
+ }
+ i := w.footnotes.add(l)
+ id := i + 1
+ w.WriteString(fmt.Sprintf(``, id, id, id))
+}
+
+func (w *HTMLWriter) WriteTimestamp(t Timestamp) {
+ if !w.document.GetOption("<") {
+ return
+ }
+ w.WriteString(`<`)
+ if t.IsDate {
+ w.WriteString(t.Time.Format(datestampFormat))
+ } else {
+ w.WriteString(t.Time.Format(timestampFormat))
+ }
+ if t.Interval != "" {
+ w.WriteString(" " + t.Interval)
+ }
+ w.WriteString(`> `)
+}
+
+func (w *HTMLWriter) WriteRegularLink(l RegularLink) {
+ url := html.EscapeString(l.URL)
+ if l.Protocol == "file" {
+ url = url[len("file:"):]
+ }
+ description := url
+ if l.Description != nil {
+ description = w.nodesAsString(l.Description...)
+ }
+ switch l.Kind() {
+	case "image":
+		w.WriteString(fmt.Sprintf(`<img src="%s" alt="%s" title="%s" />`, url, description, description))
+	case "video":
+		w.WriteString(fmt.Sprintf(`<video src="%s" title="%s">%s</video>`, url, description, description))
+	default:
+		w.WriteString(fmt.Sprintf(`<a href="%s">%s</a>`, url, description))
+ }
+}
+
+func (w *HTMLWriter) WriteList(l List) {
+ tags, ok := listTags[l.Kind]
+ if !ok {
+ panic(fmt.Sprintf("bad list kind %#v", l))
+ }
+ w.WriteString(tags[0] + "\n")
+ WriteNodes(w, l.Items...)
+ w.WriteString(tags[1] + "\n")
+}
+
+func (w *HTMLWriter) WriteListItem(li ListItem) {
+	if li.Status != "" {
+		w.WriteString(fmt.Sprintf("<li class=\"%s\">\n", listItemStatuses[li.Status]))
+	} else {
+		w.WriteString("<li>\n")
+	}
+	WriteNodes(w, li.Children...)
+	w.WriteString("</li>\n")
+}
+
+func (w *HTMLWriter) WriteDescriptiveListItem(di DescriptiveListItem) {
+ if di.Status != "" {
+ w.WriteString(fmt.Sprintf("\n", listItemStatuses[di.Status]))
+ } else {
+ w.WriteString("\n")
+ }
+
+ if len(di.Term) != 0 {
+ WriteNodes(w, di.Term...)
+ } else {
+ w.WriteString("?")
+ }
+ w.WriteString("\n \n")
+ w.WriteString(" \n")
+ WriteNodes(w, di.Details...)
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) WriteParagraph(p Paragraph) {
+ if len(p.Children) == 0 {
+ return
+ }
+ w.WriteString("")
+ if _, ok := p.Children[0].(LineBreak); !ok {
+ w.WriteString("\n")
+ }
+ WriteNodes(w, p.Children...)
+ w.WriteString("\n
\n")
+}
+
+func (w *HTMLWriter) WriteExample(e Example) {
+ w.WriteString(`` + "\n")
+ if len(e.Children) != 0 {
+ for _, n := range e.Children {
+ WriteNodes(w, n)
+ w.WriteString("\n")
+ }
+ }
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) WriteHorizontalRule(h HorizontalRule) {
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) WriteNodeWithMeta(n NodeWithMeta) {
+ out := w.nodesAsString(n.Node)
+ if p, ok := n.Node.(Paragraph); ok {
+ if len(p.Children) == 1 && isImageOrVideoLink(p.Children[0]) {
+ out = w.nodesAsString(p.Children[0])
+ }
+ }
+ for _, attributes := range n.Meta.HTMLAttributes {
+ out = w.withHTMLAttributes(out, attributes...) + "\n"
+ }
+ if len(n.Meta.Caption) != 0 {
+ caption := ""
+ for i, ns := range n.Meta.Caption {
+ if i != 0 {
+ caption += " "
+ }
+ caption += w.nodesAsString(ns...)
+ }
+ out = fmt.Sprintf("\n%s\n%s\n \n \n", out, caption)
+ }
+ w.WriteString(out)
+}
+
+func (w *HTMLWriter) WriteNodeWithName(n NodeWithName) {
+ WriteNodes(w, n.Node)
+}
+
+func (w *HTMLWriter) WriteTable(t Table) {
+ w.WriteString("\n")
+ beforeFirstContentRow := true
+ for i, row := range t.Rows {
+ if row.IsSpecial || len(row.Columns) == 0 {
+ continue
+ }
+ if beforeFirstContentRow {
+ beforeFirstContentRow = false
+ if i+1 < len(t.Rows) && len(t.Rows[i+1].Columns) == 0 {
+ w.WriteString("\n")
+ w.writeTableColumns(row.Columns, "th")
+ w.WriteString(" \n\n")
+ continue
+ } else {
+ w.WriteString(" \n")
+ }
+ }
+ w.writeTableColumns(row.Columns, "td")
+ }
+ w.WriteString(" \n
\n")
+}
+
+func (w *HTMLWriter) writeTableColumns(columns []Column, tag string) {
+ w.WriteString("\n")
+ for _, column := range columns {
+ if column.Align == "" {
+ w.WriteString(fmt.Sprintf("<%s>", tag))
+ } else {
+ w.WriteString(fmt.Sprintf(`<%s class="align-%s">`, tag, column.Align))
+ }
+ WriteNodes(w, column.Children...)
+ w.WriteString(fmt.Sprintf("%s>\n", tag))
+ }
+ w.WriteString(" \n")
+}
+
+func (w *HTMLWriter) withHTMLAttributes(input string, kvs ...string) string {
+ if len(kvs)%2 != 0 {
+ w.log.Printf("withHTMLAttributes: Len of kvs must be even: %#v", kvs)
+ return input
+ }
+ context := &h.Node{Type: h.ElementNode, Data: "body", DataAtom: atom.Body}
+ nodes, err := h.ParseFragment(strings.NewReader(strings.TrimSpace(input)), context)
+ if err != nil || len(nodes) != 1 {
+ w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, nodes, err)
+ return input
+ }
+ out, node := strings.Builder{}, nodes[0]
+ for i := 0; i < len(kvs)-1; i += 2 {
+ node.Attr = setHTMLAttribute(node.Attr, strings.TrimPrefix(kvs[i], ":"), kvs[i+1])
+ }
+ err = h.Render(&out, nodes[0])
+ if err != nil {
+ w.log.Printf("withHTMLAttributes: Could not extend attributes of %s: %v (%s)", input, node, err)
+ return input
+ }
+ return out.String()
+}
+
+func setHTMLAttribute(attributes []h.Attribute, k, v string) []h.Attribute {
+ for i, a := range attributes {
+ if strings.ToLower(a.Key) == strings.ToLower(k) {
+ switch strings.ToLower(k) {
+ case "class", "style":
+ attributes[i].Val += " " + v
+ default:
+ attributes[i].Val = v
+ }
+ return attributes
+ }
+ }
+ return append(attributes, h.Attribute{Namespace: "", Key: k, Val: v})
+}
+
+func (fs *footnotes) add(f FootnoteLink) int {
+ if i, ok := fs.mapping[f.Name]; ok && f.Name != "" {
+ return i
+ }
+ fs.list = append(fs.list, f.Definition)
+ i := len(fs.list) - 1
+ if f.Name != "" {
+ fs.mapping[f.Name] = i
+ }
+ return i
+}
+
+func (fs *footnotes) updateDefinition(f FootnoteDefinition) {
+ if i, ok := fs.mapping[f.Name]; ok {
+ fs.list[i] = &f
+ }
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/inline.go b/vendor/github.com/niklasfasching/go-org/org/inline.go
new file mode 100644
index 0000000000..02d5a15341
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/inline.go
@@ -0,0 +1,357 @@
+package org
+
+import (
+ "fmt"
+ "path"
+ "regexp"
+ "strings"
+ "time"
+ "unicode"
+)
+
+type Text struct {
+ Content string
+ IsRaw bool
+}
+
+type LineBreak struct{ Count int }
+type ExplicitLineBreak struct{}
+
+type StatisticToken struct{ Content string }
+
+type Timestamp struct {
+ Time time.Time
+ IsDate bool
+ Interval string
+}
+
+type Emphasis struct {
+ Kind string
+ Content []Node
+}
+
+type LatexFragment struct {
+ OpeningPair string
+ ClosingPair string
+ Content []Node
+}
+
+type FootnoteLink struct {
+ Name string
+ Definition *FootnoteDefinition
+}
+
+type RegularLink struct {
+ Protocol string
+ Description []Node
+ URL string
+ AutoLink bool
+}
+
+var validURLCharacters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~:/?#[]@!$&'()*+,;="
+var autolinkProtocols = regexp.MustCompile(`^(https?|ftp|file)$`)
+var imageExtensionRegexp = regexp.MustCompile(`^[.](png|gif|jpe?g|svg|tiff?)$`)
+var videoExtensionRegexp = regexp.MustCompile(`^[.](webm|mp4)$`)
+
+var subScriptSuperScriptRegexp = regexp.MustCompile(`^([_^]){([^{}]+?)}`)
+var timestampRegexp = regexp.MustCompile(`^<(\d{4}-\d{2}-\d{2})( [A-Za-z]+)?( \d{2}:\d{2})?( \+\d+[dwmy])?>`)
+var footnoteRegexp = regexp.MustCompile(`^\[fn:([\w-]*?)(:(.*?))?\]`)
+var statisticsTokenRegexp = regexp.MustCompile(`^\[(\d+/\d+|\d+%)\]`)
+var latexFragmentRegexp = regexp.MustCompile(`(?s)^\\begin{(\w+)}(.*)\\end{(\w+)}`)
+
+var timestampFormat = "2006-01-02 Mon 15:04"
+var datestampFormat = "2006-01-02 Mon"
+
+var latexFragmentPairs = map[string]string{
+ `\(`: `\)`,
+ `\[`: `\]`,
+ `$$`: `$$`,
+}
+
+func (d *Document) parseInline(input string) (nodes []Node) {
+ previous, current := 0, 0
+ for current < len(input) {
+ rewind, consumed, node := 0, 0, (Node)(nil)
+ switch input[current] {
+ case '^':
+ consumed, node = d.parseSubOrSuperScript(input, current)
+ case '_':
+ consumed, node = d.parseSubScriptOrEmphasis(input, current)
+ case '*', '/', '+':
+ consumed, node = d.parseEmphasis(input, current, false)
+ case '=', '~':
+ consumed, node = d.parseEmphasis(input, current, true)
+ case '[':
+ consumed, node = d.parseOpeningBracket(input, current)
+ case '<':
+ consumed, node = d.parseTimestamp(input, current)
+ case '\\':
+ consumed, node = d.parseExplicitLineBreakOrLatexFragment(input, current)
+ case '$':
+ consumed, node = d.parseLatexFragment(input, current)
+ case '\n':
+ consumed, node = d.parseLineBreak(input, current)
+ case ':':
+ rewind, consumed, node = d.parseAutoLink(input, current)
+ current -= rewind
+ }
+ if consumed != 0 {
+ if current > previous {
+ nodes = append(nodes, Text{input[previous:current], false})
+ }
+ if node != nil {
+ nodes = append(nodes, node)
+ }
+ current += consumed
+ previous = current
+ } else {
+ current++
+ }
+ }
+
+ if previous < len(input) {
+ nodes = append(nodes, Text{input[previous:], false})
+ }
+ return nodes
+}
+
+func (d *Document) parseRawInline(input string) (nodes []Node) {
+ previous, current := 0, 0
+ for current < len(input) {
+ if input[current] == '\n' {
+ consumed, node := d.parseLineBreak(input, current)
+ if current > previous {
+ nodes = append(nodes, Text{input[previous:current], true})
+ }
+ nodes = append(nodes, node)
+ current += consumed
+ previous = current
+ } else {
+ current++
+ }
+ }
+ if previous < len(input) {
+ nodes = append(nodes, Text{input[previous:], true})
+ }
+ return nodes
+}
+
+func (d *Document) parseLineBreak(input string, start int) (int, Node) {
+ i := start
+ for ; i < len(input) && input[i] == '\n'; i++ {
+ }
+ return i - start, LineBreak{i - start}
+}
+
+func (d *Document) parseExplicitLineBreakOrLatexFragment(input string, start int) (int, Node) {
+ switch {
+ case start+2 >= len(input):
+ case input[start+1] == '\\' && start != 0 && input[start-1] != '\n':
+ for i := start + 2; unicode.IsSpace(rune(input[i])); i++ {
+ if i >= len(input) || input[i] == '\n' {
+ return i + 1 - start, ExplicitLineBreak{}
+ }
+ }
+ case input[start+1] == '(' || input[start+1] == '[':
+ return d.parseLatexFragment(input, start)
+ case strings.Index(input[start:], `\begin{`) == 0:
+ if m := latexFragmentRegexp.FindStringSubmatch(input[start:]); m != nil {
+ if open, content, close := m[1], m[2], m[3]; open == close {
+ openingPair, closingPair := `\begin{`+open+`}`, `\end{`+close+`}`
+ i := strings.Index(input[start:], closingPair)
+ return i + len(closingPair), LatexFragment{openingPair, closingPair, d.parseRawInline(content)}
+ }
+ }
+ }
+ return 0, nil
+}
+
+func (d *Document) parseLatexFragment(input string, start int) (int, Node) {
+ if start+2 >= len(input) {
+ return 0, nil
+ }
+ openingPair := input[start : start+2]
+ closingPair := latexFragmentPairs[openingPair]
+ if i := strings.Index(input[start+2:], closingPair); i != -1 {
+ content := d.parseRawInline(input[start+2 : start+2+i])
+ return i + 2 + 2, LatexFragment{openingPair, closingPair, content}
+ }
+ return 0, nil
+}
+
+func (d *Document) parseSubOrSuperScript(input string, start int) (int, Node) {
+ if m := subScriptSuperScriptRegexp.FindStringSubmatch(input[start:]); m != nil {
+ return len(m[2]) + 3, Emphasis{m[1] + "{}", []Node{Text{m[2], false}}}
+ }
+ return 0, nil
+}
+
+func (d *Document) parseSubScriptOrEmphasis(input string, start int) (int, Node) {
+ if consumed, node := d.parseSubOrSuperScript(input, start); consumed != 0 {
+ return consumed, node
+ }
+ return d.parseEmphasis(input, start, false)
+}
+
+func (d *Document) parseOpeningBracket(input string, start int) (int, Node) {
+ if len(input[start:]) >= 2 && input[start] == '[' && input[start+1] == '[' {
+ return d.parseRegularLink(input, start)
+ } else if footnoteRegexp.MatchString(input[start:]) {
+ return d.parseFootnoteReference(input, start)
+ } else if statisticsTokenRegexp.MatchString(input[start:]) {
+ return d.parseStatisticToken(input, start)
+ }
+ return 0, nil
+}
+
+func (d *Document) parseFootnoteReference(input string, start int) (int, Node) {
+ if m := footnoteRegexp.FindStringSubmatch(input[start:]); m != nil {
+ name, definition := m[1], m[3]
+ if name == "" && definition == "" {
+ return 0, nil
+ }
+ link := FootnoteLink{name, nil}
+ if definition != "" {
+ link.Definition = &FootnoteDefinition{name, []Node{Paragraph{d.parseInline(definition)}}, true}
+ }
+ return len(m[0]), link
+ }
+ return 0, nil
+}
+
+func (d *Document) parseStatisticToken(input string, start int) (int, Node) {
+ if m := statisticsTokenRegexp.FindStringSubmatch(input[start:]); m != nil {
+ return len(m[1]) + 2, StatisticToken{m[1]}
+ }
+ return 0, nil
+}
+
+func (d *Document) parseAutoLink(input string, start int) (int, int, Node) {
+ if !d.AutoLink || start == 0 || len(input[start:]) < 3 || input[start:start+3] != "://" {
+ return 0, 0, nil
+ }
+ protocolStart, protocol := start-1, ""
+ for ; protocolStart > 0; protocolStart-- {
+ if !unicode.IsLetter(rune(input[protocolStart])) {
+ protocolStart++
+ break
+ }
+ }
+ if m := autolinkProtocols.FindStringSubmatch(input[protocolStart:start]); m != nil {
+ protocol = m[1]
+ } else {
+ return 0, 0, nil
+ }
+ end := start
+ for ; end < len(input) && strings.ContainsRune(validURLCharacters, rune(input[end])); end++ {
+ }
+ path := input[start:end]
+ if path == "://" {
+ return 0, 0, nil
+ }
+ return len(protocol), len(path + protocol), RegularLink{protocol, nil, protocol + path, true}
+}
+
+func (d *Document) parseRegularLink(input string, start int) (int, Node) {
+ input = input[start:]
+ if len(input) < 3 || input[:2] != "[[" || input[2] == '[' {
+ return 0, nil
+ }
+ end := strings.Index(input, "]]")
+ if end == -1 {
+ return 0, nil
+ }
+ rawLinkParts := strings.Split(input[2:end], "][")
+ description, link := ([]Node)(nil), rawLinkParts[0]
+ if len(rawLinkParts) == 2 {
+ link, description = rawLinkParts[0], d.parseInline(rawLinkParts[1])
+ }
+ if strings.ContainsRune(link, '\n') {
+ return 0, nil
+ }
+ consumed := end + 2
+ protocol, linkParts := "", strings.SplitN(link, ":", 2)
+ if len(linkParts) == 2 {
+ protocol = linkParts[0]
+ }
+ return consumed, RegularLink{protocol, description, link, false}
+}
+
+func (d *Document) parseTimestamp(input string, start int) (int, Node) {
+ if m := timestampRegexp.FindStringSubmatch(input[start:]); m != nil {
+ ddmmyy, hhmm, interval, isDate := m[1], m[3], strings.TrimSpace(m[4]), false
+ if hhmm == "" {
+ hhmm, isDate = "00:00", true
+ }
+ t, err := time.Parse(timestampFormat, fmt.Sprintf("%s Mon %s", ddmmyy, hhmm))
+ if err != nil {
+ return 0, nil
+ }
+ timestamp := Timestamp{t, isDate, interval}
+ return len(m[0]), timestamp
+ }
+ return 0, nil
+}
+
+func (d *Document) parseEmphasis(input string, start int, isRaw bool) (int, Node) {
+ marker, i := input[start], start
+ if !hasValidPreAndBorderChars(input, i) {
+ return 0, nil
+ }
+ for i, consumedNewLines := i+1, 0; i < len(input) && consumedNewLines <= d.MaxEmphasisNewLines; i++ {
+ if input[i] == '\n' {
+ consumedNewLines++
+ }
+
+ if input[i] == marker && i != start+1 && hasValidPostAndBorderChars(input, i) {
+ if isRaw {
+ return i + 1 - start, Emphasis{input[start : start+1], d.parseRawInline(input[start+1 : i])}
+ }
+ return i + 1 - start, Emphasis{input[start : start+1], d.parseInline(input[start+1 : i])}
+ }
+ }
+ return 0, nil
+}
+
+// see org-emphasis-regexp-components (emacs elisp variable)
+
+func hasValidPreAndBorderChars(input string, i int) bool {
+ return (i+1 >= len(input) || isValidBorderChar(rune(input[i+1]))) && (i == 0 || isValidPreChar(rune(input[i-1])))
+}
+
+func hasValidPostAndBorderChars(input string, i int) bool {
+ return (i == 0 || isValidBorderChar(rune(input[i-1]))) && (i+1 >= len(input) || isValidPostChar(rune(input[i+1])))
+}
+
+func isValidPreChar(r rune) bool {
+ return unicode.IsSpace(r) || strings.ContainsRune(`-({'"`, r)
+}
+
+func isValidPostChar(r rune) bool {
+ return unicode.IsSpace(r) || strings.ContainsRune(`-.,:!?;'")}[`, r)
+}
+
+func isValidBorderChar(r rune) bool { return !unicode.IsSpace(r) }
+
+func (l RegularLink) Kind() string {
+ if p := l.Protocol; l.Description != nil || (p != "" && p != "file" && p != "http" && p != "https") {
+ return "regular"
+ }
+ if imageExtensionRegexp.MatchString(path.Ext(l.URL)) {
+ return "image"
+ }
+ if videoExtensionRegexp.MatchString(path.Ext(l.URL)) {
+ return "video"
+ }
+ return "regular"
+}
+
+func (n Text) String() string { return orgWriter.nodesAsString(n) }
+func (n LineBreak) String() string { return orgWriter.nodesAsString(n) }
+func (n ExplicitLineBreak) String() string { return orgWriter.nodesAsString(n) }
+func (n StatisticToken) String() string { return orgWriter.nodesAsString(n) }
+func (n Emphasis) String() string { return orgWriter.nodesAsString(n) }
+func (n LatexFragment) String() string { return orgWriter.nodesAsString(n) }
+func (n FootnoteLink) String() string { return orgWriter.nodesAsString(n) }
+func (n RegularLink) String() string { return orgWriter.nodesAsString(n) }
+func (n Timestamp) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/keyword.go b/vendor/github.com/niklasfasching/go-org/org/keyword.go
new file mode 100644
index 0000000000..776241797b
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/keyword.go
@@ -0,0 +1,184 @@
+package org
+
+import (
+ "bytes"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+type Comment struct{ Content string }
+
+type Keyword struct {
+ Key string
+ Value string
+}
+
+type NodeWithName struct {
+ Name string
+ Node Node
+}
+
+type NodeWithMeta struct {
+ Node Node
+ Meta Metadata
+}
+
+type Metadata struct {
+ Caption [][]Node
+ HTMLAttributes [][]string
+}
+
+type Include struct {
+ Keyword
+ Resolve func() Node
+}
+
+var keywordRegexp = regexp.MustCompile(`^(\s*)#\+([^:]+):(\s+(.*)|$)`)
+var commentRegexp = regexp.MustCompile(`^(\s*)#(.*)`)
+
+var includeFileRegexp = regexp.MustCompile(`(?i)^"([^"]+)" (src|example|export) (\w+)$`)
+var attributeRegexp = regexp.MustCompile(`(?:^|\s+)(:[-\w]+)\s+(.*)$`)
+
+func lexKeywordOrComment(line string) (token, bool) {
+ if m := keywordRegexp.FindStringSubmatch(line); m != nil {
+ return token{"keyword", len(m[1]), m[2], m}, true
+ } else if m := commentRegexp.FindStringSubmatch(line); m != nil {
+ return token{"comment", len(m[1]), m[2], m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseComment(i int, stop stopFn) (int, Node) {
+ return 1, Comment{d.tokens[i].content}
+}
+
+func (d *Document) parseKeyword(i int, stop stopFn) (int, Node) {
+ k := parseKeyword(d.tokens[i])
+ switch k.Key {
+ case "NAME":
+ return d.parseNodeWithName(k, i, stop)
+ case "SETUPFILE":
+ return d.loadSetupFile(k)
+ case "INCLUDE":
+ return d.parseInclude(k)
+ case "CAPTION", "ATTR_HTML":
+ consumed, node := d.parseAffiliated(i, stop)
+ if consumed != 0 {
+ return consumed, node
+ }
+ fallthrough
+ default:
+ if _, ok := d.BufferSettings[k.Key]; ok {
+ d.BufferSettings[k.Key] = strings.Join([]string{d.BufferSettings[k.Key], k.Value}, "\n")
+ } else {
+ d.BufferSettings[k.Key] = k.Value
+ }
+ return 1, k
+ }
+}
+
+func (d *Document) parseNodeWithName(k Keyword, i int, stop stopFn) (int, Node) {
+ if stop(d, i+1) {
+ return 0, nil
+ }
+ consumed, node := d.parseOne(i+1, stop)
+ if consumed == 0 || node == nil {
+ return 0, nil
+ }
+ d.NamedNodes[k.Value] = node
+ return consumed + 1, NodeWithName{k.Value, node}
+}
+
+func (d *Document) parseAffiliated(i int, stop stopFn) (int, Node) {
+ start, meta := i, Metadata{}
+ for ; !stop(d, i) && d.tokens[i].kind == "keyword"; i++ {
+ switch k := parseKeyword(d.tokens[i]); k.Key {
+ case "CAPTION":
+ meta.Caption = append(meta.Caption, d.parseInline(k.Value))
+ case "ATTR_HTML":
+ attributes, rest := []string{}, k.Value
+ for {
+ if k, m := "", attributeRegexp.FindStringSubmatch(rest); m != nil {
+ k, rest = m[1], m[2]
+ attributes = append(attributes, k)
+ if v, m := "", attributeRegexp.FindStringSubmatchIndex(rest); m != nil {
+ v, rest = rest[:m[0]], rest[m[0]:]
+ attributes = append(attributes, v)
+ } else {
+ attributes = append(attributes, strings.TrimSpace(rest))
+ break
+ }
+ } else {
+ break
+ }
+ }
+ meta.HTMLAttributes = append(meta.HTMLAttributes, attributes)
+ default:
+ return 0, nil
+ }
+ }
+ if stop(d, i) {
+ return 0, nil
+ }
+ consumed, node := d.parseOne(i, stop)
+ if consumed == 0 || node == nil {
+ return 0, nil
+ }
+ i += consumed
+ return i - start, NodeWithMeta{node, meta}
+}
+
+func parseKeyword(t token) Keyword {
+ k, v := t.matches[2], t.matches[4]
+ return Keyword{strings.ToUpper(k), strings.TrimSpace(v)}
+}
+
+func (d *Document) parseInclude(k Keyword) (int, Node) {
+ resolve := func() Node {
+ d.Log.Printf("Bad include %#v", k)
+ return k
+ }
+ if m := includeFileRegexp.FindStringSubmatch(k.Value); m != nil {
+ path, kind, lang := m[1], m[2], m[3]
+ if !filepath.IsAbs(path) {
+ path = filepath.Join(filepath.Dir(d.Path), path)
+ }
+ resolve = func() Node {
+ bs, err := d.ReadFile(path)
+ if err != nil {
+ d.Log.Printf("Bad include %#v: %s", k, err)
+ return k
+ }
+ return Block{strings.ToUpper(kind), []string{lang}, d.parseRawInline(string(bs))}
+ }
+ }
+ return 1, Include{k, resolve}
+}
+
+func (d *Document) loadSetupFile(k Keyword) (int, Node) {
+ path := k.Value
+ if !filepath.IsAbs(path) {
+ path = filepath.Join(filepath.Dir(d.Path), path)
+ }
+ bs, err := d.ReadFile(path)
+ if err != nil {
+ d.Log.Printf("Bad setup file: %#v: %s", k, err)
+ return 1, k
+ }
+ setupDocument := d.Configuration.Parse(bytes.NewReader(bs), path)
+ if err := setupDocument.Error; err != nil {
+ d.Log.Printf("Bad setup file: %#v: %s", k, err)
+ return 1, k
+ }
+ for k, v := range setupDocument.BufferSettings {
+ d.BufferSettings[k] = v
+ }
+ return 1, k
+}
+
+func (n Comment) String() string { return orgWriter.nodesAsString(n) }
+func (n Keyword) String() string { return orgWriter.nodesAsString(n) }
+func (n NodeWithMeta) String() string { return orgWriter.nodesAsString(n) }
+func (n NodeWithName) String() string { return orgWriter.nodesAsString(n) }
+func (n Include) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/list.go b/vendor/github.com/niklasfasching/go-org/org/list.go
new file mode 100644
index 0000000000..6ba28f6fe4
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/list.go
@@ -0,0 +1,114 @@
+package org
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+type List struct {
+ Kind string
+ Items []Node
+}
+
+type ListItem struct {
+ Bullet string
+ Status string
+ Children []Node
+}
+
+type DescriptiveListItem struct {
+ Bullet string
+ Status string
+ Term []Node
+ Details []Node
+}
+
+var unorderedListRegexp = regexp.MustCompile(`^(\s*)([+*-])(\s+(.*)|$)`)
+var orderedListRegexp = regexp.MustCompile(`^(\s*)(([0-9]+|[a-zA-Z])[.)])(\s+(.*)|$)`)
+var descriptiveListItemRegexp = regexp.MustCompile(`\s::(\s|$)`)
+var listItemStatusRegexp = regexp.MustCompile(`\[( |X|-)\]\s`)
+
+func lexList(line string) (token, bool) {
+ if m := unorderedListRegexp.FindStringSubmatch(line); m != nil {
+ return token{"unorderedList", len(m[1]), m[4], m}, true
+ } else if m := orderedListRegexp.FindStringSubmatch(line); m != nil {
+ return token{"orderedList", len(m[1]), m[5], m}, true
+ }
+ return nilToken, false
+}
+
+func isListToken(t token) bool {
+ return t.kind == "unorderedList" || t.kind == "orderedList"
+}
+
+func listKind(t token) (string, string) {
+ kind := ""
+ switch bullet := t.matches[2]; {
+ case bullet == "*" || bullet == "+" || bullet == "-":
+ kind = "unordered"
+ case unicode.IsLetter(rune(bullet[0])), unicode.IsDigit(rune(bullet[0])):
+ kind = "ordered"
+ default:
+ panic(fmt.Sprintf("bad list bullet '%s': %#v", bullet, t))
+ }
+ if descriptiveListItemRegexp.MatchString(t.content) {
+ return kind, "descriptive"
+ }
+ return kind, kind
+}
+
+func (d *Document) parseList(i int, parentStop stopFn) (int, Node) {
+ start, lvl := i, d.tokens[i].lvl
+ listMainKind, kind := listKind(d.tokens[i])
+ list := List{Kind: kind}
+ stop := func(*Document, int) bool {
+ if parentStop(d, i) || d.tokens[i].lvl != lvl || !isListToken(d.tokens[i]) {
+ return true
+ }
+ itemMainKind, _ := listKind(d.tokens[i])
+ return itemMainKind != listMainKind
+ }
+ for !stop(d, i) {
+ consumed, node := d.parseListItem(list, i, parentStop)
+ i += consumed
+ list.Items = append(list.Items, node)
+ }
+ return i - start, list
+}
+
+func (d *Document) parseListItem(l List, i int, parentStop stopFn) (int, Node) {
+ start, nodes, bullet := i, []Node{}, d.tokens[i].matches[2]
+ minIndent, dterm, content, status := d.tokens[i].lvl+len(bullet), "", d.tokens[i].content, ""
+ if m := listItemStatusRegexp.FindStringSubmatch(content); m != nil {
+ status, content = m[1], content[len("[ ] "):]
+ }
+ if l.Kind == "descriptive" {
+ if m := descriptiveListItemRegexp.FindStringIndex(content); m != nil {
+ dterm, content = content[:m[0]], content[m[1]:]
+ }
+ }
+
+ d.tokens[i] = tokenize(strings.Repeat(" ", minIndent) + content)
+ stop := func(d *Document, i int) bool {
+ if parentStop(d, i) {
+ return true
+ }
+ t := d.tokens[i]
+ return t.lvl < minIndent && !(t.kind == "text" && t.content == "")
+ }
+ for !stop(d, i) && (i <= start+1 || !isSecondBlankLine(d, i)) {
+ consumed, node := d.parseOne(i, stop)
+ i += consumed
+ nodes = append(nodes, node)
+ }
+ if l.Kind == "descriptive" {
+ return i - start, DescriptiveListItem{bullet, status, d.parseInline(dterm), nodes}
+ }
+ return i - start, ListItem{bullet, status, nodes}
+}
+
+func (n List) String() string { return orgWriter.nodesAsString(n) }
+func (n ListItem) String() string { return orgWriter.nodesAsString(n) }
+func (n DescriptiveListItem) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/org_writer.go b/vendor/github.com/niklasfasching/go-org/org/org_writer.go
new file mode 100644
index 0000000000..d574cda527
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/org_writer.go
@@ -0,0 +1,334 @@
+package org
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// OrgWriter export an org document into pretty printed org document.
+type OrgWriter struct {
+ ExtendingWriter Writer
+ TagsColumn int
+
+ strings.Builder
+ indent string
+}
+
+var emphasisOrgBorders = map[string][]string{
+ "_": []string{"_", "_"},
+ "*": []string{"*", "*"},
+ "/": []string{"/", "/"},
+ "+": []string{"+", "+"},
+ "~": []string{"~", "~"},
+ "=": []string{"=", "="},
+ "_{}": []string{"_{", "}"},
+ "^{}": []string{"^{", "}"},
+}
+
+func NewOrgWriter() *OrgWriter {
+ return &OrgWriter{
+ TagsColumn: 77,
+ }
+}
+
+func (w *OrgWriter) WriterWithExtensions() Writer {
+ if w.ExtendingWriter != nil {
+ return w.ExtendingWriter
+ }
+ return w
+}
+
+func (w *OrgWriter) Before(d *Document) {}
+func (w *OrgWriter) After(d *Document) {}
+
+func (w *OrgWriter) emptyClone() *OrgWriter {
+ wcopy := *w
+ wcopy.Builder = strings.Builder{}
+ return &wcopy
+}
+
+func (w *OrgWriter) nodesAsString(nodes ...Node) string {
+ tmp := w.emptyClone()
+ WriteNodes(tmp, nodes...)
+ return tmp.String()
+}
+
+func (w *OrgWriter) WriteHeadline(h Headline) {
+ tmp := w.emptyClone()
+ tmp.WriteString(strings.Repeat("*", h.Lvl))
+ if h.Status != "" {
+ tmp.WriteString(" " + h.Status)
+ }
+ if h.Priority != "" {
+ tmp.WriteString(" [#" + h.Priority + "]")
+ }
+ tmp.WriteString(" ")
+ WriteNodes(tmp, h.Title...)
+ hString := tmp.String()
+ if len(h.Tags) != 0 {
+ tString := ":" + strings.Join(h.Tags, ":") + ":"
+ if n := w.TagsColumn - len(tString) - len(hString); n > 0 {
+ w.WriteString(hString + strings.Repeat(" ", n) + tString)
+ } else {
+ w.WriteString(hString + " " + tString)
+ }
+ } else {
+ w.WriteString(hString)
+ }
+ w.WriteString("\n")
+ if len(h.Children) != 0 {
+ w.WriteString(w.indent)
+ }
+ if h.Properties != nil {
+ WriteNodes(w, *h.Properties)
+ }
+ WriteNodes(w, h.Children...)
+}
+
+func (w *OrgWriter) WriteBlock(b Block) {
+ w.WriteString(w.indent + "#+BEGIN_" + b.Name)
+ if len(b.Parameters) != 0 {
+ w.WriteString(" " + strings.Join(b.Parameters, " "))
+ }
+ w.WriteString("\n")
+ if isRawTextBlock(b.Name) {
+ w.WriteString(w.indent)
+ }
+ WriteNodes(w, b.Children...)
+ if !isRawTextBlock(b.Name) {
+ w.WriteString(w.indent)
+ }
+ w.WriteString("#+END_" + b.Name + "\n")
+}
+
+func (w *OrgWriter) WriteDrawer(d Drawer) {
+ w.WriteString(w.indent + ":" + d.Name + ":\n")
+ WriteNodes(w, d.Children...)
+ w.WriteString(w.indent + ":END:\n")
+}
+
+func (w *OrgWriter) WritePropertyDrawer(d PropertyDrawer) {
+ w.WriteString(":PROPERTIES:\n")
+ for _, kvPair := range d.Properties {
+ k, v := kvPair[0], kvPair[1]
+ if v != "" {
+ v = " " + v
+ }
+ w.WriteString(fmt.Sprintf(":%s:%s\n", k, v))
+ }
+ w.WriteString(":END:\n")
+}
+
+func (w *OrgWriter) WriteFootnoteDefinition(f FootnoteDefinition) {
+ w.WriteString(fmt.Sprintf("[fn:%s]", f.Name))
+ content := w.nodesAsString(f.Children...)
+ if content != "" && !unicode.IsSpace(rune(content[0])) {
+ w.WriteString(" ")
+ }
+ w.WriteString(content)
+}
+
+func (w *OrgWriter) WriteParagraph(p Paragraph) {
+ content := w.nodesAsString(p.Children...)
+ if len(content) > 0 && content[0] != '\n' {
+ w.WriteString(w.indent)
+ }
+ w.WriteString(content + "\n")
+}
+
+func (w *OrgWriter) WriteExample(e Example) {
+ for _, n := range e.Children {
+ w.WriteString(w.indent + ":")
+ if content := w.nodesAsString(n); content != "" {
+ w.WriteString(" " + content)
+ }
+ w.WriteString("\n")
+ }
+}
+
+func (w *OrgWriter) WriteKeyword(k Keyword) {
+ w.WriteString(w.indent + "#+" + k.Key + ":")
+ if k.Value != "" {
+ w.WriteString(" " + k.Value)
+ }
+ w.WriteString("\n")
+}
+
+func (w *OrgWriter) WriteInclude(i Include) {
+ w.WriteKeyword(i.Keyword)
+}
+
+func (w *OrgWriter) WriteNodeWithMeta(n NodeWithMeta) {
+ for _, ns := range n.Meta.Caption {
+ w.WriteString("#+CAPTION: ")
+ WriteNodes(w, ns...)
+ w.WriteString("\n")
+ }
+ for _, attributes := range n.Meta.HTMLAttributes {
+ w.WriteString("#+ATTR_HTML: ")
+ w.WriteString(strings.Join(attributes, " ") + "\n")
+ }
+ WriteNodes(w, n.Node)
+}
+
+func (w *OrgWriter) WriteNodeWithName(n NodeWithName) {
+ w.WriteString(fmt.Sprintf("#+NAME: %s\n", n.Name))
+ WriteNodes(w, n.Node)
+}
+
+func (w *OrgWriter) WriteComment(c Comment) {
+ w.WriteString(w.indent + "#" + c.Content + "\n")
+}
+
+func (w *OrgWriter) WriteList(l List) { WriteNodes(w, l.Items...) }
+
+func (w *OrgWriter) WriteListItem(li ListItem) {
+ liWriter := w.emptyClone()
+ liWriter.indent = w.indent + strings.Repeat(" ", len(li.Bullet)+1)
+ WriteNodes(liWriter, li.Children...)
+ content := strings.TrimPrefix(liWriter.String(), liWriter.indent)
+ w.WriteString(w.indent + li.Bullet)
+ if li.Status != "" {
+ w.WriteString(fmt.Sprintf(" [%s]", li.Status))
+ }
+ if len(content) > 0 && content[0] == '\n' {
+ w.WriteString(content)
+ } else {
+ w.WriteString(" " + content)
+ }
+}
+
+func (w *OrgWriter) WriteDescriptiveListItem(di DescriptiveListItem) {
+ w.WriteString(w.indent + di.Bullet)
+ if di.Status != "" {
+ w.WriteString(fmt.Sprintf(" [%s]", di.Status))
+ }
+ indent := w.indent + strings.Repeat(" ", len(di.Bullet)+1)
+ if len(di.Term) != 0 {
+ term := w.nodesAsString(di.Term...)
+ w.WriteString(" " + term + " ::")
+ indent = indent + strings.Repeat(" ", len(term)+4)
+ }
+ diWriter := w.emptyClone()
+ diWriter.indent = indent
+ WriteNodes(diWriter, di.Details...)
+ details := strings.TrimPrefix(diWriter.String(), diWriter.indent)
+ if len(details) > 0 && details[0] == '\n' {
+ w.WriteString(details)
+ } else {
+ w.WriteString(" " + details)
+ }
+}
+
+func (w *OrgWriter) WriteTable(t Table) {
+ for _, row := range t.Rows {
+ w.WriteString(w.indent)
+ if len(row.Columns) == 0 {
+ w.WriteString(`|`)
+ for i := 0; i < len(t.ColumnInfos); i++ {
+ w.WriteString(strings.Repeat("-", t.ColumnInfos[i].Len+2))
+ if i < len(t.ColumnInfos)-1 {
+ w.WriteString("+")
+ }
+ }
+ w.WriteString(`|`)
+
+ } else {
+ w.WriteString(`|`)
+ for _, column := range row.Columns {
+ w.WriteString(` `)
+ content := w.nodesAsString(column.Children...)
+ if content == "" {
+ content = " "
+ }
+ n := column.Len - utf8.RuneCountInString(content)
+ if n < 0 {
+ n = 0
+ }
+ if column.Align == "center" {
+ if n%2 != 0 {
+ w.WriteString(" ")
+ }
+ w.WriteString(strings.Repeat(" ", n/2) + content + strings.Repeat(" ", n/2))
+ } else if column.Align == "right" {
+ w.WriteString(strings.Repeat(" ", n) + content)
+ } else {
+ w.WriteString(content + strings.Repeat(" ", n))
+ }
+ w.WriteString(` |`)
+ }
+ }
+ w.WriteString("\n")
+ }
+}
+
+func (w *OrgWriter) WriteHorizontalRule(hr HorizontalRule) {
+ w.WriteString(w.indent + "-----\n")
+}
+
+func (w *OrgWriter) WriteText(t Text) { w.WriteString(t.Content) }
+
+func (w *OrgWriter) WriteEmphasis(e Emphasis) {
+ borders, ok := emphasisOrgBorders[e.Kind]
+ if !ok {
+ panic(fmt.Sprintf("bad emphasis %#v", e))
+ }
+ w.WriteString(borders[0])
+ WriteNodes(w, e.Content...)
+ w.WriteString(borders[1])
+}
+
+func (w *OrgWriter) WriteLatexFragment(l LatexFragment) {
+ w.WriteString(l.OpeningPair)
+ WriteNodes(w, l.Content...)
+ w.WriteString(l.ClosingPair)
+}
+
+func (w *OrgWriter) WriteStatisticToken(s StatisticToken) {
+ w.WriteString(fmt.Sprintf("[%s]", s.Content))
+}
+
+func (w *OrgWriter) WriteLineBreak(l LineBreak) {
+ w.WriteString(strings.Repeat("\n"+w.indent, l.Count))
+}
+
+func (w *OrgWriter) WriteExplicitLineBreak(l ExplicitLineBreak) {
+ w.WriteString(`\\` + "\n" + w.indent)
+}
+
+func (w *OrgWriter) WriteTimestamp(t Timestamp) {
+ w.WriteString("<")
+ if t.IsDate {
+ w.WriteString(t.Time.Format(datestampFormat))
+ } else {
+ w.WriteString(t.Time.Format(timestampFormat))
+ }
+ if t.Interval != "" {
+ w.WriteString(" " + t.Interval)
+ }
+ w.WriteString(">")
+}
+
+func (w *OrgWriter) WriteFootnoteLink(l FootnoteLink) {
+ w.WriteString("[fn:" + l.Name)
+ if l.Definition != nil {
+ w.WriteString(":")
+ WriteNodes(w, l.Definition.Children[0].(Paragraph).Children...)
+ }
+ w.WriteString("]")
+}
+
+func (w *OrgWriter) WriteRegularLink(l RegularLink) {
+ if l.AutoLink {
+ w.WriteString(l.URL)
+ } else if l.Description == nil {
+ w.WriteString(fmt.Sprintf("[[%s]]", l.URL))
+ } else {
+ descriptionWriter := w.emptyClone()
+ WriteNodes(descriptionWriter, l.Description...)
+ description := descriptionWriter.String()
+ w.WriteString(fmt.Sprintf("[[%s][%s]]", l.URL, description))
+ }
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/paragraph.go b/vendor/github.com/niklasfasching/go-org/org/paragraph.go
new file mode 100644
index 0000000000..b7d3ea92ce
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/paragraph.go
@@ -0,0 +1,46 @@
+package org
+
+import (
+ "regexp"
+ "strings"
+)
+
+type Paragraph struct{ Children []Node }
+type HorizontalRule struct{}
+
+var horizontalRuleRegexp = regexp.MustCompile(`^(\s*)-{5,}\s*$`)
+var plainTextRegexp = regexp.MustCompile(`^(\s*)(.*)`)
+
+func lexText(line string) (token, bool) {
+ if m := plainTextRegexp.FindStringSubmatch(line); m != nil {
+ return token{"text", len(m[1]), m[2], m}, true
+ }
+ return nilToken, false
+}
+
+func lexHorizontalRule(line string) (token, bool) {
+ if m := horizontalRuleRegexp.FindStringSubmatch(line); m != nil {
+ return token{"horizontalRule", len(m[1]), "", m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseParagraph(i int, parentStop stopFn) (int, Node) {
+ lines, start := []string{d.tokens[i].content}, i
+ i++
+ stop := func(d *Document, i int) bool {
+ return parentStop(d, i) || d.tokens[i].kind != "text" || d.tokens[i].content == ""
+ }
+ for ; !stop(d, i); i++ {
+ lines = append(lines, d.tokens[i].content)
+ }
+ consumed := i - start
+ return consumed, Paragraph{d.parseInline(strings.Join(lines, "\n"))}
+}
+
+func (d *Document) parseHorizontalRule(i int, parentStop stopFn) (int, Node) {
+ return 1, HorizontalRule{}
+}
+
+func (n Paragraph) String() string { return orgWriter.nodesAsString(n) }
+func (n HorizontalRule) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/table.go b/vendor/github.com/niklasfasching/go-org/org/table.go
new file mode 100644
index 0000000000..a404e1a9f2
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/table.go
@@ -0,0 +1,130 @@
+package org
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type Table struct {
+ Rows []Row
+ ColumnInfos []ColumnInfo
+}
+
+type Row struct {
+ Columns []Column
+ IsSpecial bool
+}
+
+type Column struct {
+ Children []Node
+ *ColumnInfo
+}
+
+type ColumnInfo struct {
+ Align string
+ Len int
+}
+
+var tableSeparatorRegexp = regexp.MustCompile(`^(\s*)(\|[+-|]*)\s*$`)
+var tableRowRegexp = regexp.MustCompile(`^(\s*)(\|.*)`)
+
+var columnAlignRegexp = regexp.MustCompile(`^<(l|c|r)>$`)
+
+func lexTable(line string) (token, bool) {
+ if m := tableSeparatorRegexp.FindStringSubmatch(line); m != nil {
+ return token{"tableSeparator", len(m[1]), m[2], m}, true
+ } else if m := tableRowRegexp.FindStringSubmatch(line); m != nil {
+ return token{"tableRow", len(m[1]), m[2], m}, true
+ }
+ return nilToken, false
+}
+
+func (d *Document) parseTable(i int, parentStop stopFn) (int, Node) {
+ rawRows, start := [][]string{}, i
+ for ; !parentStop(d, i); i++ {
+ if t := d.tokens[i]; t.kind == "tableRow" {
+ rawRow := strings.FieldsFunc(d.tokens[i].content, func(r rune) bool { return r == '|' })
+ for i := range rawRow {
+ rawRow[i] = strings.TrimSpace(rawRow[i])
+ }
+ rawRows = append(rawRows, rawRow)
+ } else if t.kind == "tableSeparator" {
+ rawRows = append(rawRows, nil)
+ } else {
+ break
+ }
+ }
+
+ table := Table{nil, getColumnInfos(rawRows)}
+ for _, rawColumns := range rawRows {
+ row := Row{nil, isSpecialRow(rawColumns)}
+ if len(rawColumns) != 0 {
+ for i := range table.ColumnInfos {
+ column := Column{nil, &table.ColumnInfos[i]}
+ if i < len(rawColumns) {
+ column.Children = d.parseInline(rawColumns[i])
+ }
+ row.Columns = append(row.Columns, column)
+ }
+ }
+ table.Rows = append(table.Rows, row)
+ }
+ return i - start, table
+}
+
+func getColumnInfos(rows [][]string) []ColumnInfo {
+ columnCount := 0
+ for _, columns := range rows {
+ if n := len(columns); n > columnCount {
+ columnCount = n
+ }
+ }
+
+ columnInfos := make([]ColumnInfo, columnCount)
+ for i := 0; i < columnCount; i++ {
+ countNumeric, countNonNumeric := 0, 0
+ for _, columns := range rows {
+ if i >= len(columns) {
+ continue
+ }
+
+ if n := utf8.RuneCountInString(columns[i]); n > columnInfos[i].Len {
+ columnInfos[i].Len = n
+ }
+
+ if m := columnAlignRegexp.FindStringSubmatch(columns[i]); m != nil && isSpecialRow(columns) {
+ switch m[1] {
+ case "l":
+ columnInfos[i].Align = "left"
+ case "c":
+ columnInfos[i].Align = "center"
+ case "r":
+ columnInfos[i].Align = "right"
+ }
+ } else if _, err := strconv.ParseFloat(columns[i], 32); err == nil {
+ countNumeric++
+ } else if strings.TrimSpace(columns[i]) != "" {
+ countNonNumeric++
+ }
+ }
+
+ if columnInfos[i].Align == "" && countNumeric >= countNonNumeric {
+ columnInfos[i].Align = "right"
+ }
+ }
+ return columnInfos
+}
+
+func isSpecialRow(rawColumns []string) bool {
+ isAlignRow := true
+ for _, rawColumn := range rawColumns {
+ if !columnAlignRegexp.MatchString(rawColumn) && rawColumn != "" {
+ isAlignRow = false
+ }
+ }
+ return isAlignRow
+}
+
+func (n Table) String() string { return orgWriter.nodesAsString(n) }
diff --git a/vendor/github.com/niklasfasching/go-org/org/util.go b/vendor/github.com/niklasfasching/go-org/org/util.go
new file mode 100644
index 0000000000..c25bf27ee2
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/util.go
@@ -0,0 +1,19 @@
+package org
+
+func isSecondBlankLine(d *Document, i int) bool {
+ if i-1 <= 0 {
+ return false
+ }
+ t1, t2 := d.tokens[i-1], d.tokens[i]
+ if t1.kind == "text" && t2.kind == "text" && t1.content == "" && t2.content == "" {
+ return true
+ }
+ return false
+}
+
+func isImageOrVideoLink(n Node) bool {
+ if l, ok := n.(RegularLink); ok && l.Kind() == "video" || l.Kind() == "image" {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/niklasfasching/go-org/org/writer.go b/vendor/github.com/niklasfasching/go-org/org/writer.go
new file mode 100644
index 0000000000..c4aebd69f5
--- /dev/null
+++ b/vendor/github.com/niklasfasching/go-org/org/writer.go
@@ -0,0 +1,103 @@
+package org
+
+import "fmt"
+
+// Writer is the interface that is used to export a parsed document into a new format. See Document.Write().
+type Writer interface {
+ Before(*Document) // Before is called before any nodes are passed to the writer.
+ After(*Document) // After is called after all nodes have been passed to the writer.
+ String() string // String is called at the very end to retrieve the final output.
+
+ WriterWithExtensions() Writer
+
+ WriteKeyword(Keyword)
+ WriteInclude(Include)
+ WriteComment(Comment)
+ WriteNodeWithMeta(NodeWithMeta)
+ WriteNodeWithName(NodeWithName)
+ WriteHeadline(Headline)
+ WriteBlock(Block)
+ WriteExample(Example)
+ WriteDrawer(Drawer)
+ WritePropertyDrawer(PropertyDrawer)
+ WriteList(List)
+ WriteListItem(ListItem)
+ WriteDescriptiveListItem(DescriptiveListItem)
+ WriteTable(Table)
+ WriteHorizontalRule(HorizontalRule)
+ WriteParagraph(Paragraph)
+ WriteText(Text)
+ WriteEmphasis(Emphasis)
+ WriteLatexFragment(LatexFragment)
+ WriteStatisticToken(StatisticToken)
+ WriteExplicitLineBreak(ExplicitLineBreak)
+ WriteLineBreak(LineBreak)
+ WriteRegularLink(RegularLink)
+ WriteTimestamp(Timestamp)
+ WriteFootnoteLink(FootnoteLink)
+ WriteFootnoteDefinition(FootnoteDefinition)
+}
+
+func WriteNodes(w Writer, nodes ...Node) {
+ w = w.WriterWithExtensions()
+ for _, n := range nodes {
+ switch n := n.(type) {
+ case Keyword:
+ w.WriteKeyword(n)
+ case Include:
+ w.WriteInclude(n)
+ case Comment:
+ w.WriteComment(n)
+ case NodeWithMeta:
+ w.WriteNodeWithMeta(n)
+ case NodeWithName:
+ w.WriteNodeWithName(n)
+ case Headline:
+ w.WriteHeadline(n)
+ case Block:
+ w.WriteBlock(n)
+ case Example:
+ w.WriteExample(n)
+ case Drawer:
+ w.WriteDrawer(n)
+ case PropertyDrawer:
+ w.WritePropertyDrawer(n)
+ case List:
+ w.WriteList(n)
+ case ListItem:
+ w.WriteListItem(n)
+ case DescriptiveListItem:
+ w.WriteDescriptiveListItem(n)
+ case Table:
+ w.WriteTable(n)
+ case HorizontalRule:
+ w.WriteHorizontalRule(n)
+ case Paragraph:
+ w.WriteParagraph(n)
+ case Text:
+ w.WriteText(n)
+ case Emphasis:
+ w.WriteEmphasis(n)
+ case LatexFragment:
+ w.WriteLatexFragment(n)
+ case StatisticToken:
+ w.WriteStatisticToken(n)
+ case ExplicitLineBreak:
+ w.WriteExplicitLineBreak(n)
+ case LineBreak:
+ w.WriteLineBreak(n)
+ case RegularLink:
+ w.WriteRegularLink(n)
+ case Timestamp:
+ w.WriteTimestamp(n)
+ case FootnoteLink:
+ w.WriteFootnoteLink(n)
+ case FootnoteDefinition:
+ w.WriteFootnoteDefinition(n)
+ default:
+ if n != nil {
+ panic(fmt.Sprintf("bad node %T %#v", n, n))
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go
deleted file mode 100644
index 9656c42a19..0000000000
--- a/vendor/github.com/russross/blackfriday/doc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Package blackfriday is a Markdown processor.
-//
-// It translates plain text with simple formatting rules into HTML or LaTeX.
-//
-// Sanitized Anchor Names
-//
-// Blackfriday includes an algorithm for creating sanitized anchor names
-// corresponding to a given input text. This algorithm is used to create
-// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
-// algorithm is specified below, so that other packages can create
-// compatible anchor names and links to those anchors.
-//
-// The algorithm iterates over the input text, interpreted as UTF-8,
-// one Unicode code point (rune) at a time. All runes that are letters (category L)
-// or numbers (category N) are considered valid characters. They are mapped to
-// lower case, and included in the output. All other runes are considered
-// invalid characters. Invalid characters that preceed the first valid character,
-// as well as invalid character that follow the last valid character
-// are dropped completely. All other sequences of invalid characters
-// between two valid characters are replaced with a single dash character '-'.
-//
-// SanitizedAnchorName exposes this functionality, and can be used to
-// create compatible links to the anchor names generated by blackfriday.
-// This algorithm is also implemented in a small standalone package at
-// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
-// that want a small package and don't need full functionality of blackfriday.
-package blackfriday
-
-// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
-// github.com/shurcooL/sanitized_anchor_name.
-// Otherwise, users of sanitized_anchor_name will get anchor names
-// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
deleted file mode 100644
index e0a6c69c96..0000000000
--- a/vendor/github.com/russross/blackfriday/html.go
+++ /dev/null
@@ -1,938 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross .
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
- "bytes"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// Html renderer configuration options.
-const (
- HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
- HTML_SKIP_STYLE // skip embedded