diff --git a/.travis.yml b/.travis.yml index c990f01..2d75fdd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ env: - VERSION=min - VERSION=max script: + - dub test :common --compiler=$DC - rdmd ./d-test-utils/test_with_package.d libdparse -- dub test --compiler=$DC - rdmd ./d-test-utils/test_with_package.d libdparse -- dub build --config=lib --compiler=$DC - rdmd ./d-test-utils/test_with_package.d libdparse -- dub build --config=exe --compiler=$DC diff --git a/common/dub.sdl b/common/dub.sdl new file mode 100644 index 0000000..3cc7dd0 --- /dev/null +++ b/common/dub.sdl @@ -0,0 +1 @@ +name "common" diff --git a/src/ddoc/lexer.d b/common/source/ddoc/lexer.d similarity index 100% rename from src/ddoc/lexer.d rename to common/source/ddoc/lexer.d diff --git a/src/ddoc/macros.d b/common/source/ddoc/macros.d similarity index 99% rename from src/ddoc/macros.d rename to common/source/ddoc/macros.d index 5466369..2d72094 100644 --- a/src/ddoc/macros.d +++ b/common/source/ddoc/macros.d @@ -889,4 +889,4 @@ package size_t stripWhitespace(ref Lexer lexer) return start; } -enum callHighlightMsg = "You should call ddoc.hightlight.hightlight(string) first."; +enum callHighlightMsg = "You should call ddoc.hightlight.hightlight(string) or ddoc.unhighlight.unhighlight(string) first."; diff --git a/src/ddoc/sections.d b/common/source/ddoc/sections.d similarity index 100% rename from src/ddoc/sections.d rename to common/source/ddoc/sections.d diff --git a/common/source/ddoc/types.d b/common/source/ddoc/types.d new file mode 100644 index 0000000..ff57493 --- /dev/null +++ b/common/source/ddoc/types.d @@ -0,0 +1,97 @@ +module ddoc.types; + +import ddoc.lexer; +import ddoc.sections; + +struct Comment +{ + bool isDitto() const @property + { + import std.string : strip, toLower; + + return sections.length == 2 && sections[0].content.strip().toLower() == "ditto"; + } + + Section[] sections; + + /** + * Creates a Comment object without expanding the sections. 
+ * + * Use $(LREF parse) with a function pointer to $(REF highlight, ddoc,highlight) + * or $(REF parseComment, ddoc,comments) to parse a comment while also + * expanding sections. + */ + static Comment parseUnexpanded(string text) + { + import ddoc.unhighlight : unhighlight; + + return parse(text, null, false, &unhighlight); + } + + static Comment parse(string text, string[string] macros, bool removeUnknown, + string function(string) highlightFn) + { + import std.functional : toDelegate; + + return parse(text, macros, removeUnknown, toDelegate(highlightFn)); + } + + static Comment parse(string text, string[string] macros, bool removeUnknown, + string delegate(string) highlightFn) + out(retVal) + { + assert(retVal.sections.length >= 2); + } + do + { + import std.algorithm : find; + import ddoc.macros : expand; + + auto sections = splitSections(text); + string[string] sMacros = macros; + auto m = sections.find!(p => p.name == "Macros"); + const e = sections.find!(p => p.name == "Escapes"); + auto p = sections.find!(p => p.name == "Params"); + if (m.length) + { + if (!doMapping(m[0])) + throw new DdocParseException("Unable to parse Key/Value pairs", m[0].content); + foreach (kv; m[0].mapping) + sMacros[kv[0]] = kv[1]; + } + if (e.length) + { + assert(0, "Escapes not handled yet"); + } + if (p.length) + { + if (!doMapping(p[0])) + throw new DdocParseException("Unable to parse Key/Value pairs", p[0].content); + foreach (ref kv; p[0].mapping) + kv[1] = expand(Lexer(highlightFn(kv[1])), sMacros, removeUnknown); + } + + foreach (ref Section sec; sections) + { + if (sec.name != "Macros" && sec.name != "Escapes" && sec.name != "Params") + sec.content = expand(Lexer(highlightFn(sec.content)), sMacros, removeUnknown); + } + return Comment(sections); + } +} + +private: +bool doMapping(ref Section s) +{ + import ddoc.macros : KeyValuePair, parseKeyValuePair; + + auto lex = Lexer(s.content); + KeyValuePair[] pairs; + if (parseKeyValuePair(lex, pairs)) + { + foreach (idx, kv; 
pairs) + s.mapping ~= kv; + return true; + } + return false; +} diff --git a/common/source/ddoc/unhighlight.d b/common/source/ddoc/unhighlight.d new file mode 100644 index 0000000..1419b94 --- /dev/null +++ b/common/source/ddoc/unhighlight.d @@ -0,0 +1,114 @@ +/** + * Converts embedded code sections to plain text inside `(D_CODE)` without any + * syntax highlighting applied. This can be used as a lightweight alternative to + * ddoc.highlight when syntax highlighting the code is not actually needed. + * + * Copyright: © 2014 Economic Modeling Specialists, Intl. + * Authors: Brian Schott, Mathias 'Geod24' Lang, Jan Jurzitza + * License: $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0) + */ +module ddoc.unhighlight; + +import std.array; + +/** + * Parses a string and replaces embedded code (code between at least 3 '-') with + * plaintext. + * + * Params: + * str = A string that might contain embedded code. Only code will be modified. + * If the string doesn't contain any embedded code, it will be returned as is. + * + * Returns: + * A (possibly new) string containing the embedded code inside `D_CODE` macros. + */ +string unhighlight(string str) +{ + return highlightBase(str, (code, ref o) { o.put(code); }); +} + +/** + * Base code for highlight and unhighlight, calling the $(LREF highlightCode) + * callback parameter on all embedded sections to handle how it is emitted. + * + * Params: + * str = A string that might contain embedded code. Only code will be modified. + * If the string doesn't contain any embedded code, it will be returned as is. + * highlightCode = The callback to call for embedded and inlined code sections. + * `D_CODE` macros will be automatically prefixed and suffixed before/after + * the call to this function. 
+ */ +string highlightBase(string str, void delegate(string code, ref Appender!string output) highlightCode) +{ + import ddoc.lexer; + import ddoc.macros : tokOffset; + + auto lex = Lexer(str, true); + auto output = appender!string; + size_t start; + // We need this because there's no way to tell how many dashes precede + // an embedded. + size_t end; + while (!lex.empty) + { + if (lex.front.type == Type.embedded) + { + if (start != end) + output.put(lex.text[start .. end]); + output.put("$(D_CODE "); + highlightCode(lex.front.text, output); + output.put(")"); + start = lex.offset; + } + else if (lex.front.type == Type.inlined) + { + if (start != end) + output.put(lex.text[start .. end]); + output.put("$(DDOC_BACKQUOTED "); + highlightCode(lex.front.text, output); + output.put(")"); + start = lex.offset; + } + end = lex.offset; + lex.popFront(); + } + + if (start) + { + output.put(lex.text[start .. end]); + return output.data; + } + else + { + return str; + } +} + +unittest +{ + import ddoc.lexer; + import ddoc.macros; + + string[string] macros = null; + + string text = `description + +Something else + +--- +// an example +--- +Throws: a fit +--- +/// another example +---`; + text = unhighlight(text); + auto lexer = Lexer(text, true); + assert(expand(lexer, macros, false) == `description + +Something else + +
// an example+Throws: a fit +
/// another example`); +} diff --git a/dub.json b/dub.json index 28eedd4..47c29f0 100644 --- a/dub.json +++ b/dub.json @@ -8,18 +8,20 @@ "license": "BSL-1.0", "homepage": "https://github.com/dlang-community/libddoc", "configurations": [ - { - "name": "lib", - "targetType": "library", - "versions": [ "LIBDDOC_CONFIG_LIB" ] - }, - { - "name": "exe", - "targetType": "executable", - "versions": [ "LIBDDOC_CONFIG_EXE" ] - } + { + "name": "lib", + "targetType": "library", + "versions": [ "LIBDDOC_CONFIG_LIB" ] + }, + { + "name": "exe", + "targetType": "executable", + "versions": [ "LIBDDOC_CONFIG_EXE" ] + } ], "dependencies": { - "libdparse": ">=0.13.0 <0.18.0" - } + "libdparse": ">=0.13.0 <1.0.0", + ":common": "*" + }, + "subPackages": ["common"] } diff --git a/src/ddoc/comments.d b/src/ddoc/comments.d index f73adb5..cbd1f6d 100644 --- a/src/ddoc/comments.d +++ b/src/ddoc/comments.d @@ -7,6 +7,8 @@ module ddoc.comments; import ddoc.sections; import ddoc.lexer; +public import ddoc.types; + Comment parseComment(string text, string[string] macros, bool removeUnknown = true) out(retVal) { @@ -14,40 +16,9 @@ out(retVal) } do { - import std.algorithm : find; - import ddoc.macros : expand; import ddoc.highlight : highlight; - auto sections = splitSections(text); - string[string] sMacros = macros; - auto m = sections.find!(p => p.name == "Macros"); - const e = sections.find!(p => p.name == "Escapes"); - auto p = sections.find!(p => p.name == "Params"); - if (m.length) - { - if (!doMapping(m[0])) - throw new DdocParseException("Unable to parse Key/Value pairs", m[0].content); - foreach (kv; m[0].mapping) - sMacros[kv[0]] = kv[1]; - } - if (e.length) - { - assert(0, "Escapes not handled yet"); - } - if (p.length) - { - if (!doMapping(p[0])) - throw new DdocParseException("Unable to parse Key/Value pairs", p[0].content); - foreach (ref kv; p[0].mapping) - kv[1] = expand(Lexer(highlight(kv[1])), sMacros, removeUnknown); - } - - foreach (ref Section sec; sections) - { - if (sec.name 
!= "Macros" && sec.name != "Escapes" && sec.name != "Params") - sec.content = expand(Lexer(highlight(sec.content)), sMacros, removeUnknown); - } - return Comment(sections); + return Comment.parse(text, macros, removeUnknown, &highlight); } unittest @@ -58,18 +29,6 @@ unittest assert(test.sections[2].name == "Params"); } -struct Comment -{ - bool isDitto() const @property - { - import std.string : strip, toLower; - - return sections.length == 2 && sections[0].content.strip().toLower() == "ditto"; - } - - Section[] sections; -} - unittest { import std.conv : text; @@ -229,19 +188,3 @@ Params: assert(parsed.sections[3].mapping[1][0] == "supportGC"); assert(parsed.sections[3].mapping[1][1][0] == 't', "<<" ~ parsed.sections[3].mapping[1][1] ~ ">>"); } - -private: -bool doMapping(ref Section s) -{ - import ddoc.macros : KeyValuePair, parseKeyValuePair; - - auto lex = Lexer(s.content); - KeyValuePair[] pairs; - if (parseKeyValuePair(lex, pairs)) - { - foreach (idx, kv; pairs) - s.mapping ~= kv; - return true; - } - return false; -}