# from gitverse
npm install -g git+https://gitverse.ru/mikle33/logipard
# or: from github
npm install -g git+https://github.com/sbtrn-devil/logipard
# or: from npm
# TODO
# from gitverse
npm install git+https://gitverse.ru/mikle33/logipard
# or: from github
npm install git+https://github.com/sbtrn-devil/logipard
# or: from npm
# TODO
lp-cli [cmd line options]
node_modules/.bin/lp-cli [cmd line options]
node_modules\.bin\lp-cli [cmd line options]
-h
or --help
for summary of the command line options (they are quite few, most of the work
is intended to be specified via configuration file(s)).<PROJECT_ROOT>
), possibly even created a package.json
in it (let's say it is a Node.js library).lp-cli [options]
(adjust it if you installed locally).<PROJECT_ROOT>/index.js
// Pad a string from the left up to a given minimum length.
// str: value to pad (coerced to a string); len: minimum length of the result;
// padding: optional pad pattern (defaults to ' '), repeated and truncated as needed.
// Returns str unchanged if it is already len characters or longer.
module.exports.leftpad = function leftpad(str, len, padding = ' ') {
// coerce any input (number, null, etc.) to a string
str = str + '';
// already long enough - nothing to do
if (str.length >= len) return str;
// coerce padding to a string; an empty padding falls back to a single space
padding = (padding = padding + '').length ? padding : ' ';
// double the padding until it alone can cover the missing length
while (padding.length + str.length < len) {
padding = padding + padding;
}
// use the leftmost part of the padding that exactly fills the gap
return padding.substring(0, len - str.length) + str;
}
<PROJECT_ROOT>/index.js
//#LP functions/leftpad { <#./%title: leftpad(str, len[, padding])#>
// Pad a string from left to the given minimum length with whitespaces or a given padding string.
//#LP ./str %arg: String to be padded
//#LP ./len %arg: Number, minimum required length of the padded string (<#ref leftpad/str#>)
//#LP ./padding %arg: String, optional. If specified, this string or its portion will be used for the padding instead of spaces.
//
// The string is repeated as required if it is less than the length required for the padding, and its leftmost part of the required length is used.
module.exports.leftpad = function leftpad(str, len, padding = ' ') {
... // almost everything within can be left as is so far
//#LP ./%return: The padded string with minimum length of (<#ref leftpad/len#>)
return padding.substring(0, len - str.length) + str;
}
//#LP }
//
here), possibly with some code at beginning of the line, where first comment starts with #LP
token,
and until the next line that does not end with single-line comment. Here, the //
line after ./padding
is required to keep the // The string is ...
comment included into the run.
On the other hand, the // almost everything ...
comment line is separated from the run by non-comment ending line, so this comment is 'just a code comment' and is ignored.functions
: the item automatically introduced by fact we introduced its member sub-items, we'll consider it a container for the functions listfunctions/leftpad
: the primary documentation item for our function, contains everything related to it (i. e. the following items)functions/leftpad/%title
(comes from ./%title
): the item that contains human readable title for the main itemfunctions/leftpad/str
, functions/leftpad/len
, functions/leftpad/padding
: items documenting arguments of the function, note each of them is tagged with %arg
model tagfunctions/leftpad/%return
: the item that documents our function's returned value%arg
: the model tag item with this name, introduced by the fact it was used%arg
,
the fact that the documented entity is specifically a function) is also entirely conventional, as well as their names. From Logipard documentation model perspective, all of these are just generic items,
and at this point their interpretation in the way described above only exists in our mind.%return
fragment near the return statement rather
than bundled with parameters block - again, purely for example; we could as well place it more traditionally.) In general, however, you could place it anywhere in the source file,
or in a different file, or even place different sub-items across different locations. Probably it makes not much sense for things like function arguments and return value, but these may be not the only
items related to this function you may want to document under this node.%
. Fortunately, we have this already.domain.our-unique.leftpad.for-nodejs/functions
domain.our-unique.leftpad.for-nodejs/functions/leftpad
domain.our-unique.leftpad.for-nodejs/functions/leftpad/%title
domain.our-unique.leftpad.for-nodejs/functions/leftpad/str
domain.our-unique.leftpad.for-nodejs/functions/leftpad/len
domain.our-unique.leftpad.for-nodejs/functions/leftpad/padding
domain.our-unique.leftpad.for-nodejs/functions/leftpad/%return
%arg
(this can be left as is, for some reasons)<PROJECT_ROOT>/index.js
//#LP-alias M: domain.our-unique.leftpad.for-nodejs
// now instead of domain.our-unique.leftpad.for-nodejs/functions/... we can use M/functions
//#LP M/functions/leftpad { <#./%title: leftpad(str, len[, padding])#>
...
// ...the remaining part of the file can be left unchanged, since it uses relative names
//#LP-alias M: domain.our-unique.leftpad.for-nodejs
will likely be the shared common part for all files that contain documentation
for our project, and possibly more shared prologue parts will occur eventually. To further reduce duplication, we can move this to separate file and
just include it everywhere we need...<PROJECT_ROOT>/leftpad-module-inc.lp-txt
#LP-alias M: domain.our-unique.leftpad.for-nodejs
<PROJECT_ROOT>/index.js
//#LP-include leftpad-module-inc.lp-txt
//#LP M/functions/leftpad { <#./%title: leftpad(str, len[, padding])#>
...
// ...the remaining part of the file still unchanged
<PROJECT_ROOT>/leftpad-doc-html.tpl.html
<html>
<head>
<style>
body {
font-family: sans-serif;
}
code {
font-family: monospace;
background: lightgray;
margin: 0;
padding: 0;
}
pre {
font-family: monospace;
background: lightgray;
margin: 0;
padding: 0;
}
table, th, td { border: 1px solid; border-spacing: 0; border-collapse: collapse; }
table { margin: 0.5em }
CSS_TARGET
</style>
</head>
<body>
<div style="display: flex; flex-direction: column; height: 100%; overflow: hidden">
<div style="border-bottom: double 3px">
<center style="font-size: 200%">leftpad for node.js</center>
<center style="font-size: 75%">1.0.0</center>
</div>
<div style="padding: 0; margin: 0; overflow: clip; height: 0; flex-grow: 1">
HTML_TARGET
</div>
</div>
</body>
</html>
CSS_TARGET
and HTML_TARGET
placeholders.<PROJECT_ROOT>/lp-config.json
//#charset utf-8
// (That comment annotation above is taken into consideration, hopefully you will be saving this file in UTF-8)
// This doesn't quite look like a valid JSON (including the comments), but don't bother for now,
// just copy and paste everything as is.
{
"+ config": {
},
"lp-extract": {
"+ config": {
// note that non-absolute paths are relative to project root (which is location of this config file)
"+ excludeInFiles": ["node_modules/**"]
},
"items": [
{
// section for the primary codebase, in our case it is all the JS files
"inFiles": ["**/*.js"],
"excludeInFiles": [],
"outDir": "lp-extract.gen",
"reader": "${LP_HOME}/lpxread-basic" $, // trailing $ is not a typo
"lpxread-basic": {
"srcType": "generic-c-like"
}
},
{
// remember the leftpad-module-inc.lp-txt? it falls under this section
"inFiles": ["**/*-inc.lp-txt"],
"excludeInFiles": [],
"forLPInclude": true,
"outDir": "lp-extract.gen/lp-includes",
"reader": "${LP_HOME}/lpxread-basic" $,
"lpxread-basic": {
"srcType": "lp-text"
}
}
]
},
"lp-compile": {
"+ config": {
},
"items": [
{
"inRootDir": "lp-extract.gen",
"lpIncLookupDirName": "lp-includes",
"writer": "${LP_HOME}/lpcwrite-basic-json" $,
"lpcwrite-basic-json": {
// you may want to customize this for your project name
"outFile": "lp-compile.gen/leftpad-doc-fdom.json"
}
}
]
},
"lp-generate": {
"+ config": {
},
"items": [
{
"inFile": "lp-compile.gen/leftpad-doc-fdom.json", // same as outFile in lp-compile section
"writer": "${LP_HOME}/lpgwrite-example" $,
"lpgwrite-example": {
// very much magic here, just paste it with no hesitation
"program": file("${LP_HOME}/lpgwrite-example-docprg.lpson" $, {
"docprgPrologue": [ { "nameAlias": "M", "name": "domain.our-unique.leftpad.for-nodejs" } ],
"docRootItems": {
"query": [{ "with": "M/functions" }],
"sort": { "byMember": "%order", "keyFormat": "ds-natural", "order": "asc" }
},
"LS_EXTENDS": "Extends (is a)",
"LS_MEMBERS": "Members",
"LS_NAME": "Name",
"LS_DESCRIPTION": "Description",
"LS_MEMBERS_FROM_EXTENTS": "Members from extents",
"LS_ARGUMENTS": "Arguments",
"LS_RETURNS": "Returns:",
"LS_ERRORS": "Errors:",
"LS_MEMBERS_DETAILED": "Members (detailed)",
"LS_MEMBERS_FROM_EXTENTS_DETAILED": "Members from extents (detailed)",
"LS_ARGUMENTS_DETAILED": "Arguments (detailed)",
"LS_NOTES": "Notes",
"LS_PROPERTIES": "Properties",
"LS_PROPERTIES_FROM_EXTENTS": "Properties from extents",
"LS_METHODS": "Methods",
"LS_METHODS_FROM_EXTENTS": "Methods from extents"
}),
"renders": [
{
"docModel": "DocMain",
"renderer": "${LP_HOME}/lpgwrite-example-render-html" $,
"lpgwrite-example-render-html": {
"outFile": "lp-generate.gen/leftpad-doc.html",
"emitToc": true,
"inTemplateFile": "leftpad-doc-html.tpl.html",
"htmlPlaceholder": "HTML_TARGET",
"cssPlaceholder": "CSS_TARGET",
"localizedKeywords": {
"SNAPBACK": "Snapback",
"SNAPBACK_AND_SCROLL": "Snapback & Scroll",
"ELEVATE": "Elevate",
"RESET": "Reset",
"ELEVATE_TO": "Elevate to...",
"COPY_ITEM_NAME": "Copy this item's LP FDOM full name to clipboard:",
"ITEM_UNFOLDED_ELSEWHERE": "Item unfolded elsewhere on page, click/tap to unfold here...",
"MORE": "More... >>",
"TABLE_OF_CONTENTS": "Table of contents"
}
}
}
]
}
}
]
}
}
<PROJECT_ROOT>
and your current user has write permissions in it, invoke the CLI:lp-cli lp-config.json
=== Performing stage: EXTRACT ===
EXTRACT: 15.039ms
=== Performing stage: COMPILE ===
COMPILE: 18.592ms
=== Performing stage: GENERATE ===
Start render
lpgwrite-example-render-html: file lp-generate.gen/leftpad-doc.html created
GENERATE: 69.098ms
lp-generate.gen
, which should contain leftpad-doc.html
file. It is our documentation page, ready to view via browser.
Isn't it nice? An extra note: it is a completely self-contained HTML file, you can move it around on its own with no fear of losing any dependencies, and it is static and indexation friendly when hosted on web.lp-compile.gen
and file leftpad-doc-fdom.json
in it. It is your documentation model DB (in JSON form, for this instance).<PROJECT_ROOT>/readme.lp-txt
#LP-include leftpad-module-inc.lp-txt
#-LP note the syntax difference when we are using plain text-ish files
# by the way, fragments opened by #-LP are treated as non-annotation comments by Logipard
# and will not make it into the DB and documentation; they span until the next #LP tag.
# So this paragraph is still a LP comment (the hashes are actually optional, but the visual style
# better be consistent). Also note that consistent indentation (same amount of same type of whitespaces
# at start of each interim line within the #LP scope) is handled gracefully.
#LP M/readme { <#./%title: leftpad: quickstart#>
String left pad, revised and improved.
#LP ./install { <#./%title: Install#> <#./%order: 1#>
```
$ npm install git+https://<whereismygit.com>/leftpad
# TODO: actual location
```
#LP }
#LP ./usage { <#./%title: Usage#> <#./%order: 2#>
Use it this way:
```
const { leftpad } = require('./leftpad');
leftpad('foo', 5);
// ' foo'
leftpad('foo', 2);
// 'foo'
leftpad('foo', 10, '+-=');
// '+-=+-=+foo'
```
#LP }
#-LP By the way, it is a good idea to add reference to the usage under the documented function hub...
#LP M/functions/leftpad/usage { <#./%title: Usage#>
See some usage examples under <#ref readme/usage#>.
#LP }
#-LP And also give the functions section some official stuffing...
#LP M/functions: <#./%title: Functions reference#>
Reference on the library functions.
#-LP Be careful though of one caveat when using multi-line #LP...: syntax: its scope is terminated with next non-<#...#>'d #LP tag or #-LP comment
# So lines after this comment are again in M/readme (and the comment itself, in turn, only ends with an #LP tag)
#LP ./versions { <#./%title: Versions summary#> <#./%order: 3#>
#LP ./1.0.0: Initial release version.
#LP ./0.0.9: Pre-release version.
Was not documented with LP, so it pretty sucked.
#LP }
#LP }
<PROJECT_ROOT>/lp-config.json
...
"lp-extract": {
... // under "items"...
"items": [
// add third item (to capture the new readme.lp-txt):
...,
{
"inFiles": ["**/*.lp-txt"],
"excludeInFiles": [],
"outDir": "lp-extract.gen",
"reader": "${LP_HOME}/lpxread-basic" $,
"lpxread-basic": {
"srcType": "lp-text"
}
}
]
},
...
"lp-generate": {
... // in the first (and so far the only) item under "items"...
"items": [
{
"inFile": "lp-compile.gen/leftpad-doc-fdom.json",
"writer": "${LP_HOME}/lpgwrite-example" $,
"lpgwrite-example": {
... // leave most as is, except for....
"docRootItems": {
"query": [{ "with": ["M/readme", "M/functions"] }], // <-- change "query" member to this
],
... // everything here remains as is
},
... // here as well
},
// then, add this second item to "items":
{
"inFile": "lp-compile.gen/leftpad-doc-fdom.json", // note, still same as outFile in lp-compile section
"writer": "${LP_HOME}/lpgwrite-example" $,
"lpgwrite-example": {
"program": file("${LP_HOME}/lpgwrite-example-docprg.lpson" $, {
"docprgPrologue": [ { "nameAlias": "M", "name": "domain.our-unique.leftpad.for-nodejs" } ],
"docRootItems": {
"query": [{ "with": ["M/readme"] }],
],
"sort": { "byMember": "%order", "keyFormat": "natural", "order": "asc" }
}
}),
"renders": [
{
"docModel": "DocMain",
"renderer": "${LP_HOME}/lpgwrite-example-render-md" $,
"lpgwrite-example-render-md": {
"outFile": "lp-generate.gen/leftpad-README.md",
"emitToc": true,
"addSourceRef": false,
"header": "# leftpad #\n\n---",
}
}
]
}
}
// everything else remains as is
}
...
lp-cli lp-config.json
lp-generate.gen
: you should see new file leftpad-README.md
(check it with some MD viewer you have at hand), and the file leftpad-doc.html
has updated -
now it includes the same information as in readme, while still featuring the functions reference, and you can see some other improvements you might have guessed from the
readme source./
-s). Thus, the node on our example picture shortnamed subNodeA-3-1
has full name of nodeA/subNodeA-3/subNodeA-3-1
, as well as the
node shortnamed %tag3
has full name of moreTags/%tag3
./
-s, with one extra option: a sequence between
single or double quotes is considered a single character, and can contain the characters normally disallowed. For example, spare/can
is not a valid short name
(although is a valid full name), and space time
is not a valid name at all, but "spare/can"
and 'space time'
are both valid short names. Or,
for another example, A/"B/C"/D
is a three segment full name made of short names A
, "B/C"
, D
. Quotes must match, except for when inside other type of quotes."shortName"
is not the same as
shortName
and not the same as "shortName "
).#
(e. g. #include
, ##mp
). That includes single #
. These are reserved as private generated names. They can actually
appear in the compiled model, but never as explicitly user specified short names.:
, {
, }
(e. g. a::b
, {ARG}
). :
, {
and }
are reserved as delimiters for input syntax..
, ..
, ...
, etc.). These are reserved for referring to current/previous/pre-previous/etc. path levels
and can not be actual node names.~
(single tilde) - every occurrence of this short name resolves to a unique private generated short name and can not be
an actual node name."#name"
, "."
, "~"
, '{ARG}'
, 'a::b'
are valid short names).%title
can designate
the parent's node title. Such shortnames are recommended to start with '%'
character to emphasize their special role and simplify filtering them out of "usual" sub-nodes.node #tag1 #tag2 ...
%function
, %arg
, etc.), each of which is also a FDOM node. Apart from
reducing number of basic entities in the model, this approach allows the tag itself to be tagged and to have metadata sub-nodes, which enables creation of quite complex comprehensions.%extends
can be tagged with all of the nodes describing the base classes.<query1> / <query2> / ... / <queryN>
.[node-fn-1, node-fn-2, ...]
As the collection may be empty, but may not contain null nodes, any null nodes possibly referred in this list are
effectively dropped from the specified collection (they won't be enumerated or considered in any way, and won't count towards size of the collection).[node-fn-1, node-fn-2, <coll-spec-1>, node-fn-3...]
The extended version of list of nodes, the collection spec elements are treated as list of nodes
obtained by expanding the corresponding collection; duplicate nodes are ignored (a node is only included in the collection once).<coll-spec-1> + <coll-spec-2> + ...
The collection that includes every node from every given collection.<coll-spec-1> & <coll-spec-2> & ...
The collection that only includes nodes that are found in all of the given collections.<coll-spec-1> - <coll-spec-2> - ...
The collection that only includes nodes that are found in first collection of the list, but not in any of 2nd and the rest collections of the list.SomeCollAlias
An alias is a "bookmark" for a collection within the query context to be able to recall it and refer to it later. An alias is conventionally denoted by a valid FDOM
short name.BasicNodes + [advanced/%node1, advanced/%node3, MoreAdvancedNodes] + ((EvenMoreNodesA & EvenMoreNodesB) - [%node4])
.true
if the condition always holds, false
if the condition always fails.isAnyOf <collection-spec>
: the condition holds if the node is one of the given collection.hasMembersThat <sub-condition>
: the condition holds if the sub-condition holds for at least one of the node's non-null members.hasMembersNamed <regular expression>
: the condition holds if the node has at least one non-null member with shortname that matches the given regular expression. Shortcut for
hasMembersThat (named <regular expression>)
(see below) that can have potentially optimized implementation.hasAnyOfTags <collection-spec>
: the condition holds if the node has at least one tag from the given collection of tag nodes.hasAllOfTags <collection-spec>
: the condition holds if the node has all tags from the given collection of tag nodes.hasParentThat <sub-condition>
: the condition holds if the sub-condition holds for the node's immediate parent.named <regular expression>
: the condition holds if the node's shortname matches the given regular expression.and <list of sub-conditions>
: the condition holds if all of the sub-conditions in the given list hold.or <list of sub-conditions>
: the condition holds if at least one of the sub-conditions in the given list holds.not <sub-condition>
: the condition holds if the sub-condition fails, and fails if the sub-condition holds.SomeConditionAlias
: a condition can be marked by a shortcut alias (conventionally denoted by a valid FDOM short name) that can be re-used across the query or other queries in this
context.alias ShortName
- set an local alias to current collection (it only is in effect until the current query teardown). Does not change the current collection, just sets an alias to it that can be used in
later parts of the query.membersThat <condition-spec> [on <collection-spec>] [recursive]
- take member nodes from each element of the current collection, such that each member satisfies the given condition (
✖ Condition specification
),
and replace the current collection with set of such nodes. An explicitly specified collection can optionally be searched in instead of the current one. This query can be recursive (
✖ Recursive queries
).node
node/memberA
node/memberB
node/memberC
node
membersThat (not (named /^.*A/))
will yield the following collection:node/memberB
node/memberC
itemsThat <condition-spec> [on <collection-spec>] [recursive]
- filter the elements from the current collection that satisfy the given condition (
✖ Condition specification
), and replace the current collection
with set of these nodes. An explicitly specified collection can optionally be searched in instead of the current one. This query can be recursive (
✖ Recursive queries
).node1/memberA
node2/memberB
node3/memberC
itemsThat (not (named /^.*A/))
will yield the following collection:node2/memberB
node3/memberC
tagsThat <condition-spec> [on <collection-spec>] [recursive]
- get tag nodes of each element of the collection, such that each tag satisfies the given condition (
✖ Condition specification
), and replace the current
collection with set of these nodes. An explicitly specified collection can optionally be searched in instead of the current one. This query can be recursive (
✖ Recursive queries
).
Although this query can be used on any collection, its typical use is on single elements (single element collection).inMembersThat <condition-spec> [on <collection-spec>] [recursive] query <basic-query-list>
- take member nodes from each element of the current collection, such that each member satisfies the given condition
(
✖ Condition specification
), then replace the current collection with the result of the given query list on collection of these (member) nodes. An explicitly specified collection of initial nodes can optionally be
set to start the sub-query on instead of the current one. This query can be recursive (
✖ Recursive queries
).node
node/memberA
node/memberB
node/memberB/%data
node/memberC
node/other/%data
node
inMembersThat (named /^member/) query (hasMembersNamed /^%data$/)
delivers the following collection:node/memberB/%data
node/other
has member %data
, but does not pass the inMembersThat
condition, so doesn't get into the collection of subjects for sub-query search.)inTagsThat <condition-spec> [on <collection-spec>] [recursive] query <basic-query-list>
- take tag nodes from each element of the current collection, such that each tag satisfies the given condition
(
✖ Condition specification
), then replace the current collection with the result of the given query list on collection of these (tag) nodes. An explicitly specified collection of initial nodes can optionally be
set to start the sub-query on instead of the current one. This query can be recursive (
✖ Recursive queries
).%tag1
%tag2
%tag2/%isLangObject
%tag2/%subInfo
%tag3
%tag3/%subInfo
node1 #%tag1
node2 #%tag1 #%tag2
node3 #%tag3 #%tag2
node4 #%tag4
inTagsThat (hasMembersNamed /^%isLangObject$/) query membersThat (named /^%subInfo$/)
.node1
node2
node3
%tag2/%subInfo
node1
node4
inItemsThat <condition-spec> [on <collection-spec>] [recursive] query <basic-query-list>
- take each element of the current collection that satisfies the given condition (
✖ Condition specification
), then replace
the current collection with the result of the given query list on collection of these (collection entry) nodes. An explicitly specified collection of initial nodes can optionally be set to start the sub-query on
instead of the current one. This query can be recursive (
✖ Recursive queries
).node1
node1/member1
node1/member2
node1/member2/%flagged
node2
node2/member2
node2/member2/%flagged
node3/member1
node3/member1/%flagged
inItemsThat (hasMembersNamed /^member1$/) query membersThat (hasMembersNamed /^%flagged$/)
. Done on the collection:node1
node2
node3
node1/member2
node3/member1
subtractQuery [on <collection-spec>] <basic-query-list>
- perform the given query list and subtract the result from the current collection. The collection the sub-query is performed is by default the
subtractQuery
's initial collection itself, but an explicitly specified collection can be optionally given instead.node1
node2
node2/%flag
node3
subtractQuery itemsThat (hasMembersNamed /^%flag$/)
. Done on the collection:node1
node2
node3
node1
node3
unionQuery [on <collection-spec>] <basic-query-list>
- perform the given query list and union the result with the current collection. The collection the sub-query is performed is by default the
unionQuery
's initial collection itself, but an explicitly specified collection can be optionally given instead.node1
node2
node2/member
node3
unionQuery membersThat (true)
. Done on the collection:node1
node2
node3
node1
node2
node2/member
node3
intersectQuery [on <collection-spec>] <basic-query-list>
- perform the given query list and intersect the result with the current collection. The collection the sub-query is performed is by default the
intersectQuery
's initial collection itself, but an explicitly specified collection can be optionally given instead.node1
node2
node2/%flag
node3
intersectQuery itemsThat (hasMembersNamed /^%flag$/)
. Done on the collection:node1
node2
node3
node2
sideQuery [on <collection-spec>] <basic-query-list>
- perform the given query list, but leave the current collection unchanged. This query only makes sense if the query ends in setting a local alias.
The collection the sub-query is performed is by default the sideQuery
's initial collection itself, but an explicitly specified collection can be optionally given instead.tagsList #tag1 #tag2
node1 #tag1
node2 #tag2
node3 #tag3
tagsList
" will be: (sideQuery on [tagsList] allTagsThat (true) / alias TAGS) / allItemsThat (hasAnyOfTags TAGS)
.node1
node2
node3
node1
node2
classA
classB
classB/%extends #classA
classC
classD
classD/%extends #classB #classC
classD
inMembersThat (named /^%extends$/) allTagsThat (true)
, but this query will only return nodes for the "directly"
extended classes:classB
classC
inMembersThat (named /^%extends$/) recursive allTagsThat (true)
. Then we will get the expected:classA
(recursively queried via classB
)classB
classC
lp-cli --extract <config-file>
lp-cli --compile <config-file>
lp-cli --generate <config-file>
lp-cli --compile --generate <config-file>
//
), shell-like (#
), lua/SQL like (--
) languages, and also plaintext "language". Also serves as an example implementation of extraction reader.
Usage of this reader for an extract-stage item is enabled by reader: "${LP_HOME}/lpxread-basic" $
in (
✖ reader
).#LP
or -#LP
, is assumed
to be a LP input belonging to the single #LP tag (the -#LP runs are ignored), e. g.:code code // non-LP comment
code //#LP: an LP tag
code code code // continued LP tag
code //#LP: another LP tag
code code code // continued another LP tag
code
code // again a non-LP comment (line with no comment breaks contiguity)
code code
code code code // once again a non-LP comment
code //#LP: third LP tag <#LP: fully written inline tags, including digressions, are allowed#> as well
code code code //-#LP commented out LP tag
code //#LP: 4th LP tag
<#LP: an LP tag
continued LP tag#>
<#LP: another LP tag
continued another LP tag#>
<#LP: third LP tag <#LP: fully written inline tags, including digressions, are allowed#> as well#>
<#LP: 4th LP tag#>
//#charset utf-8
(only once per file, "charset" keyword must be in lowercase).
What is considered a single-line comment, depends on the source type specified for this extraction work item (see
✖ lpxread-basic specific config (lp-extract job item)
).#-LP
(or #LP-
) comment line to mark termination of LP tag started by #LP tag: ...
.
Additionally, in a quite specific case when you have a code fragment that contains LP markup, you should place it between #LP~delimiter~
delimiter lines
to avoid production of incorrect output.
E. g.:#LP~x~
```
#LP ./example: this is an example of a code that contains a verbatim LP markup <#~~ and an escaped verbatim run ~~#>
```
#LP~x~
#LP~...~
lines will be transferred to extracted input exactly verbatim, although the whole fragment still has to contain the correct LP markup. So only
use this way of delimitation for code fragments, and with caution.lpxread-basic
with lpxread-basic specific configuration should be added to the extraction job item that uses lpxread-basic, including
the sub-members as described...{
"inFiles": ["**/*.js"],
"excludeInFiles": [],
"outDir": "...",
"reader": "logipard/lpxread-basic",
"lpxread-basic": {
"srcType": "generic-c-like"
}
}
inFiles
...generic-c-like
: C-like languages allowing single-line comments starting from //
(C family, Java family, JS, PHP, etc.)generic-sh-like
: languages allowing single-line comments starting from #
(sh family, python, perl, PHP, etc.)lua-sql
: languages allowing single-line comments starting from --
(Lua & SQL are most known ones)lp-text
: plaintext-based file, where every line is considered a single-line commentwriter: "${LP_HOME}/lpgwrite-example" $
in
✖ writer
.writer: "${LP_HOME}/lpgwrite-i18n-assist" $
in
✖ writer
.{
// optional (note there is exactly single space after '+')
"+ config": {
...
},
// mandatory, configuration for extract stage
"lp-extract": {
// optional
"+ config": {
...
},
// mandatory
"items": [
{
"SKIP": bool, // optional, non-false value comments out this item
"inRootDir": string-path, // optional, defaults to project root dir (see below)
"inFiles": [ ...strings-file-globs ], // mandatory, must be relative to the dir specified in inRootDir
"excludeInFiles": [ ...strings-file-globs ], // optional, must be relative to the dir specified in inRootDir
"forLPInclude": bool, // optional, defaults to false
"outDir": string-path, // mandatory
"reader": string-path, // mandatory
... // no other parameters are recognized by Logipard; any extra ones are up to the reader, which can require its own specific parameters
},
... // zero, one or more items
]
},
// mandatory, configuration for compile stage
"lp-compile": {
// optional
"+ config": {
...
},
// mandatory
"items": [
{
"SKIP": bool, // optional, non-false value comments out this item
"inRootDir": string-path, // mandatory,
"outFile": string-path,
"writer": string-path,
},
... // zero, one or more items
]
},
// mandatory, configuration for generate stage
"lp-generate": {
// optional
"+ config": {
},
// mandatory
"items": [
{
"SKIP": bool, // optional, non-false value comments out this item
"inFile": string-path,
"writer": string-path,
...
},
... // zero, one or more items
]
}
}
"items"
, but it is also merged with object "+ config"
from the corresponding tool's stage, and additionally merged with "+ config"
from the root level (in the order global "+ config" -> stage "+ config" -> item). The merging is shallow per-member
appending at the object's root level. In case of a member name collision, the latter object member replaces the former object's member. However, there is a way to override this behaviour
for array or an object type member: if the member's expected name is "id"
, then add member named "+ id"
to the "+ config"
(s) - then, the resulting "id" member will contain the sub-members
from "+ id"
from the "+ config"
appended before the ones given by the item's "id"
.{
"+ config": {
...
"a": [1, 2],
"+ b": [3, 4],
"c": [5, 6],
"+ d": [7, 8],
...
},
"lp-...": {
"+ config": {
...
"a": [9, 10],
"b": [11, 12],
"+ c": [13, 14],
"+ d": [15, 16],
...
},
"items": [
...
{
// "a", "b", "c", "d" are unspecified, in the actual config item they will be:
// "a": [9, 10]
// "b": [3, 4, 11, 12]
// "c": [13, 14]
// "d": [7, 8, 15, 16]
},
{
...
"a": ["A", "B"], // actual "a": ["A", "B"]
"b": ["A", "B"], // actual "b": ["A", "B"]
"c": ["A", "B"], // actual "c": [13, 14, "A", "B"]
"d": ["A", "B"], // actual "d": [7, 8, 15, 16, "A", "B"]
...
}
]
}
}
Name
| Description
|
---|---|
✖ "+ config"
|
Configuration parameters shared by all the job items in all the stages. Appended to each item specific configuration before the item's own configuration and the stage specific
"+ config" . |
✖ lp-extract
|
Configuration parameters for the job items in the extract stage.
|
✖ lp-compile
|
Configuration parameters for the job items in the compile stage.
|
✖ lp-generate
|
Configuration parameters for the job items in the generate stage.
|
Name
| Description
|
---|---|
✖ "+ config"
|
Configuration items shared by all the job items in the extract stage. Appended to each item specific configuration before the item's own configuration, and after the global
✖ "+ config"
.
|
✖ items[]
|
Array of configurations specifying each job item in the extract stage.
|
Name
| Description
|
---|---|
✖ SKIP
|
Bool. Non-false value tells Logipard to skip this item. Use to comment out temporarily disabled items. Optional, defaults to false.
|
✖ inRootDir
|
Root directory for input files lookup, defaults to project root dir.
|
✖ inFiles
|
A string, or array of strings, with glob filename templates - specifies the set of input files that fall under this item.
The paths in templates are relative to the
✖ inRootDir
.
|
✖ excludeInFiles
|
A string, or array of strings, with glob filename templates - specifies the set of files to exclude from
✖ inFiles
.
The paths in templates are relative to the
✖ inRootDir
. Optional.
|
✖ outDir
|
A string, path to the directory where the extracted documentation model input will be placed. The extraction output directory is assumed transient and should be added to VCS ignore list.
Note that same source file can be picked by multiple extraction job items, but if its extracted input from different jobs ends up under same
outDir -s then later jobs will overwrite product of earlier ones -
you should consider this ahead and take care that their output locations do not conflict. |
✖ forLPInclude
|
Boolean, if true then input extractions by this job item will be saved as module files eligible for inclusion via
LP-inc /LP-include
(see
✖ Including module files
). Optional, defaults to false. |
✖ reader
|
String. Path to the extraction reader's JS file, relative to project root (unless absolute).
The extraction reader is expected to comply with
✖ Extraction reader interface
. Logipard contains some built-in extraction readers:
✖ Built-in extraction readers
|
outDir
-s then later jobs will overwrite product of earlier ones -
you should consider this ahead and take care their output locations did not conflict.LP-inc
/LP-include
(see
✖ Including module files
). Optional, defaults to false.Name
| Description
|
---|---|
✖ "+ config"
|
Configuration items shared by all the job items in the compile stage. Appended to each item specific configuration before the item's own configuration, and after the global
✖ "+ config"
.
|
✖ items[]
|
Array of configurations specifying each job item in the compile stage.
|
Name
| Description
|
---|---|
✖ SKIP
|
Bool. Non-false value tells Logipard to skip this item. Use to comment out temporarily disabled items. Optional, defaults to false.
|
✖ inRootDir
|
Path to the root directory of the model input extracted at the extract stage. In most cases, root is the same as
✖ outDir
.
|
✖ lpIncLookupDirName
|
Name for directory to use for cascading LP-inc/LP-include lookup.
|
✖ writer
|
String. Path to the compilation model writer's JS.
The compilation writer is expected to comply with
✖ Compilation writer interface
.
Logipard contains some built-in compilation writers:
✖ Built-in compiled model writers
|
<#LP-include filename#>
in a LP input file, where filename
is non-absolute and not explicitly local (i. e. is not starting from .
or ..
), then lookup is done as ./<value-of-lpIncLookupDirName>/filename
,
if not found there then as ../<value-of-lpIncLookupDirName>/filename
, etc. upwards (but not higher than inRootDir).Name
| Description
|
---|---|
✖ "+ config"
|
Configuration items shared by all the job items in the generate stage. Appended to each item specific configuration before the item's own configuration, and after the global
✖ "+ config"
.
|
✖ items[]
|
Array of configurations specifying each job item in the generate stage.
|
Name
| Description
|
---|---|
✖ SKIP
|
Bool. Non-false value tells Logipard to skip this item. Use to comment out temporarily disabled items. Optional, defaults to false.
|
✖ writer
|
String. Path to the generation writer's (generator's) JS.
The generation writer is expected to comply with
✖ Generation writer interface
.
Logipard contains some built-in generation writers:
✖ Built-in generation writers
|
reader
field of an extract job item (
✖ reader
).
Extraction reader must be implemented as CommonJS module that exposes the following interface...Name
| Description
|
---|---|
✖ async .parseInput({ buffer, itemConfig, filePath })
|
Parse the file content, supplied as Node.JS Buffer, and return the extraction result in LP input format, as joint single string.
|
reader
module via module.exports similar to:exports.parseInput = async function parseInput({ buffer, itemConfig, filePath }) { ... }
Name
| Description
|
---|---|
✖ buffer
|
Buffer, the input file supplied in plain binary form. Dealing with encoding is up to the reader.
|
✖ itemConfig
|
dictionary (as Object), the piece of configuration object related to this job's item (
✖ items[]
).
The reader can read all members of the item config object, but it is a good style to keep reader-specific configuration under a member sub-object named after the reader.
|
✖ filePath
|
string, the path to the file (project root agnostic, ready for standalone use in
fs or path ).
Can be used for reference if information from the file alone is not sufficient for the reader's purposes. |
Name
| Description
|
---|---|
✖ .lpNameRegexp(bounds,flags)
|
Get RegExp for matching a string that fits as a LP item name. On success, the
[0] of the match is the name string. |
✖ .parseName(nameString)
|
Parse LP name string into array of name fragments.
|
✖ .currentScopeNodeName
|
Get full unaliased name of the currently scoped node (as array of string name fragments). Read-only.
|
✖ .resolveParsedName(parsedName)
|
Get node full FDOM name of a node by a parsed name array (obtained via
✖ .parseName(nameString)
). Useful
when a custom tag is assumed to contain a FDOM name, and the processor needs to resolve it by the same rules
as
<#ref name#> in this scope. |
✖ .items[]
|
The array of items contained in the tag, each element is either string (content) or a non-string object (nested tag, which should be processed via
✖ async .processTag(tagItem)
).
|
✖ async .processTag(tagItem)
|
Process the (nested) tag item, as LP would if encountered this tag normally inline.
|
✖ .text
|
The tag content as a single string, assuming no embedded tags exist in the content; otherwise null.
|
[0]
of the match is the name string.Name
| Description
|
---|---|
✖ bounds
|
String, optional, default
'' , can also be '^' , '$' , '^$' . Specifies the limit assertions to include into the regexp. If it contains ^ ,
the ^ is added to start of the regexp. If it contains $ , the $ is added to end of the regexp. |
✖ flags
|
String, optional, default
'g' . Set of regexp flags to add. |
<#ref name#>
in this scope.Name
| Description
|
---|---|
✖ parsedName
|
Array, the parsed name as returned by
✖ .parseName(nameString)
.
|
processTag
, since the writer does not have much context to do anything reasonable with it anyway.writer
field of a compile job item (
✖ writer
).
Compilation writer must be implemented as CommonJS module that exposes the following interface...Name
| Description
|
---|---|
✖
async .processCustomTag({ modelOutput, targetNodeName, tagName, toolkit, sourceFile })
|
Process a custom inline tag within the specified target node's content. Interpretation of the tag is up to the writer: it may be appending of some model representation specific type of content, or some adjustments
to the content output process, etc.
|
✖ async .openModelOutput({ itemConfig, workDir })
|
Initialize the compiled model storage, or open the existing one for update.
|
✖ async .closeModelOutput({ modelOutput })
|
Finalize the model output and invalidate the handle. A writer will be opened and closed exactly once per compile job item; we can call it a model update session.
|
✖ async .invalidateSourceFile({ modelOutput, sourceFile, newDependencies })
|
Invalidate the given source file. All content and tag-ons added from this source file (
✖ async .appendContent({ modelOutput, targetNodeName, content, sourceFile })
,
✖ async .tagTo({ modelOutput, tagNodeName, targetNodeName, sourceFile })
) should be removed from the storage
or archived, as they are going to be replaced by a newer version of the input. Note that source file here, as well as in other methods, means LP input source file created at (
✖ Extraction stage
), not the
user-facing annotation source file(s), so it will contain
.lpinput extension and will be located at the path according to the corresponding extraction job's
✖ outDir
. |
✖ async .appendContent({ modelOutput, targetNodeName, content, sourceFile })
|
Append content to the specified target node. Only text content is added this way, for other content components there are other methods.
|
✖ async .tagTo({ modelOutput, tagNodeName, targetNodeName, sourceFile })
|
Tag a target node with a specific tag node. Can also be worded as "tag (apply) the specific tag node on a given target node". That is,
tagNodeName node will be added to list of targetNodeName node's tags. |
✖
async .appendRef({ modelOutput, targetNodeName, refNodeName, refText, sourceFile })
|
Append an inline reference to the specified target node's content.
|
writer
module via module.exports similar to:exports.openModelOutput = async function openModelOutput({ itemConfig, workDir }) { ... }
exports.closeModelOutput = async function closeModelOutput({ modelOutput }) { ... }
exports.invalidateSourceFile = async function invalidateSourceFile({ modelOutput, sourceFile, newDependencies }) { ... }
exports.appendContent = async function appendContent({ modelOutput, targetNodeName, content, sourceFile }) { ... }
exports.tagTo = async function tagTo({ modelOutput, tagNodeName, targetNodeName, sourceFile }) { ... }
exports.appendRef = async function appendRef({ modelOutput, targetNodeName, refNodeName, refText, sourceFile }) { ... }
exports.processCustomTag = async function processCustomTag({ modelOutput, targetNodeName, tagName, toolkit, sourceFile }) { ... }
Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
).
|
✖ targetNodeName
|
String, the full FDOM name of the target node where the custom tag was encountered.
|
✖ tagName
| String, the name of the custom tag.
|
✖ toolkit
|
Object, a set of utility functions provided for custom tag processing. See (
✖ Compile stage writer toolkit for custom tag processor
).
|
✖ sourceFile
|
String, the path to the source file where this custom tag originates.
|
Name
| Description
|
---|---|
✖ itemConfig
|
dictionary (as Object), the piece of configuration object related to this job's item (
✖ items[]
).
The writer can read all members of the item config object, but it is a good style to keep writer-specific configuration under a member sub-object
named after the writer.
|
✖ workDir
|
string, the path to project root directory, ready for standalone use in
fs or path . It is useful if the writer's configuration
must contain any file/directory paths that should be project root relative. |
Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
). Assumed no longer valid after this call.
|
.lpinput
extension and will be located at the path according to the corresponding extraction job's
✖ outDir
.Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
).
|
✖ sourceFile
|
string, the path to the input file to invalidate, relative to the compile job's
✖ inRootDir
.
|
Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
).
|
✖ targetNodeName
|
string, the full FDOM name of the target node where content will be appended.
|
✖ content
|
string, the content to append to the target node.
|
✖ sourceFile
|
string, the path to the input file from which this content originates, relative to the compile job's
✖ inRootDir
.
|
tagNodeName
node will be added to list of targetNodeName
node's tags.Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
).
|
✖ tagNodeName
|
string, the full FDOM name of the tag node to apply.
|
✖ targetNodeName
|
string, the full FDOM name of the target node on which the
tagNodeName will be applied. |
✖ sourceFile
|
string, the path to the input file from which this tagging originates, relative to the compile job's
✖ inRootDir
.
Storing the tagging origin makes sense in context of subsequent invalidation (
✖ async .invalidateSourceFile({ modelOutput, sourceFile, newDependencies })
) - a tag stays in effect as long as there remains at least one non-invalidated
source for applying it. (Note that
tagTo can be called for same tagNodeName and targetNodeName multiple times with different sourceFile -s.) |
tagTo
can be called for same tagNodeName
and targetNodeName
multiple times with different sourceFile
-s.)tagTo
-s, and exactly once for every
sourceFile
for which any tagTo
-s (and other content adding methods) are invoked. That means, if a tag stays after all source invalidations, then invocation of each tagTo
will re-validate
the tag application from the corresponding sourceFile
.Name
| Description
|
---|---|
✖ modelOutput
|
Model output handle (as returned by
✖ async .openModelOutput({ itemConfig, workDir })
).
|
✖ targetNodeName
|
String, the full FDOM name of the target node where reference will be appended.
|
✖ refNodeName
|
String, the full FDOM name of the referenced node.
|
✖ refText
|
String, the alt text of the reference. Can be empty (and should be stored as such in the model, as generators can take it as hint for using an appropriate default display text).
|
✖ sourceFile
|
String, the path to the input file from which this reference originates, relative to the compile job's
✖ inRootDir
.
|
writer
field of a generate job item (
✖ writer
).
Generation writer must be implemented as CommonJS module that exposes the following interface...Name
| Description
|
---|---|
✖ async .perform({ workDir, itemConfig, errors })
| Perform the generation process.
|
writer
module via module.exports similar to:exports.perform = async function perform({ workDir, itemConfig, errors }) { ... }
Name
| Description
|
---|---|
✖ workDir
|
String, the path to project root directory, ready for standalone use in
fs or path . |
✖ itemConfig
|
Dictionary (as Object), the piece of configuration object related to this job's item (
✖ items[]
).
The writer can read all members of the item config object, but it is a good style to keep writer-specific configuration under a member sub-object
named after the generator.
|
✖ errors
|
Array, a collection of errors (as JS Error objects) encountered during processing that should be appended to.
|
lpgwrite-example
.
It is specified by renders[]/renderer
field of a lpgwrite-example
's generate job item (
✖ renderer
).
Renderer must be implemented as CommonJS module that exposes the following interface...Name
| Description
|
---|---|
✖ async .render({ workDir, rendererConfig, input, errors })
|
Render the output using the specified renderer configuration and input provided by the caller (
✖
${LP_HOME}/lpgwrite-example: An example generator of single-page HTML/MD documentation
).
|
renderer
module via module.exports similar to:exports.render = async function render({ workDir, rendererConfig, input, errors }) { ... }
Name
| Description
|
---|---|
✖ workDir
|
String, the path to project root directory, ready for standalone use in
fs or path . |
✖ rendererConfig
|
Dictionary (as Object), the piece of configuration object related to this renderer item (
✖ renders[]
).
The renderer can read all members of the item config object, but it is a good style to keep renderer-specific configuration under a member sub-object named after the renderer.
|
✖ input
|
The input data to be rendered. Object of this format:
✖ Input format for lpgwrite-example renderer
.
|
✖ errors
|
Array, a collection of errors (as JS Error objects) encountered during rendering that should be appended to.
|
Name
| Description
|
---|---|
✖ .toc[]
|
Array of items for table of contents. Each item is a dictionary (as Object) with the following members...
|
✖ .itemsByUid[uid]
|
Dictionary by UID (string). Same items as
✖ .items[]
expanded flat, but keyed by UID (
✖ .uid
).
|
✖ .items[]
|
Array of items to display, ordered in the suggested display order when on single page. Each array element is dictionary (as Object) with the following members...
|
Name
| Description
|
---|---|
✖ .title
| String, the item's human readable title.
|
✖ .uid
|
String, the item's UID (key in
✖ .itemsByUid[uid]
).
|
✖ .subEntries[]
|
Array (non-null, at least empty), nested items of this TOC item. Each element has the same structure as a root element of
.toc[] , including next level
.subEntries[] (and so on). |
Name
| Description
|
---|---|
✖ .uid
|
String, the item's UID, can be used to access this item via
✖ .itemsByUid[uid]
.
|
✖ .modelBasic[]
|
Basic part of item's model to be visible in brief display mode, is always shown. The array (non-null, at least empty) of elements in the display order, each element can contain some of the following members...
|
✖ .modelMore[]
|
The additional part of the item's model to display in full display mode, in addition to basic one. The array (non-null, at least empty) that can contain the same elements
as
✖ .modelBasic[]
.
|
Name
| Description
|
---|---|
✖ .itemTitle
|
String. If the member is present, it marks this element as an item title, and contains the human readable text of the item title.
|
✖ .uid
|
String, defined only if
✖ .itemTitle
is present. UID of the target (or titled) item, the same as
✖ .uid
.
|
✖ .item
|
String. If the member is present, it marks this element as a placeholder for emitting a nested item, and contains UID of this item, the same
✖ .uid
.
Note that same item (with same UID) can occur multiple times in the document, and one of these occurrences will be suggested as home (primary) location for the item - check
✖ .isHomeLocation
if this matters for the rendered document format.
|
✖ .isHomeLocation
|
Boolean, defined only if
✖ .item
is present. If true, this location is suggested as the item's home location. There is only one home location for each item.
|
✖ .printType
|
String, defined only if
✖ .item
is present. Defines the suggested display mode for item emitted into this placeholder. Can be either of:
|
✖ .text
|
String. If the member is present, it is a Markdown text fragment. Some of HTML-like tags, case-sensitive, should be interpreted as LP inline references (the text properties are HTML-encoded):
|
✖ .openSection
|
String. If the member is present, it marks this element is opener of a titled section, and contains the section ID to be matched in later
✖ .closeSection
.
|
✖ .closeSection
|
String. If the member is present, it marks this element is closure of a titled section, and contains the section ID to close, matching earlier
✖ .openSection
.
|
✖ .title
|
String, defined only if
✖ .openSection
is present. Title of the opened section.
|
✖ .table
|
If the member is present, it marks a table block. Object with the following member properties...
|
✖ .list[][]
|
Array of arrays of string. If the member is present, it marks this element is a (flat unnumbered) list. Each element of the array is a list item, each sub-element is a markdown text
(same as in
✖ .text
), and the sub-elements are assumed to be appended in the array order to form the line.
|
<lp-src file="filename"></lp-src>
(no inner tag text, file
is HTML-encoded): inline reference to LP input source file, with no .lpinput
suffix. Is always present, it is up to the renderer to strip
it or to interpret it.<lp-ref uid="UID" text="display text"></lp-ref>
(no inner tag text, uid
and text
are HTML-encoded): inline LP link to an item (as per <#ref ...#>
). UID is the same as
✖ .uid
.
Display text can be empty, in which case it is recommended to use item's title (
✖ title
).Name
| Description
|
---|---|
✖ .headers[]
|
Array of headers, in the display order of columns. Each element is a string with column header as markdown text (same as in
✖ .text
).
|
✖ .rows[]
|
Array of rows, in the display order. Each element is array of columns, in the display order of columns, with each sub-element is a string with column data as markdown text
(same as in
✖ .text
).
|
translator
field of a lpgwrite-example
's generate job item (lpgwrite-example/translator
).
Translator must be implemented as CommonJS module that exposes the following interface...Name
| Description
|
---|---|
✖ async .translate(str, translatorArgs)
| Perform the translation and return the result.
|
translator
module via module.exports similar to:exports.translate = async function translate(str, translatorArgs) { ... }
Name
| Description
|
---|---|
✖ str
|
String, the string to translate.
The string is assumed to be a Markdown code, with the following possible HTML-ish tags:
|
✖ translatorArgs
|
The argument value specified for the translator in
✖ translatorArgs
. Passed as the object from configuration as is.
|
<lp-ref item="FDOM name">ref alt text</lp-ref>
: a LP inline link. The ref alt text
can be translated (note it is HTML-encoded), the rest part must be left intact.<lp-tag>...text...</lp-tag>
: a custom markup tag (as per
✖ extraTags
). ...text... is the HTML-encoded JSON code of the object and should remain such in the
translation result. It is up to the translator to be aware of what custom tags are possible and what is the correct translation scope within them. In an unidentifiable case, this fragment should be left as is.writer: "${LP_HOME}/lpcwrite-basic-json" $
in
✖ writer
.lpcwrite-basic-json
in the compilation item configuration:{
...
writer: "${LP_HOME}/lpcwrite-basic-json" $, // paste verbatim!
lpcwrite-basic-json: {
outFile: ...,
extraTags: { // optional
...
}
}
}
Name
| Description
|
---|---|
✖ extraTags
|
Object, as a dictionary
tagName: tagContentType . Describes additional tags that the writer will recognize in the input. These will appear in the compiled model via customTag objects
(see
✖ .content [lpgread-basic-json]
).
The custom tags not described here will be ignored with a warning. Note that tag names are case-insensitive (will be lowercased). |
✖ outFile
|
String. Path to the output JSON file (non-absolute path is relative to project root). The file is overwritten, but the model representation possibly already existing there will be updated rather than
rebuilt from scratch, attempting to preserve the parts of data for which the input didn't actually change.
|
tagName: tagContentType
. Describes additional tags that the writer will recognize in the input. These will appear in the compiled model via customTag
objects
(see
✖ .content [lpgread-basic-json]
).
The custom tags not described here will be ignored with a warning. Note that tag names are case-insensitive (will be lowercased).extraTags: {
"tagName1": <tag1 content type>, // string
"tagName2": <tag2 content type>, // string
...
}
lpgwrite-example
by itself only determines general, format-agnostic structure of the document, while rendering of the actual document is delegated to sub-plugin named renderer. As the title suggests,
built-in renderers for single-page HTML and single-page MD are available, but in fact the user can plug in its own renderers.lpgwrite-example
adds some extra FDOM comprehensions:%title
contains human readable title for the item. If there is no %title
member, the title is assumed to be the same
as the item's short name. It typically follows the item opening part in the pattern: <#./%title: Your title#>
(note it is a mistake to omit ./
or :
, it may need some training to unlearn doing this).lpgwrite-example
in the generation item configuration (
✖
${LP_HOME}/lpgwrite-example: An example generator of single-page HTML/MD documentation
):{
...
writer: "${LP_HOME}/lpgwrite-example" $, // paste verbatim!
lpgwrite-example: {
trace: ...,
program: [...],
renders: [
{
docModel: ...,
renderer: ...,
... // renderer-specific added config
},
...
]
}
}
lpgwrite-example
object, as follows:Name
| Description
|
---|---|
✖ renders[]
|
The list of sub-jobs to actually render a document. In addition to the members listed below, can contain additional members with renderer specific configuration fragments.
|
✖ trace
|
Boolean, optional. If true, then document program processing will have some added log verbosity to allow you tracking the details of what is and is not done.
|
✖ program[]
|
An array of document program instructions (see
✖ ${LP_HOME}/lpgwrite-example document program
)
|
Name
| Description
|
---|---|
✖ renderer
|
String, path to the renderer module. The renderer must comply with
✖ Interface for lpgwrite-example's renderer
. Logipard comes with the following built-in renderers...
|
✖ docModel
|
String, document model to use. Refers to docModel in the document program, specifically to value of
name in
✖ Document model definition
. |
lpgwrite-i18n-assist-trn-none
is provided, but the user can in fact plug in its own translators.lpgwrite-i18n-assist
forms sort of "sub-pipeline" - its output is intermediate and is meant to be picked by actual generators that follow it in the lp-generate
items list.
Hence, place lpgwrite-i18n-assist
item before the generators that rely on its result.lpgwrite-i18n-assist
adds extra FDOM comprehension:%title
member, if available, contains a human readable title for the parent item (similarly to
✖ ${LP_HOME}/lpgwrite-example
)%title
member is tagged with tag %noloc
, this title is assumed non-localizeable - it will not be included into the interim file, and will be transferred to the translated FDOM file
as is<#./%title: This title will be translated#>
, and <#./%title %noloc: This title will not be translated#>
.
This comprehension does not conflict with one from
✖ ${LP_HOME}/lpgwrite-example
and complements it seamlessly.lpgwrite-i18n-assist
in the generation item configuration:...
{
inFile: ...,
writer: "${LP_HOME}/lpgwrite-i18n-assist" $, // paste verbatim!
lpgwrite-i18n-assist: {
translator: ...,
items: [
{
outFile: ...,
interimFile: ...,
interimFileCharset: ...,
translatorArgs: ...
}
]
}
},
...
lpgwrite-i18n-assist
object, as follows:Name
| Description
|
---|---|
✖ items[]
|
Array. Items to process within this
lpgwrite-i18n-assist job, using the same translator specified by the
✖ translator
.
Each items[] element is an object as follows: |
✖ translator
|
String, path to the translator module, absolute or relative to project root. The translator must comply with
✖ Interface for lpgwrite-i18n-assist's translator
.
Logipard comes with the built-in dummy translator
✖
${LP_HOME}/lpgwrite-i18n-assist-trn-none: Dummy translator for lpgwrite-i18n-assist generator
.
|
lpgwrite-i18n-assist
job, using the same translator specified by the
✖ translator
.
Each items[]
element is an object as follows:Name
| Description
|
---|---|
✖ translatorArgs
|
Arbitrary JSON/LPSON value, optional (default = null). The object or value that will be transferred to the translator's method
✖ async .translate(str, translatorArgs)
.
|
✖ outFile
|
String. Path to the output JSON FDOM file with the translated text, absolute or relative to project root. Assumed to have .json extension.
|
✖ interimFile
|
String. Path to the interim translation file, absolute or relative to project root. Essentially an almost-plain text file, so assumed to have .txt extension.
|
✖ interimFileCharset
|
String, optional (default = "utf-8"). The charset to use in the interim file.
|
...
## Item: /domain.logipard/interfaces/compile/%title
# lp-stage-plugin-ifs.lp-txt
/ "Para:Ev+yL9F/vTiMmuKTf0MCOtkPdxbajKJGYcTegdUiEhKX4g0C7A+PMVsfHPOVu90ZRrksqgrsekUutwoGUA72zw=="
Interfaces related to compilation stage
\ "Para:Ev+yL9F/vTiMmuKTf0MCOtkPdxbajKJGYcTegdUiEhKX4g0C7A+PMVsfHPOVu90ZRrksqgrsekUutwoGUA72zw=="
## Item: /domain.logipard/interfaces/compile
## Item: /domain.logipard/interfaces/compile/writer-toolkit/%title
# internal/lp-compile-tools.js
/ "Para:vGDelX4EnoLn07hY9QgDuASeK7cUvLxrere0vuqNEu/pOGNVoVfpoUEsEtI0IW/gLrN3w2BHhUdktg51eEeEKg=="
Compile stage writer toolkit for custom tag processor
\ "Para:vGDelX4EnoLn07hY9QgDuASeK7cUvLxrere0vuqNEu/pOGNVoVfpoUEsEtI0IW/gLrN3w2BHhUdktg51eEeEKg=="
...
/ "Para:..."
...\ "Para:..."
lines delimit the translated content, which you can edit manually. lpgwrite-i18n-assist
will not overwrite them unless the corresponding pieces of the original
content are changed (although they can move around due to modifications of the FDOM structure).lpgwrite-i18n-assist
keeps granularity of the editable units per paragraph or list item, keeps them
grouped by item and retaining the model order.Para
lines themselves or the codes in it - these are tags to match these fragments against their counterpart in the original content.
The lines outside should be treated as comments for navigation convenience, and are subject for changes with no warranties.Name
| Description
|
---|---|
✖ async loadFromFile(filePath [, extractSrcFile])
|
Load the model into memory and expose for reading in FDOM comprehension (
✖ FDOM querying
). Module level function.
|
const { loadFromFile } = require('logipard/lpgread-basic-json');
async main() { // the loader API is async
var reader = await loadFromFile("your-fdom.json");
// assuming your model contains the items named as below...
reader.nameAlias("domain.your.program", "M"); // set name alias
var classesSection = reader.item("M/classes"); // <Item>, domain.your.program/classes
var classA = reader.item(classesSection, "classA"); // <Item>, domain.your.program/classes/classA
// let's find items for all classes extended by A and print their titles
var extends = reader.item("%extends"); // %extends
var queryCtxItemsExtByA = reader.newQueryContext(); // <QueryContext>
var itemsExtByA = queryCtxItemsExtByA.with(classA) // or .with(queryCtxItemsExtByA.collection(classA))
.query({ inMembersThat: { named: "^%extends$" }, recursive: true, query: { tagsThat: true }})
.teardownCollection(); // itemsExtByA = <Collection>
for (var itemExtByA of itemsExtByA) { // itemExtByA = <Item>
console.log(reader.item(itemExtByA, "%title").content[0]); // assume all items have %title members with plain-text only content
}
}
Name
| Description
|
---|---|
✖ filePath
|
String. Path (same as for Node.JS
fs methods) to the FDOM JSON file. |
✖ extractSrcFile
|
Bool, optional (default is false). If true, then references to LP source file names will be added as inline text fragments.
Can be useful when reading for or with regard to diagnostic purposes.
|
const { loadFromFile } = require('logipard/lpgread-basic-json.js');
var reader = await loadFromFile("my-fdom-file.json");
Name
| Description
|
---|---|
✖ .item([itemRelTo,] name)
|
Get an item by its full or relative FDOM name. Similar to
✖ .item([baseItem ,] name)
, but cannot support aliases since it is used outside a context.
|
✖ .itemByUid(uid)
|
Return an item by UID (see
✖ .uid
). Since this is a reader-specific method not prescribed by FDOM comprehension,
it can return
null for non-existent item. Same as
✖ .itemByUid(uid)
. |
✖ .newQueryContext()
| Create a new query context object.
|
Name
| Description
|
---|---|
✖ itemRelTo
|
Optional. If specified, it denotes an item that is considered as base for
✖ name
path, which is considered a relative path in this case.
Can be either of:
|
✖ name
|
The item name, full if
itemRelTo is not provided, or relative to it otherwise. |
Name
| Description
|
---|---|
✖ .content [lpgread-basic-json]
|
Read-only property, array of content elements. Implements
✖ .content
in
✖ logipard/lpgread-basic-json.js
specific flavour.
|
✖ .uid
|
Read-only property, string. The item's shortcut UID in the JSON representation of the model.
|
✖ .toString()
| JS stringification
|
Name
| Description
|
---|---|
✖ .content
|
Read-only property. The item content (text, inline references, and whatever else the reader's backing model supports).
|
✖ .name
|
Read-only property, string. The item's full path name (with no namespace aliases)
|
✖ .shortName
|
Read-only property, string. The item's short name (last segment of the full path name).
|
✖ .tags
|
Read-only property. Collection of the item's tags.
|
✖ .members
|
Read-only property. Collection of the item's members.
|
✖ .isNull
|
Read-only property, bool. Check if item is empty (true) or not (false).
|
✖ .parent
|
Read-only property,
✖ <Item>
. Return parent item (one this item is member to). For root item returns null (not null item).
|
✖ .isConditionTrue(lpqCtx, condSpec)
|
Check if the item satisfies a certain condition, must be done relative to a query context (in order to resolve condition and collection aliases).
|
{ ref: <Item>, text: string }
(ref is
✖ <Item> [lpgread-basic-json]
): inline item ref{ customTag: object }
: a custom tag, originating from
✖ ${LP_HOME}/lpcwrite-basic-json: Writer of FDOM into JSON file
Name
| Description
|
---|---|
✖ .content
|
Read-only property. The item content (text, inline references, and whatever else the reader's backing model supports).
|
✖ .name
|
Read-only property, string. The item's full path name (with no namespace aliases)
|
✖ .shortName
|
Read-only property, string. The item's short name (last segment of the full path name).
|
✖ .tags
|
Read-only property. Collection of the item's tags.
|
✖ .members
|
Read-only property. Collection of the item's members.
|
✖ .isNull
|
Read-only property, bool. Check if item is empty (true) or not (false).
|
✖ .parent
|
Read-only property,
✖ <Item>
. Return parent item (one this item is member to). For root item returns null (not null item).
|
✖ .isConditionTrue(lpqCtx, condSpec)
|
Check if the item satisfies a certain condition, must be done relative to a query context (in order to resolve condition and collection aliases).
|
Name
| Description
|
---|---|
✖ .itemByUid(uid)
|
Return an item by UID (see
✖ .uid
). Since this is a reader-specific method not prescribed by FDOM comprehension,
it can return
null for non-existent item. |
✖ .clearNameAlias(aliasName)
|
Clear item name alias set by
✖ .nameAlias(aliasName, item)
. The alias is no longer valid until re-assigned.
|
✖ .clearCollectionAlias(collectionAliasName)
|
Clear collection alias set by
✖ .collectionAlias(aliasName, ...collectionSpecs)
. The alias is no longer valid until re-assigned.
|
✖ .clearQueryAlias(queryAliasName)
|
Clear query name alias set by
✖ .queryAlias(aliasName, ...querySpecs)
. The alias is no longer valid until re-assigned.
|
✖ .clearConditionAlias(conditionAliasName)
|
Clear condition name alias set by
✖ .conditionAlias(aliasName, condSpec)
. The alias is no longer valid until re-assigned.
|
Name
| Description
|
---|---|
✖ .nameAlias(aliasName, item)
|
Set an item alias name (which should be a valid shortname), that can later be used as standalone item name or as starter for another item name within this
✖ <QueryContext>
.
Behaviour in case of already existing alias with the given name is implementation specific.
|
✖ .collectionAlias(aliasName, ...collectionSpecs)
|
Set a named collection alias that can be used later to reference the collection within this context (
✖ <CollectionSpec>
). The collection is built up from collections corresponding to each element of the
specs list. This alias is permanent within the context, unlike query local alias (
✖ Set local collection alias ["alias ..."]
).
|
✖ .queryAlias(aliasName, ...querySpecs)
|
Set a named query alias that can be used later to reference the query within this context (
✖ <QuerySpec>
). The list is interpreted as a composite query.
|
✖ .conditionAlias(aliasName, condSpec)
|
Set a named condition alias that can be used later to reference the condition within this context (
✖ <Condition>
).
|
✖ .item([baseItem ,] name)
|
Return item by given path name, either full or relative to the provided base item. The full item name's first segment shortname can be a name alias defined in this
✖ <QueryContext>
.
|
✖ .collection(...collectionSpecs)
|
Returns a collection specified by a list of collection item specs. Each list item is a
✖ <CollectionSpec>
.
|
✖ .with(...collectionSpecs)
|
Set current collection for the subsequent query (call to
✖ .query(...querySpecs)
). Collection is built up from collections corresponding to each element of the specs list.
.with effectively initiates the query chain, but can be used in the middle of the chain as well to override the current collection after a certain step. |
✖ .query(...querySpecs)
|
Perform a query, or a list of queries interpreted as a composite query, given the current collection specified by preceding
✖ .with(...collectionSpecs)
or resulting from previous
.query calls.
Note that the resulting collection is not returned immediately, it becomes new current collection instead. |
✖ .teardownCollection()
|
Finalize the query and return the result (the current collection at time of the call). The current collection itself is reset, so the next query must be re-initialized, starting over from
✖ .with(...collectionSpecs)
.
|
✖ .currentCollectionAlias(aliasName)
|
Set a named collection alias for the current collection that can be used later to reference the collection within this context (
✖ <CollectionSpec>
). It is only usable mid-query (when the current collection is
meaningful); otherwise it is an error. This is a local query alias, unlike a permanent one (
✖ Set local collection alias ["alias ..."]
).
|
✖ .compileQuery(...querySpecs)
|
Compile a query into a handle object usable later to reference the query within this context (
✖ <QuerySpec>
). The list is interpreted as a composite query.
|
lpgwrite-example-render-html
in the
✖ renders[]
generation item configuration:lpgwrite-example: {
...
renders: [
{
docModel: ...,
renderer: "${LP_HOME}/lpgwrite-example-render-html" $, // paste verbatim!
lpgwrite-example-render-html: {
outFile: ...,
emitToc: ...,
inTemplateFile: "logipard-doc.tpl.html",
cssClasses: {
// all of these are optional, the whole cssClasses can be skipped at all
itemTitle: ...,
rawTitle: ...,
paragraph: ...,
verbatimSpan: ...,
linkSpan: ...,
moreSpan: ...,
elsewhereSpan: ...,
actionSpan: ...,
offSiteBlock: ...
},
htmlPlaceholder: ...,
cssPlaceholder: ...,
extraTokens: {
TOKEN_ID: "token value",
ANOTHER_TOKEN_ID: "token value 2",
...
},
localizedKeywords: {
// adjust these according to the target locale
SNAPBACK: "Snapback",
SNAPBACK_AND_SCROLL: "Snapback & Scroll",
ELEVATE: "Elevate",
RESET: "Reset",
ELEVATE_TO: "Elevate to...",
COPY_ITEM_NAME: "Copy this item's LP FDOM full name to clipboard:",
ITEM_UNFOLDED_ELSEWHERE: "Item unfolded elsewhere on page, click/tap to unfold here...",
MORE: "More... >>",
TABLE_OF_CONTENTS: "Table of contents"
},
addSourceRef: ...
}
},
...
]
}
lpgwrite-example-render-html
object inside the corresponding renders[]
item, with the following members...Name
| Description
|
---|---|
✖ outFile
|
String. Path to the output document file (.html) to write, absolute or relative to the project root.
|
✖ emitToc
|
Boolean, optional (default = true). If true, then the renderer will add TOC section to the document.
|
✖ inTemplateFile
|
String. Path to the template file for the output HTML, absolute or relative to the project root.
The template is blueprint for the resulting HTML file with added placeholders for generated CSS, HTML, and possible extra tokens.
|
✖ cssClasses
|
Dictionary of strings, optional. The CSS classes to apply to certain elements of the output document. Note these ones are
meant to be cascaded with
lpgwrite-example-render-html 's generated classes that determine layout, so should only contain the
data that affects appearance (font, color, background, padding, etc.), not layout (display, grid or related, flex or related, position, z-order, etc.). |
✖ htmlPlaceholder
|
String. The exact placeholder string to replace with generated HTML code, should be placed inside
<body> tag. The inserted code
will be wrapped into a single <div> element with no explicit classes or styling directly on it. |
✖ cssPlaceholder
|
String. The exact placeholder string to replace with generated CSS code, should be placed inside
<style> tag, outside any block. |
✖ extraTokens
|
Dictionary of strings, optional. Any additional tokens to substitute in the template. The keys are exact placeholder strings to replace
(they should not duplicate
htmlPlaceholder , cssPlaceholder , or each other), the values are the raw HTML code to insert in their places. |
✖ localizedKeywords
|
Dictionary of strings, optional. The list of strings used for certain UI purposes in the generated document, expected to be appropriate
for the document's target locale. These strings are plain text.
|
✖ addSourceRef
|
Boolean, optional (default = false). If set to true, then the generator will add source file names to the text fragments, it will help
to remind the origin for a particular text piece. This mode is useful while proof reading and debugging of the draft document, especially as your project and the information
across it grows sufficiently multi-file.
|
lpgwrite-example-render-html
's generated classes that determine layout, so should only contain the
data that affects appearance (font, color, background, padding, etc.), not layout (display, grid or related, flex or related, position, z-order, etc.).like this one
). It doesn't affect code blocks - those are rendered as <code>
tags and are styled via them.lpgwrite-example-render-md
in the
✖ renders[]
generation item configuration:lpgwrite-example: {
...
renders: [
{
docModel: ...,
renderer: "${LP_HOME}/lpgwrite-example-render-md" $, // paste verbatim!
lpgwrite-example-render-md: {
outFile: ...,
emitToc: ...,
header: ...,
footer: ...,
addSourceRef: ...
}
},
...
]
}
Name
| Description
|
---|---|
✖ outFile
|
String. Path to the output document file (.md) to write, absolute or relative to the project root.
|
✖ emitToc
|
Boolean, optional (default = true). If true, then the renderer will add TOC section at the start of the document.
|
✖ header
|
String, optional. If specified, the renderer will prepend this string to the beginning of the document, before the TOC if any, so it is useful to make header and annotation.
The string is raw Markdown code.
|
✖ footer
|
String, optional. If specified, the renderer will append this string at the end of the document. The string is raw Markdown code.
|
✖ addSourceRef
|
Boolean, optional (default = false). If set to true, then the generator will add source file names to the text fragments, it will help
to remind the origin for a particular text piece. This mode is useful while proof reading and debugging of the draft document, especially as your project and the information
across it grows sufficiently multi-file.
|
[UNTRANSLATED-<language>]
prefix,
which can be used to search the interim file for updated and/or untranslated strings.translatorArgs
under lpgwrite-i18n-assist
member in the
✖ renders[]
generation item configuration:lpgwrite-i18n-assist: {
...
renders: [
{
docModel: ...,
renderer: "${LP_HOME}/lpgwrite-i18n-assist" $, // paste verbatim!
lpgwrite-i18n-assist: {
translator: "${LP_HOME}/lpgwrite-i18n-assist-trn-none" $, // paste verbatim!
items: [
{
...
translatorArgs: { lang: ... }
},
...
]
},
...
]
}
.lpinput
extension is appended to the original names),
into a directory specified by
✖ outDir
in the extract job item.<#tag-name ... #>
(tag name is alphanumeric with allowed -
's). Tags can be nested. Aside from tag components, the rest of the
text format (plaintext, Markdown, HTML, or whatever) is opaque from LP input perspective, its interpretation is up to compilation and generation stages.<#LP ./dataStruct { <#./%title: FDOM data structure#> <#./%order: 1#>
The FDOM data structure explanation starts best with a visual example...
<#img ./lp-model.png #>
The model data consists of *nodes*.
- blah
- blah blah
blah blah blah (see: <#ref dataStruct/parent-member#>, <#ref dataStruct/tagged-tag#> ).
blah blah total of 11 nodes.#>
<#~delimiter~ ... ~delimiter~#>
boundaries. Delimiter is a sequence of any non-~
characters, including empty, and it must match
for escape starting and finishing boundary. Everything between the boundaries is taken as verbatim plain text:<#this-is-tag
this is content data
<#this-is-tag-too and this is content data too#>
<#~~
This is all plain text, <#even-this#>
~~#>
This is again content data <#and-a-tag#>
<#~a~
This is again plain text again, <#~~ and even this ~~#>
~a~#>
#>
-
(it is also possible to write <-#tag-name ... #>
) - these are assumed commented-out and do not have any effect on content and FDOM, although
they still have to be of consistent format (correctly closed tags and escapes).this is data <#with-markup-tag inside#>
and this is <-#dropped-tag#> data with no markup tag <#-this-is-dropped-tag-either and this <#is-not#>, but is ignored as inside of a dropped one#> really
this is data <#with-markup-tag inside#>
and this is data with no markup tag really
LP
, including actually <#LP ...#>
are reserved for Logipard content feed and directives (these names are case-insensitive). Additionally, <# ... #>
is treated as shortcut to <#LP ...#>
. Also a reserved tag is <#ref ...#>
(for Logipard item linking, see below), this name is also case-insensitive. All other tags are called custom tags,
and their handling is up to compile model writer at
✖ Compilation stage
. Their names MAY be case-sensitive, depending on the model writer implementation.<#LP itemName: ...content... #>
or <# itemName: ...content...#>
Item name can be followed by names of FDOM tags, optionally starting from #
:
<#LP itemName tagItemName1 tagItemName2 ...: ...#>
, <#LP itemName #tagItemName1 tagItemName2 ...: ...#>
, <#LP itemName #tagItemName1 #tagItemname2 ...: ...#>
.<#LP A: this goes to item A#>
<#LP B: this goes to item B#>
<#LP C: this goes to item C#>
<#LP D %tagDWithThis#> <-# if there is no content, just FDOM tags added, use of `:` is optional#>
<#LP A: this goes to item A
It is absolutely ok to make multiple feeds into the same item, content from them is appended. It is ok to do this even from different input files,
but then you should keep in mind that the order in which the input files are processed is not guaranteed.
#>
<#LP Outer: this goes to item Outer
<#LP Inner: this goes to item Inner#>
this again goes to item Outer
#>
{
for delimiter instead of :
. In this case, even after the digression finishes, the current scope remains until lingering digression closer tag
is encountered:<#LP A: this goes to A
<#LP B { this goes to B#>
this still goes to B
<#LP }
this goes to A (note that, if closer contains a remainder content, then a line break or a markup tag, at least <-# comment#>, after `}`, is essential) #>
this goes to A
<#LP C/D { this goes to C/D#>
<#LP E { lingering digressions can be nested#>
this goes to E
<#LP } #>
this goes to C/D
<#LP } #>
this goes to A
#>
<#LP A: this goes to A
<#LP.: this goes to A too#>
#>
<#LP B/C: this goes to B/C
<#LP.: this goes to B/C too#>
Doesn't make much sense to add content this way, but adding a tag is a reasonable use case:
<#LP . %tag"B/C"WithThis #>
#>
<#LP D/E/F: this goes to D/E/F
<#LP D/E/F/G: this goes to D/E/F/G#>
this goes to D/E/F
<#LP ./G: this goes to D/E/F/G again#>
#>
<#LP A
<#LP: this is in A (same as <#LP#>) #>
#>
#
prefix:<#LP #%tagToCurrentItem#>
..
, ...
and so on ("up dir") starting segments refers to one, two, etc. levels above the current name level:<#LP D/E/F/G: this goes to D/E/F/G
<#LP ..: this goes to D/E/F#>
<#LP ...: this goes to D/E#>
<#LP ....: this goes to D#>
<#LP .....: this goes to root item#>
#>
On that note, the input file content feeds are digressions in root item scope.
You technically can add content to root item scope, but it is quite meaningless and a bad style.
<#LP A/B
<#LP C/D {#>
<#LP ..: this is in C#>
<#LP ...: it might be in root, but there is outer scope to borrow from, so it is in A/B #>
<#LP ....: it is in A #>
<#LP .....: and only this is in root #>
This is again in C/D
<#LP } #>
This is again in A/B
#>
<#LP A/B
<#LP ./C/D/..: . is A/B, ./C/D is A/B/C/D, ./C/D/.. is A/B/C, so in the end it is A/B/C #>
#>
<#LP A:
<#LP B/C: this is B/C (not A/B/C !)
<#LP D: this is indeed D#>
<#LP C: this is B/C#>
<#LP B: this is B#>
<#LP A: B level is exhausted, but then there is A, so it is A#>
#>
#>
<#LP A/B/C
<#LP ./%tag: it is A/B/C/%tag #>
<#LP A C/%tag: we are in A and tag it by A/B/C/%tag
<#ref ./B#> - reference to A/B
#>
#>
<#LP A:
<#LP B/C { #>
This goes to B/C
<#LP } #> <-#closes B/C#>
This goes to A
#>
<#LP A:
<#LP B/C/D { #>
This goes to B/C/D
<#LP } C #> <-#closes C, but leaves B (note the short name is on same line as `}`) #>
This goes to B
<#LP } #> <-# closes "remaining" part of the digression, which is B (we could also use <#LP } B#> with the same effect)#>
This goes to A
#>
.
segment:<#LP A:
<#LP B/C/D { #>
This goes to B/C/D
<#LP } C/. #>
This goes to B/C (would go to B if we used "} C")
<#LP } #>
Again to A
#>
<#LP A:
<#LP B/C { #>
<#LP D/E { #>
this goes to D/E
<#LP } E #>
this goes to D
<#LP } B #>
closed remaining D and the B/C, this goes to A
#>
<#LP A:
<#LP B/C { #>
<#LP D/E:
this goes to D/E, but note that "D/E" is open as non-lingering digression, and we are still within it
THE FOLLOWING IS INCORRECT: <#LP } E #>
Using "} D" or "} B" or "} C" or "} A" here is disallowed as well, as they pop through still-effective D/E.
#>
<#LP } B #>
<#LP F/G {
The same applies to inside of the lingering digression opener until it is finished.
That is, the following is incorrect: <#LP } F #>
#>
but here, as the lingering digression opening is done, the following one is ok:
<#LP } F #>
#>
<#LP-TRACE-WHERE [optional label]#>
inline markup tag which will print the current scope in the location it is placed, along with optionally provided label and explanation of the name resolution.<#REF item/name#>
markup tag. The name specification obeys naming resolution rules (
✖ FDOM item and tag names resolution
).
References are supported at FDOM compilation stage on a built-in basis - FDOM user, such as generator, does not have to invent a custom tag for them.This is <#ref item/name: a link to item/name with alt text#>
<# name tag1 tag2 ...: ...content...#>
), or, if adding to a currently scoped item, later in an auxiliary sub-digression (<#. tag1 tag2 ...#>
).
But it also is possible to do the reverse thing - add the currently scoped item as FDOM tag to some other item: <#LP-TAG-TO other-item-name#>
.<#LP-INCLUDE lp-module-inc.lp-txt#>
or shorter...
<#LP-INC lp-module-inc.lp-txt#>
note that you specify only the file's original extension, no .lpinput suffix
.lpinput-inc
name suffix instead of .lpinput
and are not picked automatically at compilation stage, as they
are assumed to be only parts of "main" files to be included manually. You can't include other "main" files (but it is possible to include a module file from another module file)."lp-extract.gen"
, then for module files it can be like
"lp-extract.gen/lp-includes"
. This is for a good reason. Later, at compile stage, when handling <#LP-inc[lude] file-name#>
directives, the file-name
is interpreted in the following way:.
or ..
, then it is path relative to directory of the processed input file (i. e. of one that contains the <#LP-INCLUDE#>
) - but this is quite a rare use case,<includes-dir>/file-name[.lpinput-inc]
via cascading lookup, that is, starting from directory of the processed input file, if not found there - then
in its ../<includes-dir>/file-name[.lpinput-inc]
, then ../../<includes-dir>/file-name[.lpinput-inc]
, and so on, until found or reached <extracted-input-root>
directory. That is, the strategy similar
to what Node.JS does on require(filename)
. This is the recommended method of arranging and using module files.
For example, you can place a module file as a <extracted-input-root>/<includes-dir>/common.lp-txt[.lpinput-inc]
, and then include it with <#LP-INCLUDE common.lp-txt#>
from any input file
under <extracted-input-root>/**
.outDir
-s much as you like, but you should take care to have them matching directories that will be <extracted-input-root>
and <includes-dir>
at compilation
stage (these are specified by
✖ inRootDir
and
✖ lpIncLookupDirName
, respectively). If multiple extract jobs
are targeting the same compile job, then their outDir
-s must be consistent with the compile job's inRootDir
and lpIncLookupDirName
.M
for the next-to-root domain item name), or to quickly move FDOM fragments to other actual locations
without changing the sources. Syntax for alias definition is: <#LP-ALIAS new-name: old-name#>
After that, new-name
will become an alias to old-name
(the old-name
can still be used on its own).<#LP-INCLUDE ...#>
-d fragments, but on per-include basis, not per the module file as is).
There is no concept of aliasing in FDOM, and in the actual compiled output all the names go resolved.<#LP-ALIAS A/B: C#>
<#LP A/B: this goes to C: A/B #>
<#LP A/B/D: this goes to C/D, because starting part A/B aliases C#>
but:
<#LP A/B/../D: this goes to A/D, because A/B/../D resolves to A/D, and A is not aliased #>
also:
<#LP A: this goes to A
<#LP ./B: this goes to C, because effective resolved name is A/B, which is aliased#>
#>
<#LP-TRACE-WHERE#>
output.<#LP-ALIAS A/B: C#>
<#LP A/B: this goes to C#>
<#LP-ALIAS A/B: D#>
<#LP A/B: this goes to D#>
<#LP-ALIAS A: B#>
<#LP-ALIAS A/C: D#> A/C and B/C refer to D (here A is literal aliased name, B is unaliased actual name)
<#LP-ALIAS A/C/E: F#> A/C/E and B/C/E refer to D/E
<#LP-ALIAS A: G#>
<#LP-ALIAS A/C: H#> A/C and G/C now refer to H, A/C/E refers to H/E, B/C still refers to D!
The actual items at this point are B, D, D/E, F, G
<#ref ...#>
,<#LP A/B: some content#>
<#LP-ALIAS A/B: C#> this will be ignored and flag a warning
<#LP-ALIAS A: D#> and this too
the restriction doesn't affect unused names under A/B though:
<#LP-ALIAS A/B/C: C#>
and the A/B itself still can be alias target:
<#LP-ALIAS E: A/B#>
<#LP E: this goes to A/B#>
<#LP-ALIAS E: C#> E can be redefined, since it is an alias from the very beginning
E
as of an alias, it is no longer possible to use/refer an item with actual E
name, or any of its sub-items, in this input file -
E
always refers to an item currently aliased by E
. So this restriction is introduced to prevent confusing interference between aliases and actual names.<#LP-MACRO Mac %tag1 %tag2: macro-content#>
<#LP test-node Mac: test content#>
the same as:
<#LP test-node %tag1 %tag2: macro-content test content#>
<#LP test-node: test content <#. Mac#> other test content #>
the same as:
<#LP test-node: test content <#. %tag1 %tag2: macro-content#> other test content #>
<#LP test-node: test content <#Mac#>#>
the same as:
<#LP test-node %tag1 %tag2: macro-content test content#>
<#LP-MACRO %is-a-class %class: <#LP-TAG-ON classes#> <#LP ./%title: Class title: #> #>
...
<#LP ClassA %class: this is class A, and we added it to <#ref classes#> by using a macro
<#./%title: A#> <-#it will result in %title = "Class title: A"#>
#>
LP-ALIAS
or even other LP-MACRO
, although it generally doesn't make much sense, and you should be careful with these if you decide to use them after all.
Keep in mind that the name scope inside the macro (i. e. where <# . #>
refers, or where name resolution lookup starts) is the node where it is being inserted, not the macro itself.<#LP-MACRO where-am-I: <#LP-TRACE-WHERE#>#>
<#LP A where-am-I: inside A#>
<#LP B where-am-I: inside B#>
LP-ALIAS
(
✖ Name aliasing
), you can't define macro under same effective full name that has been used in the current input file in one of the following ways:<#ref ...#>
,<#LP-MACRO Mac: macro content#>
<#LP Mac/I: may not work as you expect#>
[
{ nameAlias: "M", name: "domain.logipard" },
{
docModel: {
{
name: "DocMain",
query: [
...
],
sort: { ... }
}
},
forEachItem: [
...
]
},
{
docModel: {
{
name: "DocReadme",
query: [
...
],
sort: { ... }
}
},
forEachItem: [
...
]
},
...
]
{...query}
placeholder.{
byMember: "member-name",
keyFormat: "lexical|natural",
order: "asc|desc"
}
{...sort}
placeholder.Name
| Description
|
---|---|
✖ byMember
|
String. Specifies short name of the member to use as a sorting key. The key consists of the member's content which is interpreted as plain text with trimmed leading and trailing
whitespaces. The member is assumed to contain no nested LP markup, otherwise the actual key contains an unguaranteed value. Key comparison is case sensitive.
|
✖ keyFormat
|
String, optional (default is
lexical ). Can be lexical or natural :
|
✖ order
|
String, optional (default is
asc ). Can be asc (for ascending sorting order) or desc (for descending sorting order). |
lexical
). Can be lexical
or natural
:lexical
: the keys are compared as strings (using string lexicographical comparison).natural
: the keys are split into and compared lexicographically as sequences of numeric and non-numeric fragments, where numeric/numeric sequence segments are compared as numbers,
and non-numeric/numeric and non-numeric/non-numeric are compared as strings. I. e., 1.2.3-a
and 1.10-z
are compared as [1, ".", 2, ".", 3, "-a"]
and [1, ".", 10, "-z"]
, the first differing
segments are 2
and 3
, which are numeric/numeric case, the 2
is less, so 1.2.3-a
is less than 1.10-z
. If the key starts from +
or -
followed by a digit, this +
/-
counts as part of the
number of the first segment, which is considered numeric.item1
, item2
, ...item10
, ...,1
, -2
, 3.14
,1.0.3
{ nameAlias: "ItemAliasName", name: "name-string"}
, where name-string
is a full FDOM name (possibly starting with a previously defined alias).
Item alias name here must be a valid FDOM shortname.{ queryAlias: "QueryAliasName", query: {...query}}
.{ conditionAlias: "?CondAliasName", condition: {...condition}}
(using ?
prefix is an optional convention).{ collectionAlias: "CollAliasName", collection: {...collection}}
. It is a permanent alias that will be shared by the subsequent queries,
unlike the query local alias that only spans the rest of current query (
✖ Set local collection alias ["alias ..."]
).{
docModel: {
name: "DocumentModelName",
rootItems: {
query: {...query},
sort: {...sort}
},
excludeUnder: {...collection}, // optional
whitelistUnder: {...collection} // optional
},
forEachItem: [
... // list of item readable content specification instructions
]
}
Name
| Description
|
---|---|
✖ forEachItem
|
Specifies the information fragments to include into the readable presentation of each item included into the model. Each instruction can be either
one of listed in
✖ Context definition commands
(be sure you don't assign aliases with conflicting names), or one of the instructions
listed in this section.
|
✖ docModel
|
Specify the document model name and the set of FDOM items to include into this model.
|
"member-field-name"
: any FDOM shortname (note that strings starting from %%
and #
are reserved for other instructions and don't fall under this case).
It tells to emit the immediate content of the given member field of the current item, with no its (sub-)member items, or nothing if there is no such member."#text:...arbitrary string..."
: emit the plain text that follows #text:
prefix, in general inline text style."%%title"
: emit the current item's human readable title (content of its %title
member, or the item's short name if no %title
is available), in
a distinguished header style (or as an interactive title element if applicable to the renderer). In general, not required to do this explicitly - a title
is automatically emitted, unless the item has a private name (shortname starting from #
)."%%brief"
: emit the brief part of the item's direct content (its first paragraph, unless it is code block or a list element), in general inline text style."%%debrief"
: emit the part of the item's direct content remaining after %%brief
, in general inline text style. "%%brief"
instruction followed by
"%%debrief"
instruction emit the full item's direct content."%%refTitle"
: emit the current item's title, in general inline text style, wrapped into a on-page link (Logipard reference if applicable to the renderer).
This instruction makes little sense as is, because links from an item's direct content to the item itself are inherently defunct - it is typically used in
conjunction with #item
(see below)."%%more-start"
: this instruction marks location where the content of item viewed in brief mode finishes. Everything below should only be visible in or
after switching to full mode. This instruction can only be used once per the forEachItem
section."%%mark-for-toc"
: this instruction indicates that the current item should be included into table-of-contents tree (if applicable to the renderer).
By default, an item is not marked for TOC, and you should take care to include only items significant enough, otherwise the TOC can become overburdened.
It is not necessary to mark every level in the branch - the tree is contracted to the marked items only (i. e., if only item and its grandparent item are
marked, then in the TOC item will appear as direct member of its grandparent)."#item:...spec..."
: any of the above options, except for %%more-start
and %%mark-for-toc
, prefixed by #item:
- e. g. #item:%%title
,
#item:%%refTitle
, #item:fieldName
, etc. It is not allowed as a standalone instruction, only inside emitAsItems...
instruction (see below), and
it refers to the current item of the iterated sub-collection.{
on: {...collection},
query: {...query},
as: "ResultAlias"
}
docModel
.Name
| Description
|
---|---|
✖ on
|
The collection to start with (as in
✖ Collections
). Additionally, if used inside
docModel , a "%%self" alias is defined,
allowed for the on field or inside the query - it refers to the current item. |
✖ query
|
The query to perform, with
on as initial current collection. |
✖ as
|
String. The alias to set for the resulting collection (will replace earlier defined one and will transfer to next instructions, including
forEachItem iterations for next items,
so keep this in mind to avoid order dependent effects). |
{
ifNotEmpty: {...collection},
then: [
...
]
}
docModel
.{
with: {...collection},
sort: {...sort},
emitAsItemsTable: [
[ "column-header-spec", "column-content-spec" ],
...
]
}
Name
| Description
|
---|---|
✖ with
|
The collection (as in
✖ Collections
).
|
✖ sort
|
Optional. The sorting specification (as in
✖ Sorting specification
) to use on the
with collection for this table. |
✖ emitAsItemsTable[]
|
Array. Specification of table columns. Each element specifies the column, in left to right order, and is a two-element sub-array:
|
Name
| Description
|
---|---|
✖ [0]
|
String. The column title. Is interpreted as in
✖ String (text, field refs, etc.)
.
|
✖ [1]
|
String. The column content. Is interpreted as in
✖ String (text, field refs, etc.)
, where
#item: refers to the element of collection assigned to this line. |
{
with: {...collection},
sort: {...sort},
emitAsItemsList: [ "fragment-1-spec" [, "fragment-2-spec", ...] ]
}
Name
| Description
|
---|---|
✖ with
|
The collection (as in
✖ Collections
).
|
✖ sort
|
Optional. The sorting specification (as in
✖ Sorting specification
) to use on the
with collection for this list. |
✖ emitAsItemsList[]
|
Array. Specification of fragments to append to form the list line, in the listed order.
Each fragment is a string interpreted as
✖ String (text, field refs, etc.)
, where
#item: refers to the element of collection assigned to this line. |
{
with: {...collection},
sort: {...sort},
emitAsOwnItems: "basic|full"
}
lpgwrite-example
convention, an item can be emitted at multiple locations in the document, but only one of them is treated as "home" location. The document format can assume it, for example,
the actual item's information site, and just put the links to it into all the others (but as well can ignore this hint).emitAsOwnItems
/emitAsExtItems
, lpgwrite-example
chooses one of them as home location, the ones from emitAsOwnItems
have more priority for this choice.Name
| Description
|
---|---|
✖ with
|
The collection (as in
✖ Collections
).
|
✖ sort
|
Optional. The sorting specification (as in
✖ Sorting specification
) to use on the
with collection for this list. |
✖ emitAsOwnItems
|
String. Specifies the suggested information mode for the items emitted per this instruction. Can be either of...
|
{
with: {...collection},
sort: {...sort},
emitAsExtItems: "basic|full"
}
lpgwrite-example
convention, an item can be emitted at multiple locations in the document, but only one of them is treated as "home" location. The document format can assume it, for example,
the actual item's information site, and just put the links to it into all the others (but as well can ignore this hint).emitAsOwnItems
/emitAsExtItems
, lpgwrite-example
chooses one of them as home location, the ones from emitAsOwnItems
have more priority for this choice.Name
| Description
|
---|---|
✖ with
|
The collection (as in
✖ Collections
).
|
✖ sort
|
Optional. The sorting specification (as in
✖ Sorting specification
) to use on the
with collection for this list. |
✖ emitAsExtItems
|
String. Specifies the suggested information mode for the items emitted per this instruction. Can be either of...
|
Name
| Description
|
---|---|
✖ name
|
String. Name of the model, will be used to refer to this model from renderer config (see
✖ docModel
).
|
✖ rootItems
|
The initial slice to start the inclusion set from. Root items set is obtained by a query and added to the list of items. The set will then be expanded
to include all items that are referenced (
✖ Inline references
) from, or will be emitted as sub-items of the items already included - all the way down
the tree. This set can be then trimmed down (see
✖ excludeUnder
,
✖ whitelistUnder
).
|
✖ excludeUnder
|
The collection of root items to recursively exclude from the initial set after
✖ rootItems
. If
excludeUnder collection is specified, then,
whenever an item is in FDOM membership tree of one of these items, it is dropped from document and from any collection based lists/tables, and inline links
to it are defunct. |
✖ whitelistUnder
|
The collection of root items to whitelist in the initial set after
✖ rootItems
. If
whitelistUnder collection is specified, then,
unless an item is in FDOM membership tree of one of these items, it is dropped from document and from any collection based lists/tables, and inline links
to it are defunct. |
Name
| Description
|
---|---|
✖ query
|
The query to deliver the root items. Initial current collection for this query is empty, so in order to make sense you should start it from
{ with: ... }
basic query (see
✖ <QuerySpec>
). |
✖ sort
|
The sort specification to determine relative order of the root items in the resulting document. Note that it is top-level order only:
any sub-items will be emitted after containing item and before its next sibling item, and the ordering within sub-items is specified by the respective
emitting instructions.
|
excludeUnder
collection is specified, then,
whenever an item is in FDOM membership tree of one of these items, it is dropped from document and from any collection based lists/tables, and inline links
to it are defunct.excludeUnder
is the inverse to
✖ whitelistUnder
, and generally they should not be used together. However, if they are, excludeUnder
is applied first.whitelistUnder
collection is specified, then,
unless an item is in FDOM membership tree of one of these items, it is dropped from document and from any collection based lists/tables, and inline links
to it are defunct.whitelistUnder
is the inverse to
✖ excludeUnder
, and generally they should not be used together. However, if they are, excludeUnder
is applied first.file
facility
(see
✖ file(...): embedded value from JSON/LPSON file
) with some added parameters, as shown below, and defines model named DocMain
: ...
lpgwrite-example: {
...,
program: file("${LP_HOME}/lpgwrite-example-docprg.lpson" $, {
docprgPrologue: [ ... ], // instructions to inject at the start
docRootItems: {...query},
LS_EXTENDS: "Extends (is a)",
LS_MEMBERS: "Members",
LS_NAME: "Name",
LS_DESCRIPTION: "Description",
LS_MEMBERS_FROM_EXTENTS: "Members from extents",
LS_ARGUMENTS: "Arguments",
LS_RETURNS: "Returns:",
LS_ERRORS: "Errors:",
LS_MEMBERS_DETAILED: "Members (detailed)",
LS_MEMBERS_FROM_EXTENTS_DETAILED: "Members from extents (detailed)",
LS_ARGUMENTS_DETAILED: "Arguments (detailed)",
LS_NOTES: "Notes",
LS_PROPERTIES: "Properties",
LS_PROPERTIES_FROM_EXTENTS: "Properties from extents",
LS_METHODS: "Methods",
LS_METHODS_FROM_EXTENTS: "Methods from extents"
}),
renders: [
{
docModel: "DocMain",
renderer: "${LP_HOME}/lpgwrite-example-render-html" $,
...
},
{
docModel: "DocMain",
renderer: "${LP_HOME}/lpgwrite-example-render-md" $,
...
},
...
]
}
%extends
or %.extends
member (the member itself should not contain any content except for the added tags).%member
or %.member
tag. It is possible to combine it with %property
/%.property
or %method
/%.method
tags.%return
member is assumed to be such description and is appended in the dedicated documentation section. %return
member is assumed to have no title.%errors
member is assumed to be such description and is appended in the dedicated documentation section. %errors
member is assumed to have no title.%errors
member can have sub-members, even marked with %member
.%method
or %.method
tag. It is possible to combine it with %member
/%.member
or %property
/%.property
tags.%title
to specify the full name for human readability.%property
or %.property
tag. It is possible to combine it with %member
/%.member
or %method
/%.method
tags.item/%note
like this:#LP main-item {
Main item content
#LP ./%note/~ {
#LP note 1 content (in-item), with member
#LP ./note-1-member: note 1 member
#LP }
<#LP ./%note/~: note 2 content (in-item), simple#>
More main item content
#LP }
#LP main-item/%note/~: note 3 content (off-item)
%notes
is that there are more options for their ordering control. Appending directly to item's content from multiple sources does not guarantee the resulting order of fragments, and can even disrupt
your intended convention of what will be brief part of information for this item. On the other hand, 'Notes' section location is well-defined, and members of %notes
under it obey %order
hints
(
✖ %order: ordering control
).%note
's own direct content and non-anonymous members are not used. For purpose of concern separation, there is a separate capability for this with a different intended use case - see
✖ extra
.%extra
member you can specify content that will be displayed after the item's main content in inline manner, like it was written at the end of the item's content itself.
More specifically, it will behave like an extra item with no title inserted before the item's detailed members section (and before 'Notes' section, if available), so it will look like
continuation to the item's own content. The intended usage is to add content and members to item/%extra
like this:#LP main-item {
Main item content
#LP ./%extra: main-item's direct extra content
#LP ./%extra/A %member: main-item's extra member A (in-item)
More main item content
#LP ./ownMember %member: main-item's own member
#LP }
#LP main-item/%extra/B %member {
main-item's extra B content (off-item)
#LP ./extra-B-member: extra item B member
#LP }
main-item
's section:# main-item
Main item content
Members:
ownMember | main-item's own member
// end of main-item's brief info
More main item content
// data from %extra starts here
main-item's direct extra content
Members: // of %extra
A | main-item's extra member A (in-item)
B | main-item's extra member B content (off-item)
Members (detailed): // of %extra
# A
main-item's extra member A (in-item)
# B
main-item's extra member B content (off-item)
# extra-B-member
extra item B member
// data from %extra ends here
Members (detailed): // of main-item
# ownMember
main-item's own member
%extra
is display control for members in edge cases.lpgwrite-example-docprg
places item's direct members tagged with %member
, %arg
, %return
, %errors
, and the list of their counterparts
from extents, in the end of item's brief description and before the remaining part of the content. In some cases, this can disrupt the information flow (such as a fenced code fragment
presenting the item's general look, which should better go before the members list). In order to workaround this inconvenience, you can move the list of members, args etc. from the item itself
to members of its ./%extra
item. While they will still look "inline", they are logically part of a different item and will not be parts of the main item's brief display flow.lpgwrite-example-docprg
are provided via added context vars in the LPSON file
operator:Name
| Description
|
---|---|
✖ docprgPrologue
|
The array of instructions to inject at the very start of the doc program. Typically definition of aliases to be used in
✖ docRootItems
.
|
✖ docRootItems
|
The
rootItems section (see
✖ rootItems
) of the generated model (DocMain ). This object will be assigned to the rootItems
entirely as is, with no wrappings and patching, so the user should not rely on any defaults here. |
✖ localization
|
The group of predefined titles to use in the generated page. Moved out to a parameter in order to make them localizeable at this point. This object is a dictionary of strings, with member names
denoting meaning for each string:
|
%extends
list (i. e. ones tagged on %extends
member)%member
%member
marked items defined inside the items from %extends
list, all the way deep through the extendeds tree%property
%property
marked items defined inside the items from %extends
list, all the way deep through the extendeds tree%method
%method
marked items defined inside the items from %extends
list, all the way deep through the extendeds tree%arg
%return
member%error
member%member
marked items%member
marked items defined inside the items from %extends
list, all the way deep through the extendeds tree%arg
marked items%notes
member will be put under[D]
prefix to mark that they are default placeholders and that it better be fixed.null
value).
If the item is a reader resource that needs to be explicitly disposed, the implementation documentation must emphasize that, stipulate the object lifetime, and provide a disposal method.Name
| Description
|
---|---|
✖ .content
|
Read-only property. The item content (text, inline references, and whatever else the reader's backing model supports).
|
✖ .name
|
Read-only property, string. The item's full path name (with no namespace aliases)
|
✖ .shortName
|
Read-only property, string. The item's short name (last segment of the full path name).
|
✖ .tags
|
Read-only property. Collection of the item's tags.
|
✖ .members
|
Read-only property. Collection of the item's members.
|
✖ .isNull
|
Read-only property, bool. Check if item is empty (true) or not (false).
|
✖ .parent
|
Read-only property,
✖ <Item>
. Return parent item (one this item is member to). For root item returns null (not null item).
|
✖ .isConditionTrue(lpqCtx, condSpec)
|
Check if the item satisfies a certain condition, must be done relative to a query context (in order to resolve condition and collection aliases).
|
{ ref: <Item>, text: string }
(ref is
✖ <Item>
), for an inline ref to another item. Text is the ref alt display text, if not empty then it is suggested instead of the ref-d item's
default title.Name
| Description
|
---|---|
✖ .nameAlias(aliasName, item)
|
Set an item alias name (which should be a valid shortname), that can later be used as standalone item name or as starter for another item name within this
✖ <QueryContext>
.
Behaviour in case of already existing alias with the given name is implementation specific.
|
✖ .collectionAlias(aliasName, ...collectionSpecs)
|
Set a named collection alias that can be used later to reference the collection within this context (
✖ <CollectionSpec>
). The collection is built up from collections corresponding to each element of the
specs list. This alias is permanent within the context, unlike query local alias (
✖ Set local collection alias ["alias ..."]
).
|
✖ .queryAlias(aliasName, ...querySpecs)
|
Set a named query alias that can be used later to reference the query within this context (
✖ <QuerySpec>
). The list is interpreted as a composite query.
|
✖ .conditionAlias(aliasName, condSpec)
|
Set a named condition alias that can be used later to reference the condition within this context (
✖ <Condition>
).
|
✖ .item([baseItem ,] name)
|
Return item by given path name, either full or relative to the provided base item. The full item name's first segment shortname can be a name alias defined in this
✖ <QueryContext>
.
|
✖ .collection(...collectionSpecs)
|
Returns a collection specified by a list of collection item specs. Each list item is a
✖ <CollectionSpec>
.
|
✖ .with(...collectionSpecs)
|
Set current collection for the subsequent query (call to
✖ .query(...querySpecs)
). Collection is built up from collections corresponding to each element of the specs list.
.with effectively initiates the query chain, but can be used in the middle of the chain as well to override the current collection after a certain step. |
✖ .query(...querySpecs)
|
Perform a query, or a list of queries interpreted as a composite query, given the current collection specified by preceding
✖ .with(...collectionSpecs)
or resulting from previous
.query calls.
Note that the resulting collection is not returned immediately, it becomes new current collection instead. |
✖ .teardownCollection()
|
Finalize the query and return the result (the current collection at time of the call). The current collection itself is reset, so the next query must be re-initialized, starting over from
✖ .with(...collectionSpecs)
.
|
✖ .currentCollectionAlias(aliasName)
|
Set a named collection alias for the current collection that can be used later to reference the collection within this context (
✖ <CollectionSpec>
). Only is usable mid query (when the current collection is
meaningful), otherwise it is an error. This is a local query alias, unlike a permanent one (
✖ Set local collection alias ["alias ..."]
).
|
✖ .compileQuery(...querySpecs)
|
Compile a query into a handle object usable later to reference the query within this context (
✖ <QuerySpec>
). The list is interpreted as a composite query.
|
Name
| Description
|
---|---|
✖ aliasName
| Alias name, string.
|
✖ item
|
The item to alias. String (a full path name, probably including another alias) or
✖ <Item>
.
|
Name
| Description
|
---|---|
✖ baseItem
|
Optional. The base item to apply
✖ name
path to.
✖ <Item>
, string or array of strings.
|
✖ name
|
The path to item. String or array of strings. Can begin with a name alias defined in this
✖ <QueryContext>
.
|
some/path
, and the baseItem
provided has path base/item
, then the resulting item is assumed by path base/item/some/path
..with
effectively initiates the query chain, but can be used in the middle of the chain as well to override the current collection after a certain step.Name
| Description
|
---|---|
✖ .size
|
Read-only property, number. Size of the collection (how many items are in it).
|
✖ .contains(item)
|
Check if the collection contains the given item. Must be false for any null (with
✖ .isNull
= true) items.
|
✖ [Symbol.iterator]
|
The collection must be a JS-enumerable object, delivering the contained
✖ <Item>
's in some order (
for (var item of collection) ).
It is recommended for the implementation to keep items declared within the same source in the same order as they are in that source, but the user is not recommended to rely on this assumption. |
for (var item of collection)
).
It is recommended for the implementation to keep items declared within the same source in the same order as they are in that source, but the user is not recommended to rely on this assumption.{ isAnyOf: <CollectionSpec> }
: the isAnyOf
type condition{ hasMembersNamed: <string | RegExp> }
: the hasMembersNamed
type condition, regexp can be given as JS RegExp
(no flags should be used except for i
) or as a regexp source string (assuming no regexp flags){ hasMembersThat: <Condition> }
: the hasMembersThat
type condition{ hasAnyOfTags: <CollectionSpec> }
: the hasAnyOfTags
type condition{ hasAllOfTags: <CollectionSpec> }
: the hasAllOfTags
type condition{ hasParentThat: <Condition> }
: the hasParentThat
type condition{ named: <string | RegExp> }
: the named
type condition, regexp can be given as JS RegExp
(no flags should be used except for i
) or as a regexp source string (assuming no regexp flags){ and: [ ...<Condition> ] }
: the and
type condition, the argument is array of <Condition>
objects{ or: [ ...<Condition> ] }
: the or
type condition, the argument is array of <Condition>
objects{ not: <Condition> }
: the not
type condition{ union: [nestedCollectionSpecs] }
: a set union of collections specified by the array of elements, each of which is also a collection spec item (arbitrary nesting is possible,
but note that every item spec at union
's list topmost level specifies operands for union operation, not a concatenation){ intersect: [nestedCollectionSpecs] }
: a set intersection of collections specified by the array of elements, each of which is also a collection spec item (arbitrary nesting is possible,
but note that every item spec at intersect
's list topmost level specifies operands for intersect operation, not a concatenation){ subtract: [nestedCollectionSpecs] }
: a set difference of collections specified by the array of elements (subtracting 2nd and on elements from 1st element), each of which is also
a collection spec item (arbitrary nesting is possible, but note that every item spec at subtract
's list topmost level specifies operands for subtract operation, not a concatenation)[ ...<QuerySpec> ]
: array of query specs, a composite query where the components are applied in the listed order{ alias: string }
:
✖ Set local collection alias ["alias ..."]
, alias name to set is given as string{ with: <CollectionSpec> }
:
✖ Replace current collection ["with ..."]
{ membersThat: <Condition>, on?: <CollectionSpec>, recursive?: boolean }
:
✖ Select members that satisfy condition ["membersThat ..."]
{ tagsThat: <Condition>, on?: <CollectionSpec>, recursive?: boolean }
:
✖
Select tags of the collection's elements that satisfy condition ["tagsThat ..."]
{ inMembersThat: <Condition>, query: [ ...<QuerySpec> ], on?: <CollectionSpec>, recursive?: boolean }
:
✖
Perform sub-query on members of the collection's elements that satisfy condition ["inMembersThat ..."]
{ inTagsThat: <Condition>, query: [ ...<QuerySpec> ], on?: <CollectionSpec>, recursive?: boolean }
:
✖
Perform sub-query on tags of the collection's elements that satisfy condition ["inTagsThat ..."]
{ inItemsThat: <Condition>, query: [ ...<QuerySpec> ], on?: <CollectionSpec>, recursive?: boolean }
:
✖
Perform sub-query on the collection's elements that satisfy condition ["inItemsThat ..."]
{ subtractQuery: [ ...<QuerySpec> ], on?: <CollectionSpec> }
:
✖
Subtract result of sub-query from current collection ["subtractQuery ..."]
{ unionQuery: [ ...<QuerySpec> ], on?: <CollectionSpec> }
:
✖ Union result of sub-query with current collection ["unionQuery ..."]
{ intersectQuery: [ ...<QuerySpec> ], on?: <CollectionSpec> }
:
✖
Intersect result of sub-query with current collection ["intersectQuery ..."]
{ sideQuery: [ ...<QuerySpec> ], on?: <CollectionSpec> }
:
✖
Perform sub-query with no effect on current collection ["sideQuery ..."]
true
, false
, and null
.
All of these are possible in LPSON as well (with some extended capabilities for lists and strings), plus several more options:vars
: resolves to variables dictionary valuefile(...)
: resolves to value parsed from the specified JSON/LPSON file.fieldName
, or ."fieldName"
, or .(field-name-expr)
, resolves to value of the given field name
of the preceding value$
, resolves to preceding string value where the ${var-name}
placeholders are replaced
with matching context var values{ ... }
), resolves to that dictionary with added "@type"
key and the preceding
value as valuevars.objectField1."objectField2".("objectField${THREE}" $).stringField { value: 123 }
). They are evaluated
left to right with same priority.file(name-value)
or file(name-value, extra-vars-dictionary-value)
. It parses and resolves LPSON value
from the given file, adding/replacing the supplied extra vars to context vars dictionary. The modified dictionary will only be in effect
for the expressions (
✖ 'vars': context vars dictionary
) in the child context inside the embedded file, current file's context vars are not affected.Name
| Description
|
---|---|
✖ name-value
|
a value that resolves to file name. Relative names are relative to the current file's directory
(i. e.,
file("xxx.lpson") from inside yyy/zzz.lpson will refer to file yyy/xxx.lpson ). |
✖ extra-vars-dictionary-value
|
a value that resolves to a dictionary. Keys are names of the context vars to add/override in the
child context, values are the values to set them to.
|
"abc"
, JSON-compatible), single-quoted ('abc'
), or backtick-fenced (`...`abc`...`
\'
, \"
, \\
, \n
, \r
, \t
, \uXXXX
etc.). The non-matching quote type
('
in "..."
and "
in '...'
) can stay unescaped. Line breaks (raw or escaped) are not allowed in quoted strings.before, `a`, after
// is the same as
before, ``
a
``, after
LP_HOME
: installation directory of the currently running Logipard pipeline executor, can be used to reference the built-insLP_PROJECT_ROOT
: project root directory, use it to construct strings that are meant to be file names relative to project root (not counting file names in file(...)
operator,
there it is done automatically)THISFILE
: path to the current LPSON file, may be useful to refer random items relative to the file location (e. g. "${vars.THISFILE}/../item_in_the_same_dir.png" $
){ "key1": value1, "key2": value2, ... }
, but has some additional features:A-Z
, a-z
, 0-9
(except for as first character) and _
like in JS, but also +
, -
, *
, /
(except for
//
-s and /*
-s that are treated as comment start), $
, and =
. Example:{
"jsonStyleKey": 0,
LP-style-key: 1,
/this-is+allowed=too*$: 2,
jsonStyleKey: 3 // it is the same as "jsonStyleKey"
}
{
a: 1,
...({ b: 2, c: 3 }),
d: 4
}
{
(vars.KEY_NAME): "value"
}
"@type"
key. Example:"string" { value: "123" }
{ "@type": "string", value: "123" }
{ class: "even another dictionary" } { value: "typed value" }
{ "@type": { class: "even another dictionary" }, value: "typed value" }
(expression) { value: "typed value" }
{ "@type": (expression), value: "typed value" }
{
is: "legit",
legit: "too",
}
[ value1, value2, ... ]
, but has some additional features:[
1,
...([2, 3]),
4
]
[
"is",
"legit",
]
{ a: "value" }.a
{ a: "value" }."a"
{ a: "value" }.("a")
// all these expressions resolve to "value"
(= expression)
suffix after the key or key expression:{ a: "value" }.a (= "default value") // resolved to "value"
{ b: "value" }.a (= "default value") // resolved to "default value", as .a is not defined
null
or any false value, the field is defined to have that value.${varName}
- substitute value of context variable varName
, given it resolves to string, number or boolean\$
(\\$
in double-quoted string) - literal $
"Program version ${version}, and this is not a \\${placeholder}" $
// if vars.version is "1.0.0", then it resolves to same as "Program version 1.0.0, and this is not a ${placeholder}"
(= dictionary-expression)
prefix:// vars.a is "value-of-a", vars.b is not defined
"a is ${a}, b is ${b}" $(= { a: 1, b: 2, c: 3 })
// same as "a is value-of-a, b is 2"
<valid-LP-identifier expression-parameter>
.<+vars { a: 10 }>
{
innerA: "${a}",
innerA1: <+vars { a: 11 }> "${a}",
<+vars { a: 12 }>
...{ innerA3: vars.a, innerA4: "${a}" }
}
+vars
annotations in a row, they all are applied in their order, later ones override earlier ones:<+vars { a: 1 }>
<+vars { a: 2, b: 3 }>
"${a} ${b}" $ // "2 3"
<trace vars.a>
{
innerA: "${a}"
}
+vars
annotations, the variables used are ones in effect after the last +vars
before the
trace
:<+vars { a: 10 }>
<trace vars.a> // 10
<+vars { a: 20 }>
vars.a // 20
WHITESPACE ::= \s+
CHARSET_COMMENT ::= //[^\S\r\n]*#charset[^\S\r\n]+([-A-Za-z0-9_]*).*
COMMENT ::= //.*
MULTILINE_COMMENT ::= /\*[\S\s]*?\*/
// whitespaces and comments are dropped from the token string
// '#charset' token is case sensitive, but the charset name itself isn't, and hyphens are ignored - that is,
// you can use #charset utf8, #charset UTF-8, or #charset Utf8-, etc.
SINGLE_QUOTE_STRING ::= '(?:\\.|[^'])*'
DOUBLE_QUOTE_STRING ::= "(?:\\.|[^"])*"
// similarly to plain JSON, quoted strings can not span multiple lines
FENCED_STRING ::= (?<fence>`+)(?:\s*?\n)?([\S\s]*?)(?:(?<=\n)[^\S\r\n]*)?\k<fence>
// fenced string is delimited by runs of backticks of same length (which can start from one backtick), and
// can span multiple lines
// all characters between the fences are taken verbatim, including spaces and newlines, except for trailing
// whitespaces on line with opening fence and/or leading whitespaces on line with closing fence, if the fences
// are, resp., last/first non-whitespace chars on their lines - such whitespace runs, including line feeds, are dropped.
NUMBER ::= [-+]?(?:\d+(?:\.\d+)?(?:[eE]\d+)?|0[Xx][0-9A-Fa-f]+)
// unlike in plain JSON, LPSON allows leading + and hexadecimal integer numbers
PUNCTUATION ::= \.\.\.|[\(\)\[\]\{\}<>.,:]
// the recognized LPSON punctuators are: '[' ']' '{' '}' '(' ')' '<' '>' ',' ':' '.' '...'
IDENTIFIER ::= (?![0-9])(?:[-+A-Za-z_$*=]|\/(?!\/))+
// in addition to digits, letters and underscore, LPSON allows identifiers to contain $, -, +, *, / (except for
// two consecutive /'s, which are treated as a comment start), and =
// the identifier must not start from digit, or from plus or minus followed by digit
ANNOTATION ::= '<' NOISE '>'
SPREAD ::= '...' NOISE
SUBEXPR ::= '(' (NOISE ',')* NOISE? ')'
LIST.ITEM ::= ANNOTATION* (SPREAD | NOISE)
LIST ::= '[' (LIST.ITEM ',')* LIST.ITEM? ']'
KEY_VALUE ::= NOISE ':' NOISE
DICTIONARY.ITEM ::= ANNOTATION* (SPREAD | KEY_VALUE)
DICTIONARY ::= '{' (DICTIONARY.ITEM ',')* DICTIONARY.ITEM? '}'
ATOMIC_VALUE ::= NUMBER | STRING | IDENTIFIER
NOISE.ITEM ::= SUBEXPR | LIST | DICTIONARY | ATOMIC_VALUE | '.'
NOISE ::= NOISE.ITEM+
// NOISE is basically some expression that resolves to a JSON value, but its further structure is out of scope at L1
LPSON_FILE ::= ANNOTATION* NOISE
// the LPSON file contains exactly one, optionally annotated, NOISE symbol
const { loadLpsonFile } = require('logipard/lpson');
...
var [parsedObject, errors] = await loadLpsonFile('path-to-file.lpson', { varA: "A" }); // the 2nd parameter is dictionary of vars
if (errors.length > 0) {
console.log("There were parse errors:");
console.dir(errors);
} else {
console.log("Object parsed successfully, backward JSON serialization:", JSON.stringify(parsedObject));
}
THISFILE
is always overridden by parser.