Compare commits
No commits in common. "master" and "sj_latl" have entirely different histories.
12 changed files with 766 additions and 1275 deletions
LICENSE
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2021 Sorrel
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
@@ -14,12 +14,6 @@ Features:
 - multi-character phone support
 - comparative runs for multiple rule sets
 
-## What is LATL?
-
-[Read the specification](/src/utils/latl/README.md)
-
-LATL is a JavaScript targeting compiled language for doing linguistic analysis and transformations.
-
 ## How do I use FCA?
 
 An FCA run requires the user to define three parameters:
@@ -29,16 +29,16 @@
 
 ; -------- distinctive groups
 
-set PLOSIVES = [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
-    AFFRICATES = [ tʃʰ, dʒ ]
-    FRICATIVES = [ f, v, θ, ð, s, z, ʃ, ʒ, ç, x ]
-    NASALS = [ m, ɱ, n, ŋ ]
-    LIQUIDS = [ l, ɹ, ɹʲ, ɹˤ ]
-    SYLLABICS = [ m̩, n̩, l̩, ɹ̩ ]
-    VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
-    GLIDES = [ j, w ]
-    LARYNGEALS = [ h, ɦ, ʔ ]
-    VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
+set PLOSIVES [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
+    AFFRICATES [ tʃʰ, dʒ ]
+    FRICATIVES [ f, v, θ, ð, s, z, ʃ, ʒ, ç, x ]
+    NASALS [ m, ɱ, n, ŋ ]
+    LIQUIDS [ l, ɹ, ɹʲ, ɹˤ ]
+    SYLLABICS [ m̩, n̩, l̩, ɹ̩ ]
+    VOWELS [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
+    GLIDES [ j, w ]
+    LARYNGEALS [ h, ɦ, ʔ ]
+    VOWELS [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
 
 ; ---- implicit
 ; GLOBAL { all sets }
@@ -48,8 +48,6 @@ set PLOSIVES = [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
 ; { SET_A and SET_B } inner join
 ; { SET_A or SET_B } full outer join
 ; { not SET_A } = { GLOBAL not SET_A }
-
-; ---- unnecessary sugar
 ; { not SET_A nor SET_B } = { GLOBAL not { SET_A or SET_B } }
 
 ; ---- set character operations - non-mutable!
@@ -64,17 +62,11 @@ set PLOSIVES = [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
 
 ; ---- TENTATIVE!
 ; ---- set feature operations - non-mutable!
-; { [ + feature1 - feature2 ] in SET_A } FILTER: where feature1 and feature2 are filtering features
+; { [ X + feature1 - feature2 ] in SET_A } FILTER: where X is any character and feature1 and feature2 are filtering features
 ; { SET_A yield [ X + feature1 ] } TRANSFORMATION: performs transformation with (prepended or) appended character
 ; { SET_A yield [ X - feature1 ] }
 ; { SET_A yield [ X - feature1 + feature2 ] }
-; { [ X + feature1 - feature2 ] in SET_A yield [ - feature1 + feature2 ] } combined FILTER and TRANSFORMATION
+; { [ X + feature1 - feature2 ] in SET_A yield [ - feature1 + feature2 ] } combined FILTER and TRANSFROMATION
 
-; ---- MAPPING
-set PLOSIVES = [ p, t, k ],
-    FRICATIVES = [ f, s, x ],
-    ; pairs PLOSIVES with FRICATIVES that have matching features = [ pf, ts, kx ]
-    AFFRICATES = { PLOSIVES yield [ X concat { [ [ X ] - fricative ] in FRICATIVES } ] }
-
 ; ---- example with join, character, and feature operations
 ; set SET_C = { [ PHONE +feature1 ] in { SET_A or SET_B } yield [ PHONE concat y ] }
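Note on the MAPPING block removed above: the comment says it pairs PLOSIVES with FRICATIVES that have matching features to yield [ pf, ts, kx ]. A minimal JavaScript sketch of that behavior, purely illustrative; the `place` feature table below is an assumption, not part of either branch:

```js
// Hypothetical feature table: place of articulation for each phone.
const place = { p: 'labial', t: 'alveolar', k: 'velar',
                f: 'labial', s: 'alveolar', x: 'velar' };

const PLOSIVES = ['p', 't', 'k'];
const FRICATIVES = ['f', 's', 'x'];

// { PLOSIVES yield [ X concat ... ] }: concatenate each plosive X with the
// fricative sharing its features, yielding the affricates [ pf, ts, kx ].
const AFFRICATES = PLOSIVES.map(
  x => x + FRICATIVES.find(y => place[y] === place[x])
);

console.log(AFFRICATES); // [ 'pf', 'ts', 'kx' ]
```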
src/utils/grammar.js (new file)
@@ -34,23 +34,6 @@ Sets are collections of pointers to phones. The GLOBAL set contains all phones,
 #### Global Set
 [ GLOBAL ] is a shorthand for [ GLOBAL.SETS ]
 #### Set Definition
-Sets are defined with the set keyword followed by an equal sign and a set expression:
-```
-set SHORT_VOWELS = [ a, i, u ]
-```
-
-A single alias can be provided to the set during definition:
-```
-; the alias N can be used to refer to this set
-set NASAL_PULMONIC_CONSONANTS, N = [ m, ɱ, n̼, n, ɳ, ɲ, ŋ, ɴ ]
-```
-
-Lists of sets can be defined using a comma followed by whitespace syntax
-```
-set PLOSIVES = [ p, t, k ],
-    FRICATIVES = [ f, s, x ],
-    LABIALIZED_PLOSIVES = { PLOSIVES yield [ X concat ʷ ] }
-```
 #### Set Usage
 #### Set Operations
 ##### 'and' Operation
@@ -1,19 +0,0 @@
-import { parser } from './parser';
-
-export const codeGenerator = (latl) => {
-  const results = parser().feed(latl).results;
-
-  const nodeReader = (code, node) => {
-    if (node.length) {
-      return results.reduce(nodeReader, code)
-    }
-    if (!node) return code;
-    if (node.main) {
-      return nodeReader(code, node.main)
-    }
-    return code + node;
-  }
-
-  return nodeReader('', results)
-
-}
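The deleted codeGenerator above is a fold that walks the parser output and concatenates every leaf back into a source string. A cleaned-up sketch of the same idea, with `Array.isArray` standing in for the original's `node.length` check (an assumption for clarity, and the sample tree shape is hypothetical):

```js
// Walk a nested parse result and concatenate every leaf back into text.
const nodeReader = (code, node) => {
  if (Array.isArray(node)) return node.reduce(nodeReader, code);
  if (!node) return code;
  if (node.main) return nodeReader(code, node.main);
  return code + node;
};

// Hypothetical parse shape, for illustration only.
const tree = { main: ['; a comment', '\n'] };
console.log(nodeReader('', tree)); // "; a comment\n"
```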
@@ -5,110 +5,48 @@ function id(x) { return x[0]; }
 
 const { lexer } = require('./lexer.js');
 const getTerminal = d => d ? d[0] : null;
-const getAll = d => d.map((item, i) => ({ [i]: item }));
-const flag = token => d => d.map(item => ({ [token]: item }))
-const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
+const getAll = d => d.map((item, i) => ({[i]: item}));
+const flag = token => d => d.map(item => ({[token]: item}))
+const clearNull = d => d.filter(t => !!t);
 const flagIndex = d => d.map((item, i) => ({[i]: item}))
 const remove = _ => null;
 const append = d => d.join('');
 const constructSet = d => d.reduce((acc, t) => {
-  if (t && t.type === 'setIdentifier') acc.push({set: t});
+  if (t && t.type === 'setIdentifier') acc.push({set: t})
   if (t && t.length) acc[acc.length - 1].phones = t;
   return acc;
 }, []);
-const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
-const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
+const compose = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d)
 var grammar = {
     Lexer: lexer,
     ParserRules: [
     {"name": "main$ebnf$1", "symbols": []},
-    {"name": "main$ebnf$1$subexpression$1", "symbols": ["_", "statement"]},
+    {"name": "main$ebnf$1$subexpression$1", "symbols": ["statement"]},
    {"name": "main$ebnf$1", "symbols": ["main$ebnf$1", "main$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "main", "symbols": ["main$ebnf$1", "_"], "postprocess": pipe(
-        clearNull,
-        // recursive call to fix repeat?
-        d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-        d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-        flag('main'),
-        getTerminal,
-    ) },
+    {"name": "main", "symbols": ["main$ebnf$1"], "postprocess": compose(flag('main'), getTerminal)},
    {"name": "_$ebnf$1$subexpression$1", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)]},
    {"name": "_$ebnf$1", "symbols": ["_$ebnf$1$subexpression$1"], "postprocess": id},
    {"name": "_$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
    {"name": "_", "symbols": ["_$ebnf$1"], "postprocess": remove},
    {"name": "__", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)], "postprocess": remove},
-    {"name": "equal", "symbols": [(lexer.has("equal") ? {type: "equal"} : equal)], "postprocess": remove},
    {"name": "statement", "symbols": ["comment"]},
-    {"name": "statement", "symbols": ["definition"], "postprocess": pipe(
-        d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-        // recursive call to fit repeat?
-        d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-        d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-        // may split from other definition statements
-        d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
-    ) },
-    {"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": pipe(getTerminal, remove)},
-    {"name": "definition$ebnf$1", "symbols": []},
-    {"name": "definition$ebnf$1$subexpression$1", "symbols": ["setDefinition", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
-    {"name": "definition$ebnf$1", "symbols": ["definition$ebnf$1", "definition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "definition$ebnf$1", "setDefinition"], "postprocess": pipe(
-        // not yet sure why this call is required twice
-        d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-        d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-        d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
-        clearNull,
-    ) },
-    {"name": "setDefinition$ebnf$1$subexpression$1", "symbols": ["setAlias"]},
-    {"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1$subexpression$1"], "postprocess": id},
-    {"name": "setDefinition$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
-    {"name": "setDefinition", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "setDefinition$ebnf$1", "__", "equal", "__", "setExpression"], "postprocess":
-        pipe(
-            d => d.filter(t => !!t && t.length !== 0),
-            d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
-            d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
-            d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
-            d => d.map(t => t.length ?
-                // pretty ugly ([ { type: 'aias', alias: [ string ] }] ) => { setAlias: str }
-                { setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
-                : t),
-        )
-    },
-    {"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)]},
-    {"name": "setExpression$ebnf$1$subexpression$1", "symbols": ["setOperation"]},
-    {"name": "setExpression$ebnf$1", "symbols": ["setExpression$ebnf$1$subexpression$1"], "postprocess": id},
-    {"name": "setExpression$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
-    {"name": "setExpression", "symbols": [(lexer.has("openCurlyBracket") ? {type: "openCurlyBracket"} : openCurlyBracket), "_", "setExpression$ebnf$1", "_", (lexer.has("closeCurlyBracket") ? {type: "closeCurlyBracket"} : closeCurlyBracket)], "postprocess":
-        pipe(
-            // filters commas and whitespace
-            d => d.filter(t => t && t.length),
-            d => d.map(t => t.map(u => u[0])),
-            flag('setExpression')
-    ) },
-    {"name": "setAlias", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier)], "postprocess": pipe(
-        d => d && d.length ? d.filter(t => !!t) : d,
-        d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
-        d => d.filter(t => !!t),
-        d => ({type: 'alias', alias: d }),
-    ) },
+    {"name": "statement", "symbols": ["definition"], "postprocess": compose(clearNull, getTerminal)},
+    {"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": compose(remove, getTerminal)},
+    {"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "setDefinition"], "postprocess": d => ({token: 'setDefinition', sets: d[2]})},
+    {"name": "setDefinition$ebnf$1", "symbols": []},
+    {"name": "setDefinition$ebnf$1$subexpression$1", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
+    {"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1", "setDefinition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
+    {"name": "setDefinition", "symbols": ["setDefinition$ebnf$1", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression"], "postprocess": constructSet},
+    {"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)], "postprocess": d => d.filter(t => t && t.length)},
    {"name": "phoneList$ebnf$1", "symbols": []},
-    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": []},
-    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_"]},
-    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": ["phoneList$ebnf$1$subexpression$1$ebnf$1", "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), "phoneList$ebnf$1$subexpression$1$ebnf$1"]},
+    {"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), (lexer.has("comma") ? {type: "comma"} : comma), "_"]},
    {"name": "phoneList$ebnf$1", "symbols": ["phoneList$ebnf$1", "phoneList$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "phoneList", "symbols": ["phoneList$ebnf$1"], "postprocess":
-        pipe(
-            d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
-        )
-    },
-    {"name": "setOperation", "symbols": ["orOperation"]},
-    {"name": "setOperation", "symbols": [(lexer.has("identifier") ? {type: "identifier"} : identifier)], "postprocess": pipe(
-        d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
-    )},
-    {"name": "orOperation", "symbols": ["_", "setOperation", "__", (lexer.has("kwSetOr") ? {type: "kwSetOr"} : kwSetOr), "__", "setOperation", "_"], "postprocess": pipe(
-        d => d.filter(d => !!d),
-        d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
-    ) }
+    {"name": "phoneList", "symbols": ["phoneList$ebnf$1", (lexer.has("phone") ? {type: "phone"} : phone)], "postprocess": d => d.filter(t => t && (t.type === 'phone' || t.length) )
+        .map(t => {
+          if (!t.length) return t;
+          t.filter(st => st && st.type === 'phone')
+          return t;
+        }) }
 ]
   , ParserStart: "main"
 }
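Aside on the helper rename visible throughout this hunk: `pipe` (on one branch) and `compose` (on the other) are the same left-to-right fold over a list of functions. Conventionally `compose` names the right-to-left version, so `pipe` is the more accurate name. A small usage sketch:

```js
const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);

// Runs top to bottom: trim first, then uppercase.
const normalize = pipe(
  s => s.trim(),
  s => s.toUpperCase(),
);

console.log(normalize('  latl  ')); // "LATL"
```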
@@ -1,32 +1,24 @@
 @{%
 const { lexer } = require('./lexer.js');
 const getTerminal = d => d ? d[0] : null;
-const getAll = d => d.map((item, i) => ({ [i]: item }));
-const flag = token => d => d.map(item => ({ [token]: item }))
-const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
+const getAll = d => d.map((item, i) => ({[i]: item}));
+const flag = token => d => d.map(item => ({[token]: item}))
+const clearNull = d => d.filter(t => !!t);
 const flagIndex = d => d.map((item, i) => ({[i]: item}))
 const remove = _ => null;
 const append = d => d.join('');
 const constructSet = d => d.reduce((acc, t) => {
-  if (t && t.type === 'setIdentifier') acc.push({set: t});
+  if (t && t.type === 'setIdentifier') acc.push({set: t})
   if (t && t.length) acc[acc.length - 1].phones = t;
   return acc;
 }, []);
-const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
-const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
+const compose = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d)
 %}
 
 @lexer lexer
 
-main -> (_ statement):* _
-  {% pipe(
-    clearNull,
-    // recursive call to fix repeat?
-    d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-    d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-    flag('main'),
-    getTerminal,
-  ) %}
+main -> (statement):*
+  {% compose(flag('main'), getTerminal) %}
 
 _ -> (%whiteSpace):?
   {% remove %}
@@ -34,76 +26,54 @@ _ -> (%whiteSpace):?
 __ -> %whiteSpace
   {% remove %}
 
-equal -> %equal
-  {% remove %}
-
 statement -> comment | definition
-  {% pipe(
-    d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-    // recursive call to fit repeat?
-    d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-    d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
-    // may split from other definition statements
-    d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
-  ) %}
+  {% compose(clearNull, getTerminal) %}
 
 comment -> %comment
-  {% pipe(getTerminal, remove) %}
+  {% compose(remove, getTerminal) %}
 
 # SETS
-definition -> %kwSet __ (setDefinition %comma __):* setDefinition
-  {% pipe(
-    // not yet sure why this call is required twice
-    d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-    d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
-    d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
-    clearNull,
-  ) %}
-
-setDefinition -> %setIdentifier (setAlias):? __ equal __ setExpression
-  {%
-    pipe(
-      d => d.filter(t => !!t && t.length !== 0),
-      d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
-      d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
-      d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
-      d => d.map(t => t.length ?
-        // pretty ugly ([ { type: 'aias', alias: [ string ] }] ) => { setAlias: str }
-        { setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
-        : t),
-    )
-  %}
+definition -> %kwSet __ setDefinition {% d => ({token: 'setDefinition', sets: d[2]}) %}
+setDefinition -> (%setIdentifier __ %equal __ setExpression %comma __):* %setIdentifier __ %equal __ setExpression
+  {% constructSet %}
 
 setExpression -> %openSquareBracket _ phoneList _ %closeSquareBracket
-  | %openCurlyBracket _ (setOperation):? _ %closeCurlyBracket
-  {%
-    pipe(
-      // filters commas and whitespace
-      d => d.filter(t => t && t.length),
-      d => d.map(t => t.map(u => u[0])),
-      flag('setExpression')
-  ) %}
-
-setAlias -> %comma _ %setIdentifier
-  {% pipe(
-    d => d && d.length ? d.filter(t => !!t) : d,
-    d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
-    d => d.filter(t => !!t),
-    d => ({type: 'alias', alias: d }),
-  ) %}
-
-phoneList -> (%phone (%comma _):* ):*
-  {%
-    pipe(
-      d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
-    )
-  %}
-setOperation -> orOperation
-  | %identifier
-  {% pipe(
-    d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
-  )%}
-
-orOperation -> _ setOperation __ %kwSetOr __ setOperation _
-  {% pipe(
-    d => d.filter(d => !!d),
-    d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
-  ) %}
+  {% d => d.filter(t => t && t.length) %}
+
+phoneList -> (%phone %comma _):* %phone
+  {% d => d.filter(t => t && (t.type === 'phone' || t.length) )
+    .map(t => {
+      if (!t.length) return t;
+      t.filter(st => st && st.type === 'phone')
+      return t;
+    }) %}
+
+# assignmentExpression:
+# /*
+#  * SPEC:
+#  * conditionalExpression
+#  * | leftHandSideExpression assignmentOperator assignmentExpression
+#  */
+#   (leftHandSideExpression assignmentOperator) =>
+#     leftHandSideExpression assignmentOperator assignmentExpression
+#   | conditionalExpression
+# ;
+
+# assignmentExpressionNoln:
+#   conditionalExpressionNoln
+#   | leftHandSideExpression assignmentOperator assignmentExpressionNoln
+# ;
+
+# assignmentOperator:
+# /* note that in the grammar these are listed out explicitely */
+#   EQ | TIMESEQ | DIVIDEEQ | PERCENTEQ | PLUSEQ | MINUSEQ | LSHIFTEQ | RSHIFTEQ
+#   | GT3EQ | AMPEREQ | CAROTEQ | PIPEEQ
+# ;
+
+# expression:
+# /*
+#  * SPEC:
+#  * assignmentExpression
+#  * | expression COMMA assignmentExpression
+#  */
+#   assignmentExpression (expressionTail)*
+# ;
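For context, a .ne source like the one above is compiled with nearleyc and driven through the standard nearley API. A sketch of the typical wiring; the repo's own parser() wrapper is not shown in this diff, and whether a given input parses depends on the branch:

```js
// $ nearleyc grammar.ne -o grammar.js
const nearley = require('nearley');
const grammar = require('./grammar.js');

const parser = new nearley.Parser(nearley.Grammar.fromCompiled(grammar));
parser.feed('set PLOSIVES = [ p, t, k ]\n');

// nearley returns every valid parse; more than one signals ambiguity.
console.log(parser.results.length);
```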
@@ -1,124 +1,117 @@
-const moo = require("moo");
+const moo = require('moo');
 
 const lexer = moo.states({
   main: {
     comment: /;.*$/,
-    star: { match: /\*/, push: "epoch" },
-    slash: { match: /\//, push: "lexicon" },
+    star: { match: /\*/, push: 'epoch' },
+    slash: { match: /\//, push: 'lexicon' },
     // change so that identifiers are always upper, keywords are always lower, phones are always lower
-    kwSet: {
-      match: "set",
-      type: moo.keywords({ kwSet: "set " }),
-      push: "setDefinition",
-    },
-    identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
-    openBracket: { match: /\[/, push: "feature" },
-    whiteSpace: { match: /\s+/, lineBreaks: true },
-    newLine: { match: /\n+/, lineBreaks: true },
+    'kwSet': { match: 'set', type: moo.keywords({ 'kwSet': 'set '}), push: 'setDefinition'},
+    identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/, },
+    openBracket: { match: /\[/, push: 'feature' },
+    whiteSpace: { match: /\s+/, lineBreaks: true },
+    newLine: { match: /\n+/, lineBreaks: true }
   },
 
   epoch: {
-    identifier: {
-      match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/,
-      push: "rule",
-    },
-    openParen: { match: /\(/, push: "ruleDefinition" },
+    identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/, push: 'rule' },
+    openParen: { match: /\(/, push: 'ruleDefinition' },
     pipe: { match: /\|/, pop: true },
     greaterThan: /\>/,
     arrow: /\-\>/,
     hash: /#/,
     slash: /\//,
     dot: /\./,
     underscore: /\_/,
-    newLine: { match: /\n/, lineBreaks: true },
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   ruleDefinition: {
-    doubleTick: { match: /``/, push: "ruleName" },
-    singleTick: { match: /`/, push: "ruleDescription" },
+    doubleTick: { match: /``/, push: 'ruleName' },
+    singleTick: { match: /`/, push: 'ruleDescription' },
     // push rule
     closeParen: { match: /\)/, pop: true },
-    newLine: { match: /\n/, lineBreaks: true },
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   ruleName: {
     ruleName: { match: /.+(?=``)/ },
-    doubleTick: { match: /``/, pop: true },
+    doubleTick: { match: /``/, pop: true }
   },
 
   ruleDescription: {
     ruleDescription: { match: /.+(?=`)/ },
-    singleTick: { match: /`/, pop: true },
+    singleTick: { match: /`/, pop: true }
   },
 
   rule: {
-    openSquareBracket: { match: /\[/, push: "ruleFeature" },
+    openSquareBracket: { match: /\[/, push: 'ruleFeature' },
     // whiteSpace: { match: /\s/ },
-    newLine: { match: /\n/, pop: true, lineBreaks: true },
+    newLine: { match: /\n/, pop: true, lineBreaks: true }
   },
 
   ruleFeature: {
     ruleFeature: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
     closeBracket: { match: /\]/, pop: true },
-    newLine: { match: /\n/, lineBreaks: true },
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   lexicon: {
     slash: { match: /\//, pop: true },
-    newLine: { match: /\n/, lineBreaks: true },
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   feature: {
     closeBracket: { match: /\]/, pop: true },
     positiveAssignment: /\+=/,
     negativeAssignment: /\-=/,
-    newLine: { match: /\n/, lineBreaks: true },
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   setDefinition: {
     comment: /;.*$/,
     setIdentifier: { match: /[A-Z]+[A-Z_]*/ },
-    openCurlyBracket: { match: /\{/, push: "setOperation" },
+    openCurlyBracket: { match: /\{/, push: 'setOperation' },
     equal: /=/,
     openSquareBracket: /\[/,
     phone: /[\u00c0-\u03FFa-z]+/,
     closeSquareBracket: { match: /\]/ },
-    comma: { match: /,/, push: "commaOperation" },
+    comma: { match: /,/, push: 'commaOperation' },
     whiteSpace: { match: /[\t ]+/ },
     newLine: { match: /\n/, pop: true, lineBreaks: true },
   },
 
   setOperation: {
     closeCurlyBracket: { match: /\}/, pop: true },
     // ! restrict identifiers
-    keyword: {
-      match: ["not", "and", "or", "nor", "in", "yield", "concat", "dissoc"],
-      type: moo.keywords({
-        kwSetNot: "not",
-        kwSetAnd: "and",
-        kwSetOr: "or",
-        kwSetNor: "nor",
-        kwSetIn: "in",
-        kwSetYield: "yield",
-        kwSetConcat: "concat",
-        kwSetDissoc: "dissoc",
-      }),
+    keyword: { match: ['not', 'and', 'or', 'nor', 'in', 'yield', 'concat', 'dissoc'], type: moo.keywords({
+      'kw-set-not': 'not' ,
+      'kw-set-and': 'and' ,
+      'kw-set-or': 'or' ,
+      'kw-set-nor': 'nor' ,
+      'kw-set-in': 'in' ,
+      'kw-set-yield': 'yield' ,
+      'kw-set-concat': 'concat',
+      'kw-set-dissoc': 'dissoc'
+    })
     },
     identifier: /[A-Z]+[A-Z_]+/,
-    whiteSpace: { match: /\s+/, lineBreaks: true },
-    openSquareBracket: /\[/,
-    closeSquareBracket: /\]/,
+    whiteSpace: /[\t ]+/,
+    openSquareBracket: /\]/,
+    closeSquareBracket: /\[/,
     identifier: /[A-Z]+[A-Z_]*/,
     phone: /[\u00c0-\u03FFa-z]+/,
+    newLine: { match: /\n/, lineBreaks: true }
   },
 
   commaOperation: {
     // if comma is detected during a definition, the commaOperation consumes all white space and pops back to definition
     // this prevents popping back to main
     comment: /\s*;.*$/,
     whiteSpace: { match: /\s+/, lineBreaks: true, pop: true },
-    newLine: { match: /\n/, lineBreaks: true, pop: true },
-  },
+    newLine: { match: /\n/, lineBreaks: true, pop: true }
+  }
 
 });
 
-module.exports = { lexer };
+module.exports = {lexer};
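Driving the stateful moo lexer above (a sketch): `reset` loads the source, iterating the lexer yields tokens, and the `set` keyword pushes the setDefinition state until a newline pops back to main. Exact token types and ordering differ between the two branches:

```js
const { lexer } = require('./lexer.js');

lexer.reset('set PLOSIVES = [ p, t, k ]\n');
for (const token of lexer) {
  console.log(token.type, JSON.stringify(token.value));
}
// e.g. kwSet "set", whiteSpace " ", setIdentifier "PLOSIVES",
//      equal "=", openSquareBracket "[", phone "p", comma ",", ...
```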
(File diff suppressed because it is too large)
@@ -1,10 +0,0 @@
-import { assertionData } from './assertionData';
-import { codeGenerator } from '../codeGenerator';
-
-describe('codeGenerator', () => {
-  it('parses simple comment', () => {
-    const { latl, code } = assertionData.simpleComment;
-    const generatedCode = codeGenerator(latl);
-    expect(generatedCode).toEqual(code);
-  });
-})
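The deleted test above reads its fixtures from assertionData, a module that is not part of this diff. A hypothetical shape consistent with the round-trip assertion:

```js
// Hypothetical fixture: codeGenerator(latl) is expected to reproduce `code`.
export const assertionData = {
  simpleComment: {
    latl: '; a comment that compiles to itself',
    code: '; a comment that compiles to itself',
  },
};
```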
@@ -1,180 +1,49 @@
-import { lexer } from "../lexer";
-import { parser } from "../parser";
-import { assertionData } from "./assertionData";
+import { lexer } from '../lexer';
+import { parser } from '../parser';
+import { assertionData } from './assertionData';
 
-describe("parser", () => {
-  it("parses simple comment", () => {
-    const { latl, AST } = assertionData.simpleComment;
-    const feedResults = parser().feed(latl).results;
-    expect(feedResults.length).toBe(1);
-    expect(feedResults[0]).toStrictEqual(AST);
-  });
+describe('parser', () => {
+  it('parses simple comment', () => {
+    const { latl } = assertionData.simpleComment;
+    const AST = parser().feed(latl).results;
+    expect(AST.length).toBe(1);
+    console.log(AST[0])
+    // expect(AST[0]).toStrictEqual()
+  })
 
-  it("parses simple set definition", () => {
-    const { latl, AST } = assertionData.simpleSetDefinition;
-    const feedResults = parser().feed(latl).results;
-    expect(feedResults.length).toBe(1);
-    expect(feedResults[0]).toStrictEqual(AST);
-  });
+  // it('parses multiple set definitions with comma operator', () => {
+  //   const { latl } = assertionData.commaSetDefinition;
+  //   const AST = parser().feed(latl)
+  //   console.log(AST)
+  // });
 
-  it("parses multiple set definitions with comma operator", () => {
-    const { latl, AST } = assertionData.commaSetDefinition;
-    const feedResults = parser().feed(latl).results;
-    expect(feedResults.length).toBe(1);
-    expect(feedResults[0]).toStrictEqual(AST);
-  });
+  // it('lexes set definition with alias', () => {
+  //   const { latl, tokens } = assertionData.setAliasDefinition;
+  //   const stream = getStream(latl);
+  //   expect(stream).toStrictEqual(tokens);
+  // });
 
-  it("lexes set definition with alias", () => {
-    const { latl, AST } = assertionData.setAliasDefinition;
-    const feedResults = parser().feed(latl).results;
-    expect(feedResults[0]).toStrictEqual(AST);
-  });
+  // it('lexes set definition with set join', () => {
+  //   const { latl, tokens } = assertionData.setDefinitionJoin;
+  //   const stream = getStream(latl);
+  //   expect(stream).toStrictEqual(tokens);
+  // });
 
-  it.skip("lexes set definition with set join", () => {
-    const { latl, AST } = assertionData.setDefinitionJoin;
-    const feedResults = parser().feed(latl).results;
-    expect(feedResults[0]).toStrictEqual(AST);
-  });
+  // it('lexes set definition with yield operation', () => {
+  //   const { latl, tokens } = assertionData.setDefinitionYield;
+  //   const stream = getStream(latl);
+  //   expect(stream).toStrictEqual(tokens);
+  // });
 
-  it.todo(
-    "lexes set definition with yield operation"
-    // , () => {
-    //   const { latl, tokens } = assertionData.setDefinitionYield;
-    //   const stream = getStream(latl);
-    //   expect(stream).toStrictEqual(tokens);
-    // }
-  );
+  // it('lexes all set join operations', () => {
+  //   const { latl, tokens } = assertionData.setOperationsJoin;
+  //   const stream = getStream(latl);
+  //   expect(stream).toStrictEqual(tokens);
+  // });
 
-  it.todo(
-    "lexes all set join operations"
-    // , () => {
-    //   const { latl, tokens } = assertionData.setOperationsJoin;
-    //   const stream = getStream(latl);
-    //   expect(stream).toStrictEqual(tokens);
-    // }
-  );
-
-  it.todo(
-    "lexes set filter, concat, and dissoc operations"
-    // , () => {
-    //   const { latl, tokens } = assertionData.setOperations;
-    //   const stream = getStream(latl);
-    //   expect(stream).toStrictEqual(tokens);
-    // }
-  );
-});
-
-// {
-//   "set":
-//     [
-//       [
-//         [
-//           {
-//             "col": 5,
-//             "line": 2,
-//             "lineBreaks": 0,
-//             "offset": 5,
-//             "text": "NASAL_PULMONIC_CONSONANTS",
-//             "toString": [tokenToString],
-//             "type": "setIdentifier",
-//             "value": "NASAL_PULMONIC_CONSONANTS",
-//           },
-//           null,
-//           {
-//             "col": 45,
-//             "line": 2,
-//             "lineBreaks": 0,
-//             "offset": 45,
-//             "text": "=",
-//             "toString": [tokenToString],
-//             "type": "equal",
-//             "value": "=",
-//           },
-//           null,
-//           [
-//             [
-//               {
-//                 "col": 49,
-//                 "line": 2,
-//                 "lineBreaks": 0,
-//                 "offset": 49,
-//                 "text": "m̥",
-//                 "toString": [tokenToString],
-//                 "type": "phone",
-//                 "value": "m̥",
-//               },
-//               {
-//                 "col": 91,
-//                 "line": 2,
-//                 "lineBreaks": 0,
-//                 "offset": 91,
-//                 "text": "ɴ",
-//                 "toString": [tokenToString],
-//                 "type": "phone",
-//                 "value": "ɴ",
-//               },
-//             ],
-//           ],
-//           {
-//             "col": 94,
-//             "line": 2,
-//             "lineBreaks": 0,
-//             "offset": 94,
-//             "text": ",",
-//             "toString": [tokenToString],
-//             "type": "comma",
-//             "value": ",",
-//           },
-//           null,
-//         ],
-//       ],
-//       - "setIdentifier": "STOP_PULMONIC_CONSONANTS",
-//       {
-//         "col": 5,
-//         "line": 3,
-//         "lineBreaks": 0,
-//         "offset": 100,
-//         "text": "STOP_PULMONIC_CONSONANTS",
-//         "toString": [tokenToString],
-//         "type": "setIdentifier",
-//         "value": "STOP_PULMONIC_CONSONANTS",
-//       },
-//       null,
-//       {
-//         "col": 45,
-//         "line": 3,
-//         "lineBreaks": 0,
-//         "offset": 140,
-//         "text": "=",
-//         "toString": [tokenToString],
-//         "type": "equal",
-//         "value": "=",
-//       },
-//       null,
-//       [
-//         [
-//           {
-//             "col": 49,
-//             "line": 3,
-//             "lineBreaks": 0,
-//             "offset": 144,
-//             "text": "p",
-//             "toString": [tokenToString],
-//             "type": "phone",
-//             "value": "p",
-//           },
-//           {
-//             "col": 104,
-//             "line": 3,
-//             "lineBreaks": 0,
-//             "offset": 199,
-//             "text": "ʔ",
-//             "toString": [tokenToString],
-//             "type": "phone",
-//             "value": "ʔ",
-//           },
-//         ],
-//       ],
-//     ],
-//   "token": "kwSet",
-// }
+  // it('lexes set filter, concat, and dissoc operations', () => {
+  //   const { latl, tokens } = assertionData.setOperations;
+  //   const stream = getStream(latl);
+  //   expect(stream).toStrictEqual(tokens);
+  // })
+})