Compare commits


12 commits

Author | SHA1 | Message | Date
Sorrel | 96bbb274df | Create LICENSE | 2021-04-14 20:34:07 -04:00
Sorrel Bri | bb69a9ffa4 | patch test bug incorrectly oriented square brackets | 2020-05-21 19:36:43 -07:00
Sorrel Bri | dfae87e408 | stub working parse of or Operation on sets; phoneList being read as setAlias | 2020-05-21 19:02:18 -07:00
Sorrel Bri | c264b56c2e | stub AST results for set definition with join | 2020-05-18 22:37:19 -07:00
Sorrel Bri | 73761e6f60 | fix syntax errors in example latl file | 2020-05-18 22:02:00 -07:00
Sorrel Bri | bb8c05a579 | add support for set aliases | 2020-05-09 22:18:07 -07:00
Sorrel Bri | 9619b4a07c | update latl README with set definition | 2020-05-09 16:24:00 -07:00
Sorrel Bri | abfe14b410 | construct AST properly for multi set definitions | 2020-05-09 15:22:01 -07:00
Sorrel Bri | 40aec30537 | parse AST for single set definition | 2020-05-08 23:32:49 -07:00
Sorrel Bri | 3d4d1cd66e | hack set definition postprocessors | 2020-05-07 23:24:19 -07:00
Sorrel Bri | dee27b0d30 | init codeGenerator in latl | 2020-05-06 22:31:15 -07:00
Sorrel Bri | 432630e600 | add postprocessors to grammar.ne for cleaning tree of empty nodes | 2020-04-14 22:03:56 -07:00
12 changed files with 1275 additions and 766 deletions

LICENSE (new file, 21 lines added)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Sorrel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -14,6 +14,12 @@ Features:
 - multi-character phone support
 - comparative runs for multiple rule sets
+## What is LATL?
+[Read the specification](/src/utils/latl/README.md)
+LATL is a JavaScript targeting compiled language for doing linguistic analysis and transformations.
 ## How do I use FCA?
 An FCA run requires the user to define three parameters:


@@ -29,16 +29,16 @@
 ; -------- distinctive groups
-set PLOSIVES [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
-AFFRICATES [ tʃʰ, dʒ ]
-FRICATIVES [ f, v, θ, ð, s, z, ʃ, ʒ, ç, x ]
-NASALS [ m, ɱ, n, ŋ ]
-LIQUIDS [ l, ɹ, ɹʲ, ɹˤ ]
-SYLLABICS [ m̩, n̩, l̩, ɹ̩ ]
-VOWELS [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
-GLIDES [ j, w ]
-LARYNGEALS [ h, ɦ, ʔ ]
-VOWELS [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
+set PLOSIVES = [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
+AFFRICATES = [ tʃʰ, dʒ ]
+FRICATIVES = [ f, v, θ, ð, s, z, ʃ, ʒ, ç, x ]
+NASALS = [ m, ɱ, n, ŋ ]
+LIQUIDS = [ l, ɹ, ɹʲ, ɹˤ ]
+SYLLABICS = [ m̩, n̩, l̩, ɹ̩ ]
+VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
+GLIDES = [ j, w ]
+LARYNGEALS = [ h, ɦ, ʔ ]
+VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
 ; ---- implicit
 ; GLOBAL { all sets }
@@ -48,6 +48,8 @@ set PLOSIVES [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
 ; { SET_A and SET_B } inner join
 ; { SET_A or SET_B } full outer join
 ; { not SET_A } = { GLOBAL not SET_A }
+; ---- unnecessary sugar
 ; { not SET_A nor SET_B } = { GLOBAL not { SET_A or SET_B } }
 ; ---- set character operations - non-mutable!
@@ -62,11 +64,17 @@ set PLOSIVES [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
 ; ---- TENTATIVE!
 ; ---- set feature operations - non-mutable!
-; { [ X + feature1 - feature2 ] in SET_A } FILTER: where X is any character and feature1 and feature2 are filtering features
+; { [ + feature1 - feature2 ] in SET_A } FILTER: where feature1 and feature2 are filtering features
 ; { SET_A yield [ X + feature1 ] } TRANSFORMATION: performs transformation with (prepended or) appended character
 ; { SET_A yield [ X - feature1 ] }
 ; { SET_A yield [ X - feature1 + feature2 ] }
-; { [ X + feature1 - feature2 ] in SET_A yield [ - feature1 + feature2 ] } combined FILTER and TRANSFROMATION
+; { [ X + feature1 - feature2 ] in SET_A yield [ - feature1 + feature2 ] } combined FILTER and TRANSFORMATION
+; ---- MAPPING
+set PLOSIVES = [ p, t, k ],
+FRICATIVES = [ f, s, x ],
+; pairs PLOSIVES with FRICATIVES that have matching features = [ pf, ts, kx ]
+AFFRICATES = { PLOSIVES yield [ X concat { [ [ X ] - fricative ] in FRICATIVES } ] }
 ; ---- example with join, character, and feature operations
 ; set SET_C = { [ PHONE +feature1 ] in { SET_A or SET_B } yield [ PHONE concat y ] }
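A plain-JavaScript illustration of the mapping described by the AFFRICATES comment above; this is not the LATL implementation, and pairing by index stands in for the feature matching the comment describes:

```
const PLOSIVES = ['p', 't', 'k'];
const FRICATIVES = ['f', 's', 'x'];
// zip each plosive with the fricative at the same index and concatenate the pair
const AFFRICATES = PLOSIVES.map((plosive, i) => plosive + FRICATIVES[i]);
console.log(AFFRICATES); // [ 'pf', 'ts', 'kx' ]
```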


@@ -34,6 +34,23 @@ Sets are collections of pointers to phones. The GLOBAL set contains all phones,
 #### Global Set
 [ GLOBAL ] is a shorthand for [ GLOBAL.SETS ]
 #### Set Definition
+Sets are defined with the set keyword followed by an equal sign and a set expression:
+```
+set SHORT_VOWELS = [ a, i, u ]
+```
+A single alias can be provided to the set during definition:
+```
+; the alias N can be used to refer to this set
+set NASAL_PULMONIC_CONSONANTS, N = [ m, ɱ, n̼, n, ɳ, ɲ, ŋ, ɴ ]
+```
+Lists of sets can be defined using a comma followed by whitespace syntax
+```
+set PLOSIVES = [ p, t, k ],
+FRICATIVES = [ f, s, x ],
+LABIALIZED_PLOSIVES = { PLOSIVES yield [ X concat ʷ ] }
+```
 #### Set Usage
 #### Set Operations
 ##### 'and' Operation
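A short sketch of feeding the first definition above to the project's parser; the parser().feed(latl).results call shape is taken from the new tests in this change set, and the exact AST shape is left unasserted:

```
const { parser } = require('./parser');

const latl = `
set SHORT_VOWELS = [ a, i, u ]
`;

// a single, unambiguous parse is expected
const [ast] = parser().feed(latl).results;
console.log(JSON.stringify(ast, null, 2));
```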


@@ -0,0 +1,19 @@
import { parser } from './parser';
export const codeGenerator = (latl) => {
  const results = parser().feed(latl).results;
  const nodeReader = (code, node) => {
    if (node.length) {
      return results.reduce(nodeReader, code)
    }
    if (!node) return code;
    if (node.main) {
      return nodeReader(code, node.main)
    }
    return code + node;
  }
  return nodeReader('', results)
}
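A minimal usage sketch of the new generator; the relative import path is assumed, and what a comment-only input emits is defined by the assertionData fixture rather than shown in this diff:

```
import { codeGenerator } from './codeGenerator';

// walks the parser results and concatenates terminal nodes into emitted code
const emitted = codeGenerator('; a LATL comment\n');
console.log(emitted);
```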


@@ -5,48 +5,110 @@ function id(x) { return x[0]; }
const { lexer } = require('./lexer.js'); const { lexer } = require('./lexer.js');
const getTerminal = d => d ? d[0] : null; const getTerminal = d => d ? d[0] : null;
const getAll = d => d.map((item, i) => ({[i]: item})); const getAll = d => d.map((item, i) => ({ [i]: item }));
const flag = token => d => d.map(item => ({[token]: item})) const flag = token => d => d.map(item => ({ [token]: item }))
const clearNull = d => d.filter(t => !!t); const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
const flagIndex = d => d.map((item, i) => ({[i]: item})) const flagIndex = d => d.map((item, i) => ({[i]: item}))
const remove = _ => null; const remove = _ => null;
const append = d => d.join(''); const append = d => d.join('');
const constructSet = d => d.reduce((acc, t) => { const constructSet = d => d.reduce((acc, t) => {
if (t && t.type === 'setIdentifier') acc.push({set: t}) if (t && t.type === 'setIdentifier') acc.push({set: t});
if (t && t.length) acc[acc.length - 1].phones = t; if (t && t.length) acc[acc.length - 1].phones = t;
return acc; return acc;
}, []); }, []);
const compose = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d) const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
var grammar = { var grammar = {
Lexer: lexer, Lexer: lexer,
ParserRules: [ ParserRules: [
{"name": "main$ebnf$1", "symbols": []}, {"name": "main$ebnf$1", "symbols": []},
{"name": "main$ebnf$1$subexpression$1", "symbols": ["statement"]}, {"name": "main$ebnf$1$subexpression$1", "symbols": ["_", "statement"]},
{"name": "main$ebnf$1", "symbols": ["main$ebnf$1", "main$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}}, {"name": "main$ebnf$1", "symbols": ["main$ebnf$1", "main$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "main", "symbols": ["main$ebnf$1"], "postprocess": compose(flag('main'), getTerminal)}, {"name": "main", "symbols": ["main$ebnf$1", "_"], "postprocess": pipe(
clearNull,
// recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
flag('main'),
getTerminal,
) },
{"name": "_$ebnf$1$subexpression$1", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)]}, {"name": "_$ebnf$1$subexpression$1", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)]},
{"name": "_$ebnf$1", "symbols": ["_$ebnf$1$subexpression$1"], "postprocess": id}, {"name": "_$ebnf$1", "symbols": ["_$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "_$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}}, {"name": "_$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "_", "symbols": ["_$ebnf$1"], "postprocess": remove}, {"name": "_", "symbols": ["_$ebnf$1"], "postprocess": remove},
{"name": "__", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)], "postprocess": remove}, {"name": "__", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)], "postprocess": remove},
{"name": "equal", "symbols": [(lexer.has("equal") ? {type: "equal"} : equal)], "postprocess": remove},
{"name": "statement", "symbols": ["comment"]}, {"name": "statement", "symbols": ["comment"]},
{"name": "statement", "symbols": ["definition"], "postprocess": compose(clearNull, getTerminal)}, {"name": "statement", "symbols": ["definition"], "postprocess": pipe(
{"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": compose(remove, getTerminal)}, d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
{"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "setDefinition"], "postprocess": d => ({token: 'setDefinition', sets: d[2]})}, // recursive call to fit repeat?
{"name": "setDefinition$ebnf$1", "symbols": []}, d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
{"name": "setDefinition$ebnf$1$subexpression$1", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression", (lexer.has("comma") ? {type: "comma"} : comma), "__"]}, d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
{"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1", "setDefinition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}}, // may split from other definition statements
{"name": "setDefinition", "symbols": ["setDefinition$ebnf$1", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression"], "postprocess": constructSet}, d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
{"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)], "postprocess": d => d.filter(t => t && t.length)}, ) },
{"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": pipe(getTerminal, remove)},
{"name": "definition$ebnf$1", "symbols": []},
{"name": "definition$ebnf$1$subexpression$1", "symbols": ["setDefinition", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
{"name": "definition$ebnf$1", "symbols": ["definition$ebnf$1", "definition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "definition$ebnf$1", "setDefinition"], "postprocess": pipe(
// not yet sure why this call is required twice
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
clearNull,
) },
{"name": "setDefinition$ebnf$1$subexpression$1", "symbols": ["setAlias"]},
{"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "setDefinition$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "setDefinition", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "setDefinition$ebnf$1", "__", "equal", "__", "setExpression"], "postprocess":
pipe(
d => d.filter(t => !!t && t.length !== 0),
d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
d => d.map(t => t.length ?
// pretty ugly ([ { type: 'aias', alias: [ string ] }] ) => { setAlias: str }
{ setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
: t),
)
},
{"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)]},
{"name": "setExpression$ebnf$1$subexpression$1", "symbols": ["setOperation"]},
{"name": "setExpression$ebnf$1", "symbols": ["setExpression$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "setExpression$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "setExpression", "symbols": [(lexer.has("openCurlyBracket") ? {type: "openCurlyBracket"} : openCurlyBracket), "_", "setExpression$ebnf$1", "_", (lexer.has("closeCurlyBracket") ? {type: "closeCurlyBracket"} : closeCurlyBracket)], "postprocess":
pipe(
// filters commas and whitespace
d => d.filter(t => t && t.length),
d => d.map(t => t.map(u => u[0])),
flag('setExpression')
) },
{"name": "setAlias", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier)], "postprocess": pipe(
d => d && d.length ? d.filter(t => !!t) : d,
d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
d => d.filter(t => !!t),
d => ({type: 'alias', alias: d }),
) },
{"name": "phoneList$ebnf$1", "symbols": []}, {"name": "phoneList$ebnf$1", "symbols": []},
{"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), (lexer.has("comma") ? {type: "comma"} : comma), "_"]}, {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": []},
{"name": "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_"]},
{"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": ["phoneList$ebnf$1$subexpression$1$ebnf$1", "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), "phoneList$ebnf$1$subexpression$1$ebnf$1"]},
{"name": "phoneList$ebnf$1", "symbols": ["phoneList$ebnf$1", "phoneList$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}}, {"name": "phoneList$ebnf$1", "symbols": ["phoneList$ebnf$1", "phoneList$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "phoneList", "symbols": ["phoneList$ebnf$1", (lexer.has("phone") ? {type: "phone"} : phone)], "postprocess": d => d.filter(t => t && (t.type === 'phone' || t.length) ) {"name": "phoneList", "symbols": ["phoneList$ebnf$1"], "postprocess":
.map(t => { pipe(
if (!t.length) return t; d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
t.filter(st => st && st.type === 'phone') )
return t; },
}) } {"name": "setOperation", "symbols": ["orOperation"]},
{"name": "setOperation", "symbols": [(lexer.has("identifier") ? {type: "identifier"} : identifier)], "postprocess": pipe(
d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
)},
{"name": "orOperation", "symbols": ["_", "setOperation", "__", (lexer.has("kwSetOr") ? {type: "kwSetOr"} : kwSetOr), "__", "setOperation", "_"], "postprocess": pipe(
d => d.filter(d => !!d),
d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
) }
] ]
, ParserStart: "main" , ParserStart: "main"
} }
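The compose helper is renamed pipe in this change; a standalone check of its left-to-right composition, with invented example data:

```
const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);

const tidy = pipe(
  d => d.filter(t => !!t),     // drop nulls, as clearNull does
  d => d.map(t => String(t)),  // stringify whatever remains
);

console.log(tidy(['set', null, 'PLOSIVES'])); // [ 'set', 'PLOSIVES' ]
```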


@@ -1,24 +1,32 @@
@{% @{%
const { lexer } = require('./lexer.js'); const { lexer } = require('./lexer.js');
const getTerminal = d => d ? d[0] : null; const getTerminal = d => d ? d[0] : null;
const getAll = d => d.map((item, i) => ({[i]: item})); const getAll = d => d.map((item, i) => ({ [i]: item }));
const flag = token => d => d.map(item => ({[token]: item})) const flag = token => d => d.map(item => ({ [token]: item }))
const clearNull = d => d.filter(t => !!t); const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
const flagIndex = d => d.map((item, i) => ({[i]: item})) const flagIndex = d => d.map((item, i) => ({[i]: item}))
const remove = _ => null; const remove = _ => null;
const append = d => d.join(''); const append = d => d.join('');
const constructSet = d => d.reduce((acc, t) => { const constructSet = d => d.reduce((acc, t) => {
if (t && t.type === 'setIdentifier') acc.push({set: t}) if (t && t.type === 'setIdentifier') acc.push({set: t});
if (t && t.length) acc[acc.length - 1].phones = t; if (t && t.length) acc[acc.length - 1].phones = t;
return acc; return acc;
}, []); }, []);
const compose = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d) const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
%} %}
@lexer lexer @lexer lexer
main -> (statement):* main -> (_ statement):* _
{% compose(flag('main'), getTerminal) %} {% pipe(
clearNull,
// recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
flag('main'),
getTerminal,
) %}
_ -> (%whiteSpace):? _ -> (%whiteSpace):?
{% remove %} {% remove %}
@@ -26,54 +34,76 @@ _ -> (%whiteSpace):?
__ -> %whiteSpace __ -> %whiteSpace
{% remove %} {% remove %}
equal -> %equal
{% remove %}
statement -> comment | definition statement -> comment | definition
{% compose(clearNull, getTerminal) %} {% pipe(
d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
// recursive call to fit repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
// may split from other definition statements
d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
) %}
comment -> %comment comment -> %comment
{% compose(remove, getTerminal) %} {% pipe(getTerminal, remove) %}
# SETS # SETS
definition -> %kwSet __ setDefinition {% d => ({token: 'setDefinition', sets: d[2]}) %} definition -> %kwSet __ (setDefinition %comma __):* setDefinition
setDefinition -> (%setIdentifier __ %equal __ setExpression %comma __):* %setIdentifier __ %equal __ setExpression {% pipe(
{% constructSet %} // not yet sure why this call is required twice
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
clearNull,
) %}
setDefinition -> %setIdentifier (setAlias):? __ equal __ setExpression
{%
pipe(
d => d.filter(t => !!t && t.length !== 0),
d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
d => d.map(t => t.length ?
// pretty ugly ([ { type: 'aias', alias: [ string ] }] ) => { setAlias: str }
{ setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
: t),
)
%}
setExpression -> %openSquareBracket _ phoneList _ %closeSquareBracket setExpression -> %openSquareBracket _ phoneList _ %closeSquareBracket
{% d => d.filter(t => t && t.length) %} | %openCurlyBracket _ (setOperation):? _ %closeCurlyBracket
phoneList -> (%phone %comma _):* %phone {%
{% d => d.filter(t => t && (t.type === 'phone' || t.length) ) pipe(
.map(t => { // filters commas and whitespace
if (!t.length) return t; d => d.filter(t => t && t.length),
t.filter(st => st && st.type === 'phone') d => d.map(t => t.map(u => u[0])),
return t; flag('setExpression')
}) %} ) %}
setAlias -> %comma _ %setIdentifier
{% pipe(
d => d && d.length ? d.filter(t => !!t) : d,
d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
d => d.filter(t => !!t),
d => ({type: 'alias', alias: d }),
) %}
# assignmentExpression: phoneList -> (%phone (%comma _):* ):*
# /* {%
# * SPEC: pipe(
# * conditionalExpression d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
# * | leftHandSideExpression assignmentOperator assignmentExpression )
# */ %}
# (leftHandSideExpression assignmentOperator) => setOperation -> orOperation
# leftHandSideExpression assignmentOperator assignmentExpression | %identifier
# | conditionalExpression {% pipe(
# ; d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
)%}
# assignmentExpressionNoln: orOperation -> _ setOperation __ %kwSetOr __ setOperation _
# conditionalExpressionNoln {% pipe(
# | leftHandSideExpression assignmentOperator assignmentExpressionNoln d => d.filter(d => !!d),
# ; d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
) %}
# assignmentOperator:
# /* note that in the grammar these are listed out explicitely */
# EQ | TIMESEQ | DIVIDEEQ | PERCENTEQ | PLUSEQ | MINUSEQ | LSHIFTEQ | RSHIFTEQ
# | GT3EQ | AMPEREQ | CAROTEQ | PIPEEQ
# ;
# expression:
# /*
# * SPEC:
# * assignmentExpression
# * | expression COMMA assignmentExpression
# */
# assignmentExpression (expressionTail)*
# ;
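Per the commit messages, parsing of the or operation is still a stub at this point; a sketch of the kind of input the updated grammar.ne aims to accept, with no output asserted:

```
const { parser } = require('./parser');

// alias plus an 'or' join, the two features this range adds to grammar.ne
const latl = `
set NASALS, N = [ m, n, ŋ ],
    STOPS = [ p, t, k ],
    NASAL_OR_STOP = { NASALS or STOPS }
`;

console.log(parser().feed(latl).results);
```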


@@ -1,117 +1,124 @@
const moo = require('moo'); const moo = require("moo");
const lexer = moo.states({ const lexer = moo.states({
main: { main: {
comment: /;.*$/, comment: /;.*$/,
star: { match: /\*/, push: 'epoch' }, star: { match: /\*/, push: "epoch" },
slash: { match: /\//, push: 'lexicon' }, slash: { match: /\//, push: "lexicon" },
// change so that identifiers are always upper, keywords are always lower, phones are always lower // change so that identifiers are always upper, keywords are always lower, phones are always lower
'kwSet': { match: 'set', type: moo.keywords({ 'kwSet': 'set '}), push: 'setDefinition'}, kwSet: {
identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/, }, match: "set",
openBracket: { match: /\[/, push: 'feature' }, type: moo.keywords({ kwSet: "set " }),
whiteSpace: { match: /\s+/, lineBreaks: true }, push: "setDefinition",
newLine: { match: /\n+/, lineBreaks: true } },
identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
openBracket: { match: /\[/, push: "feature" },
whiteSpace: { match: /\s+/, lineBreaks: true },
newLine: { match: /\n+/, lineBreaks: true },
}, },
epoch: { epoch: {
identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/, push: 'rule' }, identifier: {
openParen: { match: /\(/, push: 'ruleDefinition' }, match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/,
pipe: { match: /\|/, pop: true }, push: "rule",
greaterThan: /\>/, },
arrow: /\-\>/, openParen: { match: /\(/, push: "ruleDefinition" },
hash: /#/, pipe: { match: /\|/, pop: true },
slash: /\//, greaterThan: /\>/,
dot: /\./, arrow: /\-\>/,
underscore: /\_/, hash: /#/,
newLine: { match: /\n/, lineBreaks: true } slash: /\//,
dot: /\./,
underscore: /\_/,
newLine: { match: /\n/, lineBreaks: true },
}, },
ruleDefinition: { ruleDefinition: {
doubleTick: { match: /``/, push: 'ruleName' }, doubleTick: { match: /``/, push: "ruleName" },
singleTick: { match: /`/, push: 'ruleDescription' }, singleTick: { match: /`/, push: "ruleDescription" },
// push rule // push rule
closeParen: { match: /\)/, pop: true }, closeParen: { match: /\)/, pop: true },
newLine: { match: /\n/, lineBreaks: true } newLine: { match: /\n/, lineBreaks: true },
}, },
ruleName: { ruleName: {
ruleName: { match: /.+(?=``)/ }, ruleName: { match: /.+(?=``)/ },
doubleTick: { match: /``/, pop: true } doubleTick: { match: /``/, pop: true },
}, },
ruleDescription: { ruleDescription: {
ruleDescription: { match: /.+(?=`)/ }, ruleDescription: { match: /.+(?=`)/ },
singleTick: { match: /`/, pop: true } singleTick: { match: /`/, pop: true },
}, },
rule: { rule: {
openSquareBracket: { match: /\[/, push: 'ruleFeature' }, openSquareBracket: { match: /\[/, push: "ruleFeature" },
// whiteSpace: { match: /\s/ }, // whiteSpace: { match: /\s/ },
newLine: { match: /\n/, pop: true, lineBreaks: true } newLine: { match: /\n/, pop: true, lineBreaks: true },
}, },
ruleFeature: { ruleFeature: {
ruleFeature: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ }, ruleFeature: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
closeBracket: { match: /\]/, pop: true }, closeBracket: { match: /\]/, pop: true },
newLine: { match: /\n/, lineBreaks: true } newLine: { match: /\n/, lineBreaks: true },
}, },
lexicon: { lexicon: {
slash: { match: /\//, pop: true }, slash: { match: /\//, pop: true },
newLine: { match: /\n/, lineBreaks: true } newLine: { match: /\n/, lineBreaks: true },
}, },
feature: { feature: {
closeBracket: { match: /\]/, pop: true }, closeBracket: { match: /\]/, pop: true },
positiveAssignment: /\+=/, positiveAssignment: /\+=/,
negativeAssignment: /\-=/, negativeAssignment: /\-=/,
newLine: { match: /\n/, lineBreaks: true } newLine: { match: /\n/, lineBreaks: true },
}, },
setDefinition: { setDefinition: {
comment: /;.*$/, comment: /;.*$/,
setIdentifier: { match: /[A-Z]+[A-Z_]*/ }, setIdentifier: { match: /[A-Z]+[A-Z_]*/ },
openCurlyBracket: { match: /\{/, push: 'setOperation' }, openCurlyBracket: { match: /\{/, push: "setOperation" },
equal: /=/, equal: /=/,
openSquareBracket: /\[/, openSquareBracket: /\[/,
phone: /[\u00c0-\u03FFa-z]+/, phone: /[\u00c0-\u03FFa-z]+/,
closeSquareBracket: { match: /\]/ }, closeSquareBracket: { match: /\]/ },
comma: { match: /,/, push: 'commaOperation' }, comma: { match: /,/, push: "commaOperation" },
whiteSpace: { match: /[\t ]+/ }, whiteSpace: { match: /[\t ]+/ },
newLine: { match: /\n/, pop: true, lineBreaks: true }, newLine: { match: /\n/, pop: true, lineBreaks: true },
}, },
setOperation: { setOperation: {
closeCurlyBracket: { match: /\}/, pop: true }, closeCurlyBracket: { match: /\}/, pop: true },
// ! restrict identifiers // ! restrict identifiers
keyword: { match: ['not', 'and', 'or', 'nor', 'in', 'yield', 'concat', 'dissoc'], type: moo.keywords({ keyword: {
'kw-set-not': 'not' , match: ["not", "and", "or", "nor", "in", "yield", "concat", "dissoc"],
'kw-set-and': 'and' , type: moo.keywords({
'kw-set-or': 'or' , kwSetNot: "not",
'kw-set-nor': 'nor' , kwSetAnd: "and",
'kw-set-in': 'in' , kwSetOr: "or",
'kw-set-yield': 'yield' , kwSetNor: "nor",
'kw-set-concat': 'concat', kwSetIn: "in",
'kw-set-dissoc': 'dissoc' kwSetYield: "yield",
}) kwSetConcat: "concat",
kwSetDissoc: "dissoc",
}),
}, },
identifier: /[A-Z]+[A-Z_]+/, identifier: /[A-Z]+[A-Z_]+/,
whiteSpace: /[\t ]+/, whiteSpace: { match: /\s+/, lineBreaks: true },
openSquareBracket: /\]/, openSquareBracket: /\[/,
closeSquareBracket: /\[/, closeSquareBracket: /\]/,
identifier: /[A-Z]+[A-Z_]*/, identifier: /[A-Z]+[A-Z_]*/,
phone: /[\u00c0-\u03FFa-z]+/, phone: /[\u00c0-\u03FFa-z]+/,
newLine: { match: /\n/, lineBreaks: true }
}, },
commaOperation: { commaOperation: {
// if comma is detected during a definition, the commaOperation consumes all white space and pops back to definition // if comma is detected during a definition, the commaOperation consumes all white space and pops back to definition
// this prevents popping back to main // this prevents popping back to main
comment: /\s*;.*$/, comment: /\s*;.*$/,
whiteSpace: { match: /\s+/, lineBreaks: true, pop: true }, whiteSpace: { match: /\s+/, lineBreaks: true, pop: true },
newLine: { match: /\n/, lineBreaks: true, pop: true } newLine: { match: /\n/, lineBreaks: true, pop: true },
} },
}); });
module.exports = {lexer}; module.exports = { lexer };
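A sketch of tokenizing a one-line set definition with the lexer above, using moo's reset/next API; the listed token types are an expectation based on the state pushes, not a captured run:

```
const { lexer } = require('./lexer.js');

lexer.reset('set PLOSIVES = [ p, t, k ]\n');
let token;
while ((token = lexer.next())) {
  console.log(token.type, token.value);
}
// roughly: kwSet, whiteSpace, setIdentifier, whiteSpace, equal, whiteSpace,
// openSquareBracket, phone, comma, ... closeSquareBracket, newLine
```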

File diff suppressed because it is too large.


@@ -0,0 +1,10 @@
import { assertionData } from './assertionData';
import { codeGenerator } from '../codeGenerator';
describe('codeGenerator', () => {
  it('parses simple comment', () => {
    const { latl, code } = assertionData.simpleComment;
    const generatedCode = codeGenerator(latl);
    expect(generatedCode).toEqual(code);
  });
})


@@ -1,49 +1,180 @@
import { lexer } from '../lexer'; import { lexer } from "../lexer";
import { parser } from '../parser'; import { parser } from "../parser";
import { assertionData } from './assertionData'; import { assertionData } from "./assertionData";
describe('parser', () => { describe("parser", () => {
it('parses simple comment', () => { it("parses simple comment", () => {
const { latl } = assertionData.simpleComment; const { latl, AST } = assertionData.simpleComment;
const AST = parser().feed(latl).results; const feedResults = parser().feed(latl).results;
expect(AST.length).toBe(1); expect(feedResults.length).toBe(1);
console.log(AST[0]) expect(feedResults[0]).toStrictEqual(AST);
// expect(AST[0]).toStrictEqual() });
})
// it('parses multiple set definitions with comma operator', () => { it("parses simple set definition", () => {
// const { latl } = assertionData.commaSetDefinition; const { latl, AST } = assertionData.simpleSetDefinition;
// const AST = parser().feed(latl) const feedResults = parser().feed(latl).results;
// console.log(AST) expect(feedResults.length).toBe(1);
// }); expect(feedResults[0]).toStrictEqual(AST);
});
// it('lexes set definition with alias', () => { it("parses multiple set definitions with comma operator", () => {
// const { latl, tokens } = assertionData.setAliasDefinition; const { latl, AST } = assertionData.commaSetDefinition;
// const stream = getStream(latl); const feedResults = parser().feed(latl).results;
// expect(stream).toStrictEqual(tokens); expect(feedResults.length).toBe(1);
// }); expect(feedResults[0]).toStrictEqual(AST);
});
// it('lexes set definition with set join', () => { it("lexes set definition with alias", () => {
// const { latl, tokens } = assertionData.setDefinitionJoin; const { latl, AST } = assertionData.setAliasDefinition;
// const stream = getStream(latl); const feedResults = parser().feed(latl).results;
// expect(stream).toStrictEqual(tokens); expect(feedResults[0]).toStrictEqual(AST);
// }); });
// it('lexes set definition with yield operation', () => { it.skip("lexes set definition with set join", () => {
// const { latl, tokens } = assertionData.setDefinitionYield; const { latl, AST } = assertionData.setDefinitionJoin;
// const stream = getStream(latl); const feedResults = parser().feed(latl).results;
// expect(stream).toStrictEqual(tokens); expect(feedResults[0]).toStrictEqual(AST);
// }); });
// it('lexes all set join operations', () => { it.todo(
// const { latl, tokens } = assertionData.setOperationsJoin; "lexes set definition with yield operation"
// const stream = getStream(latl); // , () => {
// expect(stream).toStrictEqual(tokens); // const { latl, tokens } = assertionData.setDefinitionYield;
// }); // const stream = getStream(latl);
// expect(stream).toStrictEqual(tokens);
// }
);
// it('lexes set filter, concat, and dissoc operations', () => { it.todo(
// const { latl, tokens } = assertionData.setOperations; "lexes all set join operations"
// const stream = getStream(latl); // , () => {
// expect(stream).toStrictEqual(tokens); // const { latl, tokens } = assertionData.setOperationsJoin;
// }) // const stream = getStream(latl);
}) // expect(stream).toStrictEqual(tokens);
// }
);
it.todo(
"lexes set filter, concat, and dissoc operations"
// , () => {
// const { latl, tokens } = assertionData.setOperations;
// const stream = getStream(latl);
// expect(stream).toStrictEqual(tokens);
// }
);
});
// {
// "set":
// [
// [
// [
// {
// "col": 5,
// "line": 2,
// "lineBreaks": 0,
// "offset": 5,
// "text": "NASAL_PULMONIC_CONSONANTS",
// "toString": [tokenToString],
// "type": "setIdentifier",
// "value": "NASAL_PULMONIC_CONSONANTS",
// },
// null,
// {
// "col": 45,
// "line": 2,
// "lineBreaks": 0,
// "offset": 45,
// "text": "=",
// "toString": [tokenToString],
// "type": "equal",
// "value": "=",
// },
// null,
// [
// [
// {
// "col": 49,
// "line": 2,
// "lineBreaks": 0,
// "offset": 49,
// "text": "m̥",
// "toString": [tokenToString],
// "type": "phone",
// "value": "m̥",
// },
// {
// "col": 91,
// "line": 2,
// "lineBreaks": 0,
// "offset": 91,
// "text": "ɴ",
// "toString": [tokenToString],
// "type": "phone",
// "value": "ɴ",
// },
// ],
// ],
// {
// "col": 94,
// "line": 2,
// "lineBreaks": 0,
// "offset": 94,
// "text": ",",
// "toString": [tokenToString],
// "type": "comma",
// "value": ",",
// },
// null,
// ],
// ],
// - "setIdentifier": "STOP_PULMONIC_CONSONANTS",
// {
// "col": 5,
// "line": 3,
// "lineBreaks": 0,
// "offset": 100,
// "text": "STOP_PULMONIC_CONSONANTS",
// "toString": [tokenToString],
// "type": "setIdentifier",
// "value": "STOP_PULMONIC_CONSONANTS",
// },
// null,
// {
// "col": 45,
// "line": 3,
// "lineBreaks": 0,
// "offset": 140,
// "text": "=",
// "toString": [tokenToString],
// "type": "equal",
// "value": "=",
// },
// null,
// [
// [
// {
// "col": 49,
// "line": 3,
// "lineBreaks": 0,
// "offset": 144,
// "text": "p",
// "toString": [tokenToString],
// "type": "phone",
// "value": "p",
// },
// {
// "col": 104,
// "line": 3,
// "lineBreaks": 0,
// "offset": 199,
// "text": "ʔ",
// "toString": [tokenToString],
// "type": "phone",
// "value": "ʔ",
// },
// ],
// ],
// ],
// "token": "kwSet",
// }