parse AST for single set definition

Sorrel Bri 2020-05-08 23:32:49 -07:00
parent 3d4d1cd66e
commit 40aec30537
4 changed files with 131 additions and 50 deletions

View file

@@ -17,6 +17,7 @@ function id(x) { return x[0]; }
     return acc;
   }, []);
 const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
+const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
 var grammar = {
   Lexer: lexer,
   ParserRules: [
@@ -24,32 +25,52 @@ var grammar = {
     {"name": "main$ebnf$1$subexpression$1", "symbols": ["_", "statement"]},
     {"name": "main$ebnf$1", "symbols": ["main$ebnf$1", "main$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
     {"name": "main", "symbols": ["main$ebnf$1", "_"], "postprocess": pipe(
       getTerminal,
       clearNull,
       flag('main'),
       getTerminal,
     ) },
     {"name": "_$ebnf$1$subexpression$1", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)]},
     {"name": "_$ebnf$1", "symbols": ["_$ebnf$1$subexpression$1"], "postprocess": id},
     {"name": "_$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
     {"name": "_", "symbols": ["_$ebnf$1"], "postprocess": remove},
     {"name": "__", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)], "postprocess": remove},
+    {"name": "equal", "symbols": [(lexer.has("equal") ? {type: "equal"} : equal)], "postprocess": remove},
     {"name": "statement", "symbols": ["comment"]},
-    {"name": "statement", "symbols": ["definition"], "postprocess": pipe(getTerminal)},
+    {"name": "statement", "symbols": ["definition"], "postprocess": pipe(
+      objFromArr
+    ) },
     {"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": pipe(getTerminal, remove)},
-    {"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "setDefinition"], "postprocess": d => ({[d[0].value]: d[2]})},
+    {"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "setDefinition"], "postprocess": pipe(
+      d => ({[d[0].value]: objFromArr(d[2]) }),
+    ) },
     {"name": "setDefinition$ebnf$1", "symbols": []},
-    {"name": "setDefinition$ebnf$1$subexpression$1", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
+    {"name": "setDefinition$ebnf$1$subexpression$1", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", "equal", "__", "setExpression", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
     {"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1", "setDefinition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "setDefinition", "symbols": ["setDefinition$ebnf$1", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", (lexer.has("equal") ? {type: "equal"} : equal), "__", "setExpression"], "postprocess": d => {
-      if (d.type === 'setIdentifier') return { setIdentifier: d.value }
-      return d
-    } },
-    {"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)]},
+    {"name": "setDefinition", "symbols": ["setDefinition$ebnf$1", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "__", "equal", "__", "setExpression"], "postprocess":
+      pipe(
+        d => d.filter(t => !!t && t.length !== 0),
+        d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
+        d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t)
+      )
+    },
+    {"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)], "postprocess":
+      pipe(
+        d => d.filter(t => t && t.length),
+        d => d.map(t => t.map(u => u[0])),
+        flag('setExpression')
+      ) },
     {"name": "phoneList$ebnf$1", "symbols": []},
-    {"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), (lexer.has("comma") ? {type: "comma"} : comma), "_"]},
+    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": []},
+    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_"]},
+    {"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": ["phoneList$ebnf$1$subexpression$1$ebnf$1", "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
+    {"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), "phoneList$ebnf$1$subexpression$1$ebnf$1"]},
     {"name": "phoneList$ebnf$1", "symbols": ["phoneList$ebnf$1", "phoneList$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
-    {"name": "phoneList", "symbols": ["phoneList$ebnf$1", (lexer.has("phone") ? {type: "phone"} : phone)], "postprocess": pipe(d => d ? d.toString() : d)}
+    {"name": "phoneList", "symbols": ["phoneList$ebnf$1"], "postprocess":
+      pipe(
+        d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
+      )
+    }
   ]
   , ParserStart: "main"
 }
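For reference, the two helpers these postprocessors lean on behave as below. The helper definitions are taken from the grammar preamble in the diff above; the sample data is illustrative, not from the commit:

    // pipe and objFromArr, as defined in the grammar preamble
    const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
    const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});

    // objFromArr folds an array of single-key objects into one object
    objFromArr([{ PLOSIVES: ['p', 't'] }, { NASALS: ['m', 'n'] }]);
    // => { PLOSIVES: ['p', 't'], NASALS: ['m', 'n'] }

    // pipe threads a parse result through postprocess steps left to right
    pipe(s => s.trim(), s => s.toUpperCase())('  set  ');
    // => 'SET'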

View file

@@ -13,6 +13,7 @@
     return acc;
   }, []);
 const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
+const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
 %}
 
 @lexer lexer
@@ -23,7 +24,7 @@ main -> (_ statement):* _
     clearNull,
     flag('main'),
     getTerminal,
   ) %}
 
 _ -> (%whiteSpace):?
   {% remove %}
@@ -31,26 +32,41 @@ _ -> (%whiteSpace):?
 __ -> %whiteSpace
   {% remove %}
 
+equal -> %equal
+  {% remove %}
+
 statement -> comment | definition
-  {% pipe(getTerminal) %}
+  {% pipe(
+    objFromArr
+  ) %}
 
 comment -> %comment
   {% pipe(getTerminal, remove) %}
 
 # SETS
 definition -> %kwSet __ setDefinition
-  {% d => ({[d[0].value]: d[2]}) %}
+  {% pipe(
+    d => ({[d[0].value]: objFromArr(d[2]) }),
+  ) %}
+  # {% flag('definition') %}
-setDefinition -> (%setIdentifier __ %equal __ setExpression %comma __):* %setIdentifier __ %equal __ setExpression
-  {% d => {
-    if (d.type === 'setIdentifier') return { setIdentifier: d.value }
-    return d
-  } %}
+setDefinition -> (%setIdentifier __ equal __ setExpression %comma __):* %setIdentifier __ equal __ setExpression
+  {%
+    pipe(
+      d => d.filter(t => !!t && t.length !== 0),
+      d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
+      d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t)
+    )
+  %}
 setExpression -> %openSquareBracket _ phoneList _ %closeSquareBracket
-  # {% pipe(d => d.filter(t => t && t.length)) %}
+  {%
+    pipe(
+      d => d.filter(t => t && t.length),
+      d => d.map(t => t.map(u => u[0])),
+      flag('setExpression')
+    ) %}
-phoneList -> (%phone %comma _):* %phone
-  {% pipe(d => d ? d.toString() : d) %}
-  # {% d => d.filter(t => t && (t.type === 'phone' || t[0]) )
-  #      .flatMap(t => {
-  #        if (!t.length) return t;
-  #        return t[0].filter(st => st && st.type === 'phone')
-  # }) %}
+phoneList -> (%phone (%comma _):* ):*
+  {%
+    pipe(
+      d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
+    )
+  %}
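A minimal sketch of exercising the recompiled grammar through nearley's standard API (the require path is hypothetical; the expected output mirrors the simpleSetDefinition fixture added below):

    const nearley = require('nearley');
    const grammar = require('./grammar'); // output of running nearleyc on the file above

    const parser = new nearley.Parser(nearley.Grammar.fromCompiled(grammar));
    parser.feed('set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ ]');

    // an unambiguous parse yields exactly one result
    console.log(parser.results[0]);
    // => { main: [ { set: { setIdentifier: 'NASAL_PULMONIC_CONSONANTS',
    //                       setExpression: [ 'm̥', 'm', 'ɱ' ] } } ] }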

View file

@@ -10,12 +10,37 @@ export const assertionData = {
     code: ''
   },
   simpleSetDefinition: {
-    latl: `set PLOSIVES`,
+    latl: `set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ ]`,
     tokens: [
       { type: 'kwSet', value: 'set' },
       { type: 'whiteSpace', value: ' ' },
-      { type: 'setIdentifier', value: 'PLOSIVES' }
+      { type: 'setIdentifier', value: "NASAL_PULMONIC_CONSONANTS" },
+      { type: "whiteSpace", value: " ", },
+      { type: "equal", value: "=", },
+      { type: "whiteSpace", value: " ", },
+      { type: "openSquareBracket", value: "[", },
+      { type: "whiteSpace", value: " ", },
+      { type: "phone", value: "m̥", },
+      { type: "comma", value: ",", },
+      { type: "whiteSpace", value: " ", },
+      { type: "phone", value: "m", },
+      { type: "comma", value: ",", },
+      { type: "whiteSpace", value: " ", },
+      { type: "phone", value: "ɱ", },
+      { type: "whiteSpace", value: " ", },
+      { type: "closeSquareBracket", value: "]"}
     ],
+    AST: {
+      main: [
+        {
+          set: {
+            setIdentifier: 'NASAL_PULMONIC_CONSONANTS',
+            setExpression: [ 'm̥', 'm', 'ɱ' ]
+          }
+        }
+      ]
+    }
+    ,
     code: ''
   },
   commaSetDefinition: {
@@ -137,14 +162,16 @@ set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ, n̼, n̥, n, ɳ̊,
     AST: {
       main: [
         {
-          set: {
-            setIdentifier: 'NASAL_PULMONIC_CONSONANTS',
-            items: [ 'm̥', 'm', 'ɱ', 'n̼', 'n̥', 'n', 'ɳ̊', 'ɳ', 'ɲ̊', 'ɲ', `ŋ`, ' ̊ŋ', 'ɴ' ]
-          },
-          set: {
-            setIdentifier: 'STOP_PULMONIC_CONSONANTS',
-            items: [ 'p', 'b', 'p̪', 'b̪', 't̼', 'd̼', 't', 'd', 'ʈ', 'ɖ', 'c', 'ɟ', 'k', 'ɡ', 'q', 'ɢ', 'ʡ', 'ʔ' ]
-          }
+          set: [
+            {
+              setIdentifier: 'NASAL_PULMONIC_CONSONANTS',
+              items: [ 'm̥', 'm', 'ɱ', 'n̼', 'n̥', 'n', 'ɳ̊', 'ɳ', 'ɲ̊', 'ɲ', `ŋ`, ' ̊ŋ', 'ɴ' ]
+            },
+            {
+              setIdentifier: 'STOP_PULMONIC_CONSONANTS',
+              items: [ 'p', 'b', 'p̪', 'b̪', 't̼', 'd̼', 't', 'd', 'ʈ', 'ɖ', 'c', 'ɟ', 'k', 'ɡ', 'q', 'ɢ', 'ʡ', 'ʔ' ]
+            }
+          ]
         }
       ]
     }

View file

@@ -10,6 +10,13 @@ describe('parser', () => {
     expect(feedResults[0]).toStrictEqual(AST)
   })
 
+  it('parses simple set definition', () => {
+    const { latl, AST } = assertionData.simpleSetDefinition;
+    const feedResults = parser().feed(latl).results;
+    expect(feedResults.length).toBe(1);
+    expect(feedResults[0]).toStrictEqual(AST);
+  })
+
   it('parses multiple set definitions with comma operator', () => {
     const { latl, AST } = assertionData.commaSetDefinition;
     const feedResults = parser().feed(latl).results;
@@ -17,35 +24,45 @@ describe('parser', () => {
     expect(feedResults[0]).toStrictEqual(AST);
   });
 
-  // it('lexes set definition with alias', () => {
+  it.todo('lexes set definition with alias'
+  // , () => {
   //   const { latl, tokens } = assertionData.setAliasDefinition;
   //   const stream = getStream(latl);
   //   expect(stream).toStrictEqual(tokens);
-  // });
+  // }
+  );
 
-  // it('lexes set definition with set join', () => {
+  it.todo('lexes set definition with set join'
+  // , () => {
   //   const { latl, tokens } = assertionData.setDefinitionJoin;
   //   const stream = getStream(latl);
   //   expect(stream).toStrictEqual(tokens);
-  // });
+  // }
+  );
 
-  // it('lexes set definition with yield operation', () => {
+  it.todo('lexes set definition with yield operation'
+  // , () => {
   //   const { latl, tokens } = assertionData.setDefinitionYield;
   //   const stream = getStream(latl);
   //   expect(stream).toStrictEqual(tokens);
-  // });
+  // }
+  );
 
-  // it('lexes all set join operations', () => {
+  it.todo('lexes all set join operations'
+  // , () => {
   //   const { latl, tokens } = assertionData.setOperationsJoin;
   //   const stream = getStream(latl);
   //   expect(stream).toStrictEqual(tokens);
-  // });
+  // }
+  );
 
-  // it('lexes set filter, concat, and dissoc operations', () => {
+  it.todo('lexes set filter, concat, and dissoc operations'
+  // , () => {
   //   const { latl, tokens } = assertionData.setOperations;
   //   const stream = getStream(latl);
   //   expect(stream).toStrictEqual(tokens);
-  // })
+  // }
+  )
 
 })
 // {