Compare commits

master..sj_refactor_test

No commits in common. "master" and "sj_refactor_test" have entirely different histories.

44 changed files with 117 additions and 4187 deletions
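
For reference, the same numbers can be reproduced locally with git's two-dot diff, which compares the two branch tips directly and so still works when, as here, the branches share no merge base (a sketch; it assumes both branches exist in your clone):

    # overall summary, matching the header above
    git diff --shortstat master..sj_refactor_test

    # per-file breakdown
    git diff --stat master..sj_refactor_test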

LICENSE

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2021 Sorrel

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md

@@ -14,12 +14,6 @@ Features:
 - multi-character phone support
 - comparative runs for multiple rule sets
-## What is LATL?
-[Read the specification](/src/utils/latl/README.md)
-LATL is a JavaScript-targeting compiled language for doing linguistic analysis and transformations.
 ## How do I use FCA?
 An FCA run requires the user to define three parameters:


package-lock.json (generated)

@@ -4857,11 +4857,6 @@
        }
      }
    },
-    "discontinuous-range": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/discontinuous-range/-/discontinuous-range-1.0.0.tgz",
-      "integrity": "sha1-44Mx8IRLukm5qctxx3FYWqsbxlo="
-    },
     "dns-equal": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
@@ -10015,11 +10010,6 @@
        }
      }
    },
-    "moo": {
-      "version": "0.5.1",
-      "resolved": "https://registry.npmjs.org/moo/-/moo-0.5.1.tgz",
-      "integrity": "sha512-I1mnb5xn4fO80BH9BLcF0yLypy2UKl+Cb01Fu0hJRkJjlCRtxZMWkTdAtDd5ZqCOxtCkhmRwyI57vWT+1iZ67w=="
-    },
     "move-concurrently": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/move-concurrently/-/move-concurrently-1.0.1.tgz",
@@ -10092,18 +10082,6 @@
       "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
       "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc="
     },
-    "nearley": {
-      "version": "2.19.1",
-      "resolved": "https://registry.npmjs.org/nearley/-/nearley-2.19.1.tgz",
-      "integrity": "sha512-xq47GIUGXxU9vQg7g/y1o1xuKnkO7ev4nRWqftmQrLkfnE/FjRqDaGOUakM8XHPn/6pW3bGjU2wgoJyId90rqg==",
-      "requires": {
-        "commander": "^2.19.0",
-        "moo": "^0.5.0",
-        "railroad-diagrams": "^1.0.0",
-        "randexp": "0.4.6",
-        "semver": "^5.4.1"
-      }
-    },
     "negotiator": {
       "version": "0.6.2",
       "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz",
@@ -12191,20 +12169,6 @@
        "performance-now": "^2.1.0"
      }
    },
-    "railroad-diagrams": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz",
-      "integrity": "sha1-635iZ1SN3t+4mcG5Dlc3RVnN234="
-    },
-    "randexp": {
-      "version": "0.4.6",
-      "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.4.6.tgz",
-      "integrity": "sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ==",
-      "requires": {
-        "discontinuous-range": "1.0.0",
-        "ret": "~0.1.10"
-      }
-    },
     "randombytes": {
       "version": "2.1.0",
       "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",

package.json

@@ -7,8 +7,6 @@
     "flow-bin": "^0.113.0",
     "gh-pages": "^2.2.0",
     "local-storage": "^2.0.0",
-    "moo": "^0.5.1",
-    "nearley": "^2.19.1",
     "node-sass": "^4.13.1",
     "react": "^16.12.0",
     "react-dom": "^16.12.0",
@@ -17,8 +15,6 @@
   },
   "scripts": {
     "start": "react-scripts start",
-    "compile-grammar": "nearleyc src/utils/latl/grammar.ne -o src/utils/latl/grammar.js",
-    "test-grammar": "nearley-test src/utils/latl/grammar.js --input",
     "flow": "flow",
     "build": "react-scripts build",
     "test": "react-scripts test",

Binary file not shown. (Before: 90 KiB)

Binary file not shown. (Before: 152 KiB)

Binary file not shown. (Before: 153 KiB)

Binary file not shown. (Before: 148 KiB)

Binary file not shown.

Binary file not shown.


@@ -1,88 +0,0 @@
set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ, n̼, n̥, n, ɳ̊, ɳ, ɲ̊, ɲ, ŋ, ŋ̊, ɴ ],
STOP_PULMONIC_CONSONANTS = [ p, b, p̪, b̪, t̼, d̼, t, d, ʈ, ɖ, c, ɟ, k, ɡ, q, ɢ, ʡ, ʔ ],
S_FRICATIVE_PULMONIC_CONSONANTS = [ s, z, ʃ, ʒ, ʂ, ʐ, ɕ, ʑ ],
FRICATIVE_PULMONIC_CONSONANTS = [ ɸ, β, f, v, θ̼, ð̼, θ, ð, θ̠, ð̠, ɹ̠̊˔, ɹ̠˔, ɻ˔, ç, ʝ, x, ɣ, χ, ʁ, ħ, ʕ, h, ɦ ],
APPROXIMANT_PULMONIC_CONSONANTS = [ ʋ̥, ʋ, ɹ̥, ɹ, ɻ̊, ɻ, j̊, j, ɰ̊, ɰ, ʔ̞ ],
TAP_PULMONIC_CONSONANTS = [ ⱱ̟, ⱱ, ɾ̼, ɾ̥, ɾ, ɽ̊, ɽ, ɢ̆, ʡ̆ ],
TRILL_PULMONIC_CONSONANTS = [ ʙ̥, ʙ, r̥, r, ɽ̊r̥, ɽr, ʀ̥, ʀ, ʜ, ʢ ],
L_FRICATIVE_PULMONIC_CONSONANTS = [ ɬ, ɮ, ɭ̊˔, ɭ˔, ʎ̝̊, ʎ̝, ʟ̝̊, ʟ̝ ],
L_APPROXIMANT_PULMONIC_CONSONANTS = [ l̥, l, ɭ̊, ɭ, ʎ̥, ʎ, ʟ̥, ʟ, ʟ̠ ],
L_TAP_PULMONIC_CONSONANTS = [ ɺ, ɭ̆, ʎ̆, ʟ̆ ],
AFFRICATE_PULMONIC_CONSONANTS = [ pɸ, bβ, p̪f, b̪v, t̪θ, d̪ð, tɹ̝̊, dɹ̝, t̠ɹ̠̊˔, d̠ɹ̠˔, cç, ɟʝ, kx, ɡɣ, qχ, ʡʢ, ʔh ],
S_AFFRICATE_PULMONIC_CONSONANTS = [ ts, dz, t̠ʃ, d̠ʒ, ʈʂ, ɖʐ, tɕ, dʑ ],
L_AFFRICATE_PULMONIC_CONSONANTS = [ tɬ, dɮ, ʈɭ̊˔, cʎ̝̊, kʟ̝̊, ɡʟ̝ ],
DOUBLE_STOP_PULMONIC_CONSONANTS = [ t͡p, d͡b, k͡p, ɡ͡b, q͡ʡ ],
DOUBLE_NASAL_PULMONIC_CONSONANTS = [ n͡m, ŋ͡m ],
DOUBLE_FRICATIVE_PULMONIC_CONSONANTS = [ ɧ ],
DOUBLE_APPROXIMANT_PULMONIC_CONSONANTS = [ ʍ, w, ɥ̊, ɥ, ɫ ]
set PULMONIC_CONSONANTS, C = { NASAL_PULMONIC_CONSONANTS or STOP_PULMONIC_CONSONANTS
or S_FRICATIVE_PULMONIC_CONSONANTS or FRICATIVE_PULMONIC_CONSONANTS
or APPROXIMANT_PULMONIC_CONSONANTS or TAP_PULMONIC_CONSONANTS
or TRILL_PULMONIC_CONSONANTS or L_FRICATIVE_PULMONIC_CONSONANTS
or L_APPROXIMANT_PULMONIC_CONSONANTS or L_TAP_PULMONIC_CONSONANTS
or AFFRICATE_PULMONIC_CONSONANTS or S_AFFRICATE_PULMONIC_CONSONANTS
or L_AFFRICATE_PULMONIC_CONSONANTS or DOUBLE_STOP_PULMONIC_CONSONANTS
or DOUBLE_NASAL_PULMONIC_CONSONANTS or DOUBLE_FRICATIVE_PULMONIC_CONSONANTS
or DOUBLE_APPROXIMANT_PULMONIC_CONSONANTS
}
set STOP_EJECTIVE_CONSONANTS = [ pʼ, tʼ, ʈʼ, cʼ, kʼ, qʼ, ʡʼ ],
FRICATIVE_EJECTIVE_CONSONANTS = [ ɸʼ, fʼ, θʼ, sʼ, ʃʼ, ʂʼ, ɕʼ, xʼ, χʼ ],
L_FRICATIVE_EJECTIVE_CONSONANTS = [ ɬʼ ],
AFFRICATE_EJECTIVE_CONSONANTS = [ tsʼ, t̠ʃʼ, ʈʂʼ, kxʼ, qχʼ ],
L_AFFRICATE_EJECTIVE_CONSONANTS = [ tɬʼ, cʎ̝̊ʼ, kʟ̝̊ʼ ]
set EJECTIVE_CONSONANTS = { STOP_EJECTIVE_CONSONANTS or FRICATIVE_EJECTIVE_CONSONANTS
or L_FRICATIVE_EJECTIVE_CONSONANTS or AFFRICATE_EJECTIVE_CONSONANTS
or L_AFFRICATE_EJECTIVE_CONSONANTS
}
set TENUIS_CLICK_CONSONANTS = [ ʘ, ǀ, ǃ, ǂ ],
VOICED_CLICK_CONSONANTS = [ ʘ̬, ǀ̬, ǃ̬, ǂ̬ ],
NASAL_CLICK_CONSONANTS = [ ʘ̃, ǀ̃, ǃ̃, ǂ̃ ],
L_CLICK_CONSONANTS = [ ǁ, ǁ̬ ]
set CLICK_CONSONANTS = { TENUIS_CLICK_CONSONANTS or VOICED_CLICK_CONSONANTS
or NASAL_CLICK_CONSONANTS or L_CLICK_CONSONANTS
}
set IMPLOSIVE_CONSONANTS = [ ɓ, ɗ, ᶑ, ʄ, ɠ, ʛ, ɓ̥, ɗ̥, ᶑ̊, ʄ̊, ɠ̊, ʛ̥ ]
set NON_PULMONIC_CONSONANTS = { EJECTIVE_CONSONANTS or CLICK_CONSONANTS or IMPLOSIVE_CONSONANTS }
set CONSONANTS = { PULMONIC_CONSONANTS or NON_PULMONIC_CONSONANTS }
set MODAL_VOWELS = [ i, y, ɨ, ʉ, ɯ, u, ɪ, ʏ, ʊ, e, ø ɘ, ɵ ɤ, o, ø̞ ə, o̞, ɛ, œ ɜ, ɞ ʌ, ɔ, æ, ɐ, a, ɶ, ä, ɑ, ɒ ],
BREATHY_VOWELS = { [ V ] in MODAL_VOWELS yield [ V̤ ] },
VOICELESS_VOWELS = { [ V ] in MODAL_VOWELS yield [ V̥ ] },
CREAKY_VOWELS = { [ V ] in MODAL_VOWELS yield [ V̰ ] }
set SHORT_ORAL_VOWELS = { MODAL_VOWELS or BREATHY_VOWELS or CREAKY_VOWELS or VOICELESS_VOWELS },
LONG_ORAL_VOWELS = { [ V ] in SHORT_ORAL_VOWELS yield [ Vː ] },
ORAL_VOWELS = { SHORT_ORAL_VOWELS or LONG_ORAL_VOWELS }
set NASAL_VOWELS = { [ V ] in ORAL_VOWELS yield [ Ṽ ] },
SHORT_NASAL_VOWELS = { [ Vː ] in NASAL_VOWELS yield [ V ]ː },
LONG_NASAL_VOWELS = { [ Vː ] in NASAL_VOWELS }
set VOWELS = { ORAL_VOWELS or NASAL_VOWELS }
set PHONES = { VOWELS or CONSONANTS }
; print [ GLOBAL ]
[lateral
+=
L_AFFRICATE_EJECTIVE_CONSONANTS, L_AFFRICATE_PULMONIC_CONSONANTS, L_APPROXIMANT_PULMONIC_CONSONANTS,
L_CLICK_CONSONANTS, L_FRICATIVE_EJECTIVE_CONSONANTS, L_FRICATIVE_PULMONIC_CONSONANTS, L_TAP_PULMONIC_CONSONANTS
-=
{ not { [+ lateral ] in CONSONANTS } }, VOWELS
; alternative
; { not { [+ lateral ] in PHONES } }
]
*proto-lang
|child-lang


@@ -1,644 +0,0 @@
; -------- GA ENGLISH PHONETIC INVENTORY
; ---- VOWELS = æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟
; -- NASAL = æ̃ / ẽ / ə̃ / ɑ̃ / ɔ̃ / ɪ̃ / ɛ̃ / ʌ̃ / ʊ̃ / ĩ / ũ
; ɪ̞ / ʊ̞ = lowered
; u̟ = advanced
; -- LABIAL = u̟ / ʊ̞ / ɔ
; -- +HIGH = i / u̟ / ʊ̞ / ɪ̞
; -- -HIGH = ɑ / æ / e / ə / ɛ / ʌ
; -- +LOW = ɑ / æ / ɛ
; -- -LOW = i / u̟ / ʊ̞ / ɪ̞ / e / ə / ʌ
; -- +BACK = ɑ / ɔ / ʌ / ʊ̞ / u̟
; -- -BACK = æ / e / ə / ɪ̞ / ɛ / i
; -- +TENSE = e / i / u̟ / ɑ
; -- -TENSE = æ / ə / ɪ̞ / ɛ / ʌ / ʊ̞ / ɔ
; ---- DIPHTHONGS = eə / eɪ̯ / ju̟ / äɪ̞ / ɔɪ̞ / oʊ̞ / aʊ̞ / ɑɹ / iɹ / ɛɹ / ɔɹ / ʊɹ
; ---- CONSONANTS = p (pʰ) / b (b̥) / t (tʰ)(ɾ)(ʔ) / d (d̥)(ɾ) / tʃ / dʒ (d̥ʒ̊) / k (kʰ) / g (g̊) / f / v (v̥) / θ / ð (ð̥) /
; s / z (z̥) / ʃ / ʒ (ʒ̊) / h (ɦ)(ç) / m (ɱ)(m̩) / n(n̩) / ŋ / l (l̩)/ ɹ (ɹʲ ~ ɹˤ)(ɹ̩) / w (w̥) / j / x / ʔ
; -- PLOSIVES = p / p' / pʰ / t / t' / tʰ ɾ / k / k' / kʰ
; -- AFFRICATES = tʃ / dʒ
; -- FRICATIVES = f / v / θ / ð / s / z / ʃ / ʒ / ç / x
; -- NASAL OBSTRUENTS = m ɱ / n / ŋ
; -- LIQUIDS = l
; -- RHOTIC LIQUIDS = ɹ ɹʲ ɹˤ
; -- SYLLABIC CONSONANTS = m̩ / n̩ / l̩ / ɹ̩
; -- GLIDES = j / w
; -- LARYNGEALS = h ɦ / ʔ [- consonantal sonorant +/- LARYNGEAL FEATURES] only
; -------- distinctive groups
set PLOSIVES = [ p, pʰ, t, tʼ, tʰ, ɾ, kʼ, k, kʰ ]
AFFRICATES = [ tʃʰ, dʒ ]
FRICATIVES = [ f, v, θ, ð, s, z, ʃ, ʒ, ç, x ]
NASALS = [ m, ɱ, n, ŋ ]
LIQUIDS = [ l, ɹ, ɹʲ, ɹˤ ]
SYLLABICS = [ m̩, n̩, l̩, ɹ̩ ]
VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
GLIDES = [ j, w ]
LARYNGEALS = [ h, ɦ, ʔ ]
VOWELS = [ æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟ ]
; ---- implicit
; GLOBAL { all sets }
; ---- set join operations non-mutable!
; { SET_A not SET_B } left anti join
; { SET_A and SET_B } inner join
; { SET_A or SET_B } full outer join
; { not SET_A } = { GLOBAL not SET_A }
; ---- unnecessary sugar
; { not SET_A nor SET_B } = { GLOBAL not { SET_A or SET_B } }
; ---- set character operations - non-mutable!
; { [ Xy ] in SET_A } FILTER: where X is any character and y is a filtering character
; { SET_A yield [ Xy ] } CONCATENATE: performs transformation with (prepended or) appended character
; { SET_A yield [ X concat y ] }
; { SET_A yield [ y concat X ] }
; { SET_A yield y[ X ] } DISSOCIATE: performs transformation removing prepended (or appended) character
; { SET_A yield y dissoc [ X ] }
; { SET_A yield [ X ] dissoc y }
; { [ Xy ] in SET_A yield [ X ]y } combined FILTER and DISSOCIATE
; ---- TENTATIVE!
; ---- set feature operations - non-mutable!
; { [ + feature1 - feature2 ] in SET_A } FILTER: where feature1 and feature2 are filtering features
; { SET_A yield [ X + feature1 ] } TRANSFORMATION: performs transformation with (prepended or) appended character
; { SET_A yield [ X - feature1 ] }
; { SET_A yield [ X - feature1 + feature2 ] }
; { [ X + feature1 - feature2 ] in SET_A yield [ - feature1 + feature2 ] } combined FILTER and TRANSFORMATION
; ---- MAPPING
set PLOSIVES = [ p, t, k ],
FRICATIVES = [ f, s, x ],
; pairs PLOSIVES with FRICATIVES that have matching features = [ pf, ts, kx ]
AFFRICATES = { PLOSIVES yield [ X concat { [ [ X ] - fricative ] in FRICATIVES } ] }
; ---- example with join, character, and feature operations
; set SET_C = { [ PHONE +feature1 ] in { SET_A or SET_B } yield [ PHONE concat y ] }
; -------- main class features
[consonantal
+=
PLOSIVES, AFFRICATES, FRICATIVES, NASALS, LIQUIDS, SYLLABICS
-=
VOWELS, GLIDES, LARYNGEALS
]
[sonorant
+=
VOWELS, GLIDES, LIQUIDS, NASALS, SYLLABICS
-=
PLOSIVES, AFFRICATES, FRICATIVES, LARYNGEALS
]
[approximant
+=
VOWELS, LIQUIDS, GLIDES,
; SYLLABIC LIQUIDS
l̩, ɹ̩
-=
PLOSIVES, AFFRICATES, FRICATIVES, NASALS,
; SYLLABIC NASALS
m̩, n̩
]
; -------- laryngeal features
[voice
+=
VOWELS, GLIDES, LIQUIDS, NASALS, SYLLABICS,
; VOICED FRICATIVES
v, ð, z, ʒ,
; VOICED AFFRICATES
dʒ,
; VOICED LARYNGEALS
ɦ
-=
PLOSIVES,
; VOICELESS AFFRICATES
tʃ,
; VOICELESS FRICATIVES
f, θ, s, ʃ, ç, x,
; VOICELESS LARYNGEALS
h, ʔ
]
[spreadGlottis
+=
; ASPIRATED PLOSIVES
pʰ, tʰ, kʰ,
; ASPIRATED AFFRICATES
; SPREAD LARYNGEALS
h ɦ
-=
VOWELS, FRICATIVES, NASALS, LIQUIDS, SYLLABICS, GLIDES,
; UNASPIRATED PLOSIVES
p, pʼ, t, tʼ, ɾ, k, kʼ,
; UNASPIRATED AFFRICATES
tʃ, dʒ,
; CONSTRICTED LARYNGEALS
ʔ
]
[constrictedGlottis
+=
; LARYNGEALIZED RHOTIC
ɹˤ,
; CONSTRICTED LARYNGEAL
ʔ,
; EJECTIVE PLOSIVES
pʼ, tʼ, kʼ
-=
VOWELS, AFFRICATES, FRICATIVES, NASALS, SYLLABICS, GLIDES,
; UNCONSTRICTED PLOSIVES
{ PLOSIVES not [ p', t', k' ] },
; NON-CONSTRICTED LIQUIDS
l, ɹ ɹʲ,
; SPREAD LARYNGEALS
h ɦ,
]
; -------- manner features
[continuant
+=
; FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ, ç, x,
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; LIQUIDS + RHOTICS
l, ɹ ɹʲ ɹˤ,
; GLIDES
j, w,
; SYLLABIC LIQUIDS
l̩, ɹ̩,
; TAPS
ɾ
-=
; NON-TAP PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ, k, kʼ, kʰ,
; AFFRICATES
tʃ, dʒ,
; NASALS
m ɱ, n, ŋ,
; SYLLABIC NASALS
m̩, n̩
]
[nasal
+=
; NASALS
m ɱ, n, ŋ,
; SYLLABIC NASALS
m̩, n̩
-=
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ, ç, x,
; LIQUIDS + RHOTICS
l, ɹ ɹʲ ɹˤ,
; GLIDES
j, w,
; SYLLABIC LIQUIDS
l̩, ɹ̩,
; PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ, k, kʼ, kʰ,
; AFFRICATES
tʃ, dʒ,
]
[strident
+=
; STRIDENT FRICATIVES
f, v, s, z, ʃ, ʒ,
; STRIDENT AFFRICATES
tʃ, dʒ
-=
; VOWELS
æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ, k, kʼ, kʰ,
; NON-STRIDENT FRICATIVES
θ, ð, ç, x,
; NASAL OBSTRUENTS
m ɱ, n, ŋ,
; RHOTICS + LIQUIDS
l, ɹ ɹʲ ɹˤ,
; SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩,
; GLIDES
j, w
]
[lateral
+=
; LATERAL LIQUIDS
l,
; SYLLABIC LATERALS,
-=
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ, k, kʼ, kʰ
; AFFRICATES
tʃ, dʒ
; FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ, ç, x
; NASAL OBSTRUENTS
m ɱ, n, ŋ
; RHOTIC LIQUIDS
ɹ ɹʲ ɹˤ
; NON-LIQUID SYLLABIC CONSONANTS
m̩, n̩, ɹ̩
; GLIDES
j, w
]
; -------- ---- PLACE features
; -------- labial features
[labial
+=
; ROUNDED VOWELS
u̟, ʊ̞, ɔ, ʊ̃, ũ, ɔ̃
; LABIAL PLOSIVES
p, pʼ, pʰ,
; LABIAL FRICATIVES
f, v,
; LABIAL NASALS
m ɱ,
; LABIAL SYLLABIC CONSONANTS
m̩,
; LABIAL GLIDES
w
-=
; UNROUNDED VOWELS
æ, e, ə, ɑ, ɪ̞, ɛ, ʌ, i, æ̃, ẽ, ə̃, ɑ̃, ɪ̃, ɛ̃, ʌ̃, ĩ,
; NON-LABIAL PLOSIVES
t, tʼ, tʰ ɾ, k, kʼ, kʰ,
; NON-LABIAL AFFRICATES
tʃ, dʒ,
; NON-LABIAL FRICATIVES
θ, ð, s, z, ʃ, ʒ, ç, x,
; NON-LABIAL NASAL OBSTRUENTS
n, ŋ,
; LIQUIDS
l,
; RHOTIC LIQUIDS
ɹ ɹʲ ɹˤ,
; NON-LABIAL SYLLABIC CONSONANTS
n̩, l̩, ɹ̩,
; NON-LABIAL GLIDES
j
]
; -------- coronal features
[coronal
+=
; CORONAL PLOSIVES
t, tʼ, tʰ ɾ,
; CORONAL AFFRICATES
tʃ, dʒ,
; CORONAL FRICATIVES
θ, ð, s, z, ʃ, ʒ,
; CORONAL NASALS
n,
; CORONAL LIQUIDS
l
; CORONAL RHOTIC LIQUIDS
ɹ
; CORONAL SYLLABIC CONSONANTS
n̩, l̩, ɹ̩
-=
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; NON-CORONAL PLOSIVES
p, pʼ, pʰ, k, kʼ, kʰ
; NON-CORONAL FRICATIVES
f, v, ç, x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ, ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩,
; NON-CORONAL GLIDES
j, w
]
[anterior
+=
; ALVEOLAR PLOSIVES
t, tʼ, tʰ ɾ,
; ALVEOLAR AFFRICATES
tʃ, dʒ,
; DENTAL FRICATIVES
θ, ð,
; ALVEOLAR FRICATIVES
s, z,
; ALVEOLAR NASALS
n,
; ALVEOLAR LIQUIDS
l
; ALVEOLAR SYLLABIC CONSONANTS
n̩, l̩,
-=
; POSTALVEOLAR FRICATIVES
ʃ, ʒ,
; POSTALVEOLAR RHOTIC LIQUIDS
ɹ,
; POSTALVEOLAR SYLLABIC CONSONANTS
ɹ̩,
; -- NON-CORONALs
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; NON-CORONAL PLOSIVES
p, pʼ, pʰ, k, kʼ, kʰ
; NON-CORONAL FRICATIVES
f, v, ç, x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ, ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩,
; NON-CORONAL GLIDES
j, w
]
[distributed
+=
; DENTAL FRICATIVES
θ, ð,
; POSTALVEOLAR FRICATIVES
ʃ, ʒ,
; POSTALVEOLAR RHOTIC LIQUIDS
ɹ,
; POSTALVEOLAR SYLLABIC CONSONANTS
ɹ̩,
-=
; apical, retroflex
; ALVEOLAR PLOSIVES
t, tʼ, tʰ ɾ,
; ALVEOLAR FRICATIVES
s, z,
; ALVEOLAR NASALS
n,
; ALVEOLAR LIQUIDS
l
; ALVEOLAR SYLLABIC CONSONANTS
n̩, l̩,
; -- NON-CORONALS
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; NON-CORONAL PLOSIVES
p, pʼ, pʰ, k, kʼ, kʰ
; NON-CORONAL FRICATIVES
f, v, ç, x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ, ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩,
; NON-CORONAL GLIDES
j, w
]
; -------- dorsal features
[dorsal
+=
; VOWELS
æ, e, ə, ɑ, ɔ, ɪ̞, ɛ, ʌ, ʊ̞, i, u̟, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃, ĩ, ũ
; DORSAL PLOSIVES
k, kʼ, kʰ,
; DORSAL FRICATIVES
ç, x,
; DORSAL NASAL OBSTRUENTS
ŋ,
; DORSAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; DORSAL GLIDES
j
-=
; NON-DORSAL PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ,
; NON-DORSAL AFFRICATES
tʃ, dʒ,
; NON-DORSAL FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ,
; NON-DORSAL NASALS
m ɱ, n,
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩
; NON-DORSAL GLIDES
w
]
[high
+=
; HIGH VOWELS
i, u̟, ʊ̞, ɪ̞, ĩ, ũ, ʊ̃, ɪ̃
; HIGH DORSAL PLOSIVES
k, kʼ, kʰ,
; HIGH DORSAL FRICATIVES
ç, x,
; HIGH DORSAL NASAL OBSTRUENTS
ŋ,
; HIGH RHOTIC LIQUIDS
ɹʲ
; HIGH DORSAL GLIDES
j, w
-= χ, e, o, a
; NON-HIGH VOWELS
ɑ, æ, e, ə, ɛ, ʌ, æ̃, ẽ, ə̃, ɑ̃, ɔ̃, ɛ̃, ʌ̃,
; NON-HIGH RHOTIC LIQUIDS
ɹˤ
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ,
; NON-DORSAL AFFRICATES
tʃ, dʒ,
; NON-DORSAL FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ,
; NON-DORSAL NASALS
m ɱ, n,
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩
; NON-DORSAL GLIDES
w
]
[low
+=
; LOW VOWELS
ɑ, æ, ɛ, æ̃, ɑ̃, ɛ̃,
; LOW DORSAL RHOTIC LIQUIDS
ɹˤ
-= a, ɛ, ɔ
; NON-LOW VOWELS
i, u̟, ʊ̞, ɪ̞, e, ə, ʌ, ẽ, ə̃, ɔ̃, ɪ̃, ʌ̃, ʊ̃, ĩ, ũ
; NON-LOW DORSAL PLOSIVES
k, kʼ, kʰ,
; NON-LOW DORSAL FRICATIVES
ç, x,
; NON-LOW DORSAL NASAL OBSTRUENTS
ŋ,
; NON-LOW DORSAL RHOTIC LIQUIDS
ɹʲ
; DORSAL GLIDES
j
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ,
; NON-DORSAL AFFRICATES
tʃ, dʒ,
; NON-DORSAL FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ,
; NON-DORSAL NASALS
m ɱ, n,
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩
; NON-DORSAL GLIDES
w
]
[back
+=
; k, kʼ, ɣ, χ, u, ə, o, ʌ, ɑ
; BACK VOWELS
ɑ, ɔ, ʌ, ʊ̞, u̟, ɑ̃, ɔ̃, ʌ̃, ʊ̃, ũ,
; BACK DORSAL PLOSIVES
k, kʼ, kʰ,
; BACK DORSAL FRICATIVES
x,
; BACK DORSAL NASAL OBSTRUENTS
ŋ,
; BACK DORSAL RHOTIC LIQUIDS
ɹˤ
-= ç, k̟, i, y, ø, ɛ
; NON-BACK DORSAL FRICATIVES
ç,
; NON-BACK DORSAL RHOTIC LIQUIDS
ɹʲ
; NON-BACK DORSAL GLIDES
j
; NON-BACK VOWELS
æ, e, ə, ɪ̞, ɛ, i, æ̃, ẽ, ə̃, ɪ̃, ɛ̃, ĩ
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ,
; NON-DORSAL AFFRICATES
tʃ, dʒ,
; NON-DORSAL FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ,
; NON-DORSAL NASALS
m ɱ, n,
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩
; NON-DORSAL GLIDES
w
]
[tense ; compare to ATR or RTR
+=
; TENSE VOWELS
e, i, u̟, ɑ, ĩ, ũ, ẽ, ɑ̃,
-=
; NON-TENSE VOWELS
æ, ə, ɪ̞, ɛ, ʌ, ʊ̞, ɔ, æ̃, ə̃, ɔ̃, ɪ̃, ɛ̃, ʌ̃, ʊ̃,
; DORSAL PLOSIVES
k, kʼ, kʰ,
; DORSAL FRICATIVES
ç, x,
; DORSAL NASAL OBSTRUENTS
ŋ,
; DORSAL RHOTIC LIQUIDS
ɹʲ ɹˤ,
; DORSAL GLIDES
j
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p, pʼ, pʰ, t, tʼ, tʰ ɾ,
; NON-DORSAL AFFRICATES
tʃ, dʒ,
; NON-DORSAL FRICATIVES
f, v, θ, ð, s, z, ʃ, ʒ,
; NON-DORSAL NASALS
m ɱ, n,
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩, n̩, l̩, ɹ̩
; NON-DORSAL GLIDES
w
]
*PROTO
|Gif Lang
*PROTO
|Jif Lang
; -- Devoicing, all our z's become s's
[ + voice consonantal - nasal]>[- voice]/._.
; -- loss of schwa, the is th'
ə>0/._.
; -- Ejectivization, all our pits become pit's
[+ spreadGlottis - continuant]>[+ constrictedGlottis - spreadGlottis]/._[+ constrictedGlottis]
[+ spreadGlottis - continuant]>[+ constrictedGlottis - spreadGlottis]/[+ constrictedGlottis]_.
[+ constrictedGlottis]>0/[+ constrictedGlottis - continuant]_.
[+ constrictedGlottis]>0/._[+ constrictedGlottis - continuant]
; -- r color spreading, all our reports become rihpahts
[- consonantal tense]>[+ tense]/ɹ_.
[- consonantal tense]>[+ tense]/._ɹ
[- consonantal high]>[+ high]/ɹʲ_.
[- consonantal high]>[+ high]/._ɹʲ
[- consonantal back]>[+ back]/ɹˤ_.
[- consonantal back]>[+ back]/._ɹˤ
ɹ>0/._.
ɹʲ>0/._.
ɹˤ>0/._.
; -- Deaspiration, tiff is diff and diff is tiff
[+ spreadGlottis - continuant]>[- spreadGlottis]/._.
; "JavaScript"
; "gif or jif? I say zhaif"
; "This request returns an empty object"
; "I love going to waffle js!"
; "A donut a day makes living with the threat of pandemic easier"


@@ -13,15 +13,15 @@ import Latl from './components/Latl';
 import LatlOutput from './components/LatlOutput';
 import { stateReducer } from './reducers/reducer';
-import { clearState, waffleState } from './reducers/reducer.init';
+import { initState } from './reducers/reducer.init';
 const PhonoChangeApplier = () => {
   const [ state, dispatch ] = useReducer(
     stateReducer,
     {},
-    waffleState
+    initState
   )
-  const { lexicon, phones, phonemes, epochs, options, features, results, errors, latl, parseResults } = state;
+  const { lexicon, phones, phonemes, epochs, options, features, results, errors, latl } = state;
   return (
     <>
@@ -30,7 +30,7 @@ const PhonoChangeApplier = () => {
         <Link to="/">Back to GUI</Link>
         <div className="PhonoChangeApplier PhonoChangeApplier--latl">
           <Latl latl={latl} dispatch={dispatch}/>
-          <LatlOutput results={results} options={options} parseResults={parseResults} errors={errors} dispatch={dispatch}/>
+          <LatlOutput results={results} options={options} dispatch={dispatch}/>
         </div>
       </Route>


@@ -7,12 +7,13 @@ import { render } from 'react-dom';
 const Epochs = ({epochs, errors, dispatch}) => {
   const addEpoch = e => {
     e.preventDefault()
     let index = epochs.length + 1;
     dispatch({
       type: 'ADD_EPOCH',
-      value: {name: `epoch ${index}`}
+      value: {name: `Epoch ${index}`}
     })
   }
@@ -47,8 +48,7 @@ const Epochs = ({epochs, errors, dispatch}) => {
   }
   const renderEpochs = () => {
-    if (epochs && epochs.length) {
-      return epochs.map((epoch, index) => {
+    if (epochs && epochs.length) return epochs.map((epoch, index) => {
       const epochError = errors.epoch ? errors.error : null
       return (
         <div
@@ -65,7 +65,6 @@ const Epochs = ({epochs, errors, dispatch}) => {
           {renderAddEpochButton(index)}
         </div>
       )});
-    }
     return renderAddEpochButton(-1)
   }


@@ -45,7 +45,7 @@ const Features = ({ phones, features, dispatch }) => {
           <li key={`feature__${featureName}`}>
             <span className="feature--names-and-phones">
               <span className="feature--feature-name">
-                {`[+ ${featureName} ]`}
+                {`[+ ${featureName}]`}
               </span>
               <span className="feature--feature-phones">
                 {plus}
@@ -53,7 +53,7 @@ const Features = ({ phones, features, dispatch }) => {
             </span>
             <span className="feature--names-and-phones">
               <span className="feature--feature-name">
-                {`[- ${featureName} ]`}
+                {`[- ${featureName}]`}
               </span>
               <span className="feature--feature-phones">
                 {minus}


@@ -5,7 +5,6 @@ div.Features {
   li {
     display: grid;
-    gap: 0.5em;
     grid-template-columns: 10fr 10fr 1fr;
     margin: 0.5em 0;
     place-items: center center;
@@ -13,7 +12,6 @@ div.Features {
   span.feature--names-and-phones {
     display: grid;
     grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
-    place-items: center center;
   }
   span.feature-name {


@@ -3,23 +3,12 @@ import './Latl.scss';
 const Latl = ({latl, dispatch}) => {
   const { innerWidth, innerHeight } = window;
-  console.log(innerWidth, innerHeight)
-  const handleChange = e => {
-    const setLatlAction = {
-      type: 'SET_LATL',
-      value: e.target.value
-    }
-    dispatch(setLatlAction);
-  }
   return (
     <div className="Latl">
       <h3>.LATL</h3>
       <textarea name="latl" id="latl"
-        value={latl}
-        cols={'' + Math.floor(innerWidth / 15)}
-        rows={'' + Math.floor(innerHeight / 30)}
-        onChange={handleChange}
+        cols={'' + Math.floor(innerWidth / 15)} rows={'' + Math.floor(innerHeight / 30)}
       />
     </div>
   );


@@ -1,45 +1,15 @@
 import React from 'react';
 import './LatlOutput.scss';
-import Output from './Output';
-const LatlOutput = ({results, options, dispatch, errors, parseResults}) => {
-  const handleClick = e => dispatchFunc => {
-    e.preventDefault()
-    return dispatchFunc();
-  }
-  const dispatchClear = () => {
-    const clearAction = {
-      type: 'CLEAR',
-      value: {}
-    }
-    dispatch(clearAction)
-  }
-  const dispatchParse = () => {
-    const parseAction = {
-      type: 'PARSE_LATL',
-      value: {}
-    }
-    dispatch(parseAction)
-  }
-  const dispatchRun = () => {
-    const runAction = {
-      type: 'RUN',
-      value: {}
-    }
-    dispatch(runAction)
-  }
+const LatlOutput = ({results, options, dispatch}) => {
   return (
     <div className="LatlOutput">
       <h3>Output</h3>
-      <form>
+      <form action={() => {}}>
         <input
           className="form form--remove"
           type="submit"
-          onClick={e=>handleClick(e)(dispatchClear)}
+          onClick={e=>{e.preventDefault()}}
           value="Clear"
         />
@@ -48,7 +18,7 @@ const LatlOutput = ({results, options, dispatch, errors, parseResults}) => {
           name="Parse"
           className="form form--add"
           type="submit"
-          onClick={e=>handleClick(e)(dispatchParse)}
+          onClick={e=>{e.preventDefault()}}
           value="Parse"
         />
@@ -57,11 +27,10 @@ const LatlOutput = ({results, options, dispatch, errors, parseResults}) => {
           name="Run"
           className="form form--add"
           type="submit"
-          onClick={e=>handleClick(e)(dispatchRun)}
+          onClick={e=>{e.preventDefault()}}
           value="Run"
         />
       </form>
-      <Output results={results} errors={errors} options={options} parseResults={parseResults}/>
     </div>
   );
 }


@@ -2,7 +2,7 @@ import React from 'react';
 import './Output.scss';
 const Output = props => {
-  const { results, options, errors, parseResults } = props;
+  const { results, options, errors } = props;
   const renderResults = () => {
     switch(options.output) {
       case 'default':
@@ -28,7 +28,6 @@ const Output = props => {
     <div className="Output" data-testid="Output">
       <h3>Results of Run</h3>
       <div data-testid="Output-lexicon" className="Output__container">
-        {parseResults ? parseResults : <></>}
         {results && results.length ? renderResults() : <></>}
       </div>
     </div>


@@ -10,17 +10,6 @@ const ProtoLang = ({ lexicon, dispatch }) => {
     return lexicon.map(getProperty('lexeme')).join('\n');
   }
-  const handleChange = e => {
-    const value = e.target.value.split(/\n/).map(line => {
-      const lexeme = line.split('#')[0].trim();
-      const epoch = line.split('#')[1] || '';
-      return { lexeme, epoch }
-    })
-    dispatch({
-      type: 'SET_LEXICON',
-      value
-    })
-  }
   return (
     <div className="ProtoLang" data-testid="ProtoLang">
       <h3>Proto Language Lexicon</h3>
@@ -32,8 +21,22 @@ const ProtoLang = ({ lexicon, dispatch }) => {
           rows="10"
           data-testid="ProtoLang-Lexicon__textarea"
           value={renderLexicon()}
-          onChange={e => handleChange(e)}
-        >
+          onChange={e=> {
+            console.log(e.target.value.split(/\n/).map(line => {
+              const lexeme = line.split('#')[0].trim();
+              const epoch = line.split('#')[1] || '';
+              return { lexeme, epoch }
+            }))
+            dispatch({
+              type: 'SET_LEXICON',
+              value: e.target.value.split(/\n/).map(line => {
+                const lexeme = line.split('#')[0].trim();
+                const epoch = line.split('#')[1] || '';
+                return { lexeme, epoch }
+              })
+            })
+          }
+          }>
         </textarea>
       </form>
     </div>


@@ -48,8 +48,10 @@ const SoundChangeSuite = props => {
           list={`${epoch.name}-parents-list`}
           value={epoch.parent || 'none'}
           onChange={e=>changeHandler(
-            e, ()=>setEpoch({...epoch, parent:e.target.value})
-          )
+            e, ()=>{
+              console.log(e.target.value)
+              setEpoch({...epoch, parent:e.target.value})
+            })
           }
         >
           {parentsOptions()}


@@ -1,3 +1,3 @@
 export const clearOutput = (state, action) => {
-  return { ...state, results: [], errors: {}, parseResults: '' };
+  return { ...state, results: [], errors: {} };
 }


@@ -5,7 +5,7 @@ describe('Epochs', () => {
   beforeEach(()=> {
     state.epochs = [
       {
-        name: 'epoch-1',
+        name: 'epoch 1',
         changes: [''],
         parent: null
       }
@@ -18,45 +18,45 @@ describe('Epochs', () => {
   });
   it('epochs addition returns new epochs list', () => {
-    const action = {type: 'ADD_EPOCH', value: { name: 'epoch-2', changes: [''], parent: null}};
+    const action = {type: 'ADD_EPOCH', value: { name: 'epoch 2', changes: [''], parent: null}};
     expect(stateReducer(state, action)).toEqual({...state, epochs: [...state.epochs, action.value]})
   })
-  it('epoch-name mutation returns new epochs list with mutation', () => {
-    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch-2', changes: ['']}};
+  it('epoch name mutation returns new epochs list with mutation', () => {
+    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch 2', changes: ['']}};
     const secondAction = {type: 'SET_EPOCH', value: { index: 0, name: 'proto-lang'}};
     const secondState = stateReducer(state, firstAction);
     expect(stateReducer(secondState, secondAction)).toEqual(
       {...state,
         epochs: [
           {name: 'proto-lang', changes: [''], parent: null},
-          {name: 'epoch-2', changes: [''], parent: null}
+          {name: 'epoch 2', changes: [''], parent: null}
         ]
       }
     );
   });
   it('epoch changes mutation returns new epochs list with mutation', () => {
-    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch-2', changes: ['']}};
+    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch 2', changes: ['']}};
     const secondAction = {type: 'SET_EPOCH', value: { index: 0, changes: ['n>t/_#', '[+plosive]>[+nasal -plosive]/_n']}};
     const secondState = stateReducer(state, firstAction);
     expect(stateReducer(secondState, secondAction)).toEqual(
       {...state,
         epochs: [
-          {name: 'epoch-1', changes: ['n>t/_#', '[+plosive]>[+nasal -plosive]/_n'], parent: null},
-          {name: 'epoch-2', changes: [''], parent: null}
+          {name: 'epoch 1', changes: ['n>t/_#', '[+plosive]>[+nasal -plosive]/_n'], parent: null},
+          {name: 'epoch 2', changes: [''], parent: null}
         ]
       }
     );
   });
   it('epochs returned with deleted epoch removed', () => {
-    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch-2', changes: ['']}};
+    const firstAction = {type: 'ADD_EPOCH', value: { name: 'epoch 2', changes: ['']}};
     const stateWithTwoEpochs = stateReducer(state, firstAction);
-    const secondAction = {type: 'REMOVE_EPOCH', value: {index: 0, name: 'epoch-1'}}
+    const secondAction = {type: 'REMOVE_EPOCH', value: {index: 0, name: 'epoch 1'}}
     expect(stateReducer(stateWithTwoEpochs, secondAction)).toEqual({
       ...state,
-      epochs: [{ name: 'epoch-2', changes: [''], parent: null}]
+      epochs: [{ name: 'epoch 2', changes: [''], parent: null}]
     });
   });


@@ -35,22 +35,15 @@ const findPhone = (phones: {}, phone: string): {} => {
 const addFeatureToPhone = (
   phones: {}, phone: string, featureKey: string, featureValue: boolean
 ): {} => {
-  try {
-    let node = {}
-    phone.split('').forEach((graph, index) => {
-      node = index === 0 ? phones[graph] : node[graph];
-      if (index === phone.split('').length - 1) {
-        node.features = node && node.features
-          ? {...node.features, [featureKey]: featureValue }
-          : {[featureKey]: featureValue};
-      }
-    });
-    return phones;
-  }
-  catch (e) {
-    throw { phones, phone, featureKey, featureValue }
-  }
+  let node = {}
+  phone.split('').forEach((graph, index) => {
+    node = index === 0 ? phones[graph] : node[graph];
+    if (index === phone.split('').length - 1) {
+      node.features = {...node.features, [featureKey]: featureValue}
+    }
+  });
+  return phones;
 }
 export const addFeature = (state: stateType, action: featureAction): stateType => {
@@ -87,9 +80,9 @@ export const addFeature = (state: stateType, action: featureAction): stateType =>
 }
 export const deleteFeature = (state, action) => {
-  const deletedFeature = state.features[action.value];
-  deletedFeature.positive.forEach(phone => delete phone.features[action.value])
-  deletedFeature.negative.forEach(phone => delete phone.features[action.value])
-  delete state.features[action.value];
-  return state
+  console.log('deleting')
+  const deletedFeature = action.value;
+  delete state.features[deletedFeature];
+  console.log(state)
+  return {...state}
 }


@@ -31,17 +31,4 @@ describe('Features', () => {
     );
   });
-  it('feature deletion returns new feature list', () => {
-    const action = {type: 'DELETE_FEATURE', value: 'occlusive'}
-    expect(stateReducer(state, action)).toEqual(
-      {...state,
-        features: {},
-        phones: {
-          a: {features: {}, grapheme: 'a'},
-          n: {features: {}, grapheme: 'n'}
-        }
-      }
-    )
-  })
 });


@@ -5,39 +5,11 @@ export type initAction = {
   type: "INIT"
 }
-export const clearState = () => {
-  return {
-    epochs: [],
-    phones: {},
-    options: { output: 'default', save: false },
-    results: [],
-    errors: {},
-    features: {},
-    lexicon: [],
-    latl: '',
-    parseResults: ''
-  }
-}
-export const waffleState = () => {
-  return {
-    epochs: [],
-    phones: {},
-    options: { output: 'default', save: false },
-    results: [],
-    errors: {},
-    features: {},
-    lexicon: [],
-    latl: waffleLatl,
-    parseResults: ''
-  }
-}
 export const initState = (changesArgument: number): stateType => {
   const state = {
     epochs: [
       {
-        name: 'epoch-1',
+        name: 'epoch 1',
         changes: [
           '[+ occlusive - nasal]>[+ occlusive + nasal]/n_.',
           'a>ɯ/._#',
@@ -92,8 +64,7 @@ export const initState = (changesArgument: number): stateType => {
     errors: {},
     features: {},
     lexicon: [],
-    latl: '',
-    parseResults: ''
+    latl: ''
   };
   state.features = {
     sonorant: { positive:[ state.phones.a, state.phones.u, state.phones.ɯ, state.phones.ə, state.phones.n], negative: [] },
@@ -120,620 +91,3 @@ export const initState = (changesArgument: number): stateType => {
   return state;
 }
const waffleLatl = `
; -------- main class features
[consonantal
+=
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; AFFRICATES
/ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; NASALS
m ɱ / n / ŋ /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; GLIDES
j / w /
; LARYNGEALS
h ɦ / ʔ
]
[sonorant
+=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; GLIDES
j / w w̥ /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; NASALS
m ɱ / n / ŋ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
-=
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; AFFRICATES
/ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; LARYNGEALS
h ɦ / ʔ
]
[approximant
+=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; GLIDES
j / w /
; SYLLABIC LIQUIDS
l̩ / ɹ̩
-=
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; AFFRICATES
/ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; NASALS
m ɱ / n / ŋ /
; SYLLABIC NASALS
m̩ / n̩
]
; -------- laryngeal features
[voice
+=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; GLIDES
j / w /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; NASALS
m ɱ / n / ŋ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩ /
; VOICED FRICATIVES
v / ð / z / ʒ /
; VOICED AFFRICATES
/
; VOICED LARYNGEALS
; LARYNGEALS
ɦ
-= voiceless obstruents
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; VOICELESS AFFRICATES
/ /
; VOICELESS FRICATIVES
f / θ / s / ʃ / ç / x /
; VOICELESS LARYNGEALS
h / ʔ
]
[spreadGlottis
+=
; ASPIRATED PLOSIVES
/ / /
; ASPIRATED AFFRICATES
/
; SPREAD LARYNGEALS
h ɦ
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; UNASPIRATED PLOSIVES
p / pʼ / t / tʼ / ɾ / k / kʼ /
; UNASPIRATED AFFRICATES
/ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; NASAL OBSTRUENTS
m ɱ / n / ŋ /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩ /
; GLIDES
j / w
; CONSTRICTED LARYNGEALS
ʔ
]
[constrictedGlottis
+=
; LARYNGEALIZED RHOTIC
ɹˤ /
; CONSTRICTED LARYNGEAL
ʔ /
; EJECTIVE PLOSIVES
pʼ / tʼ / kʼ
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; PLOSIVES
p / / t / ɾ / k / /
; AFFRICATES
/ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; NASAL OBSTRUENTS
m ɱ / n / ŋ /
; LIQUIDS
l /
; NON-PHARYNGEALIZED RHOTICS
ɹ ɹʲ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; GLIDES
j / w
; SPREAD LARYNGEALS
h ɦ /
]
; -------- manner features
[continuant
+=
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; GLIDES
j / w /
; SYLLABIC LIQUIDS
l̩ / ɹ̩ /
; TAPS
ɾ
-=
; NON-TAP PLOSIVES
p / pʼ / / t / tʼ / / k / kʼ / /
; AFFRICATES
/ /
; NASALS
m ɱ / n / ŋ /
; SYLLABIC NASALS
m̩ / n̩
]
[nasal
+=
; NASALS
m ɱ / n / ŋ /
; SYLLABIC NASALS
m̩ / n̩
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x /
; LIQUIDS + RHOTICS
l / ɹ ɹʲ ɹˤ /
; GLIDES
j / w /
; SYLLABIC LIQUIDS
l̩ / ɹ̩ /
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; AFFRICATES
/ /
]
[strident
+=
; STRIDENT FRICATIVES
f / v / s / z / ʃ / ʒ /
; STRIDENT AFFRICATES
/
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ / /
; NON-STRIDENT FRICATIVES
θ / ð / ç / x /
; NASAL OBSTRUENTS
m ɱ / n / ŋ /
; RHOTICS + LIQUIDS
l / ɹ ɹʲ ɹˤ /
; SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩ /
; GLIDES
j / w
]
[lateral
+=
; LATERAL LIQUIDS
l /
; SYLLABIC LATERALS /
l̩
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; PLOSIVES
p / pʼ / / t / tʼ / ɾ / k / kʼ /
; AFFRICATES
/
; FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ / ç / x
; NASAL OBSTRUENTS
m ɱ / n / ŋ
; RHOTIC LIQUIDS
ɹ ɹʲ ɹˤ
; NON-LIQUID SYLLABIC CONSONANTS
m̩ / n̩ / ɹ̩
; GLIDES
j / w
]
; -------- ---- PLACE features
; -------- labial features
[labial
+=
; ROUNDED VOWELS
u̟ / ʊ̞ / ɔ /
; LABIAL PLOSIVES
p / pʼ / /
; LABIAL FRICATIVES
f / v /
; LABIAL NASALS
m ɱ /
; LABIAL SYLLABIC CONSONANTS
m̩ /
; LABIAL GLIDES
w
-=
; UNROUNDED VOWELS
æ / e / ə / ɑ / ɪ̞ / ɛ / ʌ / i /
; NON-LABIAL PLOSIVES
t / tʼ / ɾ / k / kʼ / /
; NON-LABIAL AFFRICATES
/ /
; NON-LABIAL FRICATIVES
θ / ð / s / z / ʃ / ʒ / ç / x /
; NON-LABIAL NASAL OBSTRUENTS
n / ŋ /
; LIQUIDS
l /
; RHOTIC LIQUIDS
ɹ ɹʲ ɹˤ /
; NON-LABIAL SYLLABIC CONSONANTS
n̩ / l̩ / ɹ̩ /
; NON-LABIAL GLIDES
j
]
; -------- coronal features
[coronal
+=
; CORONAL PLOSIVES
t / tʼ / ɾ /
; CORONAL AFFRICATES
/ /
; CORONAL FRICATIVES
θ / ð / s / z / ʃ / ʒ /
; CORONAL NASALS
n /
; CORONAL LIQUIDS
l
; CORONAL RHOTIC LIQUIDS
ɹ
; CORONAL SYLLABIC CONSONANTS
n̩ / l̩ / ɹ̩
-=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; NON-CORONAL PLOSIVES
p / pʼ / / k / kʼ /
; NON-CORONAL FRICATIVES
f / v / ç / x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ / ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩ /
; NON-CORONAL GLIDES
j / w
]
[anterior
+=
; ALVEOLAR PLOSIVES
t / tʼ / ɾ /
; ALVEOLAR AFFRICATES
/ /
; DENTAL FRICATIVES
θ / ð /
; ALVEOLAR FRICATIVES
s / z /
; ALVEOLAR NASALS
n /
; ALVEOLAR LIQUIDS
l
; ALVEOLAR SYLLABIC CONSONANTS
n̩ / l̩ /
-=
; POSTALVEOLAR FRICATIVES
ʃ / ʒ /
; POSTALVEOLAR RHOTIC LIQUIDS
ɹ /
; POSTALVEOLAR SYLLABIC CONSONANTS
ɹ̩ /
; -- NON-CORONALs
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; NON-CORONAL PLOSIVES
p / pʼ / / k / kʼ /
; NON-CORONAL FRICATIVES
f / v / ç / x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ / ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩ /
; NON-CORONAL GLIDES
j / w
]
[distributed
+=
; DENTAL FRICATIVES
θ / ð /
; POSTALVEOLAR FRICATIVES
ʃ / ʒ /
; POSTALVEOLAR RHOTIC LIQUIDS
ɹ /
; POSTALVEOLAR SYLLABIC CONSONANTS
ɹ̩ /
-=
; apical / retroflex
; ALVEOLAR PLOSIVES
t / tʼ / ɾ /
; ALVEOLAR FRICATIVES
s / z /
; ALVEOLAR NASALS
n /
; ALVEOLAR LIQUIDS
l
; ALVEOLAR SYLLABIC CONSONANTS
n̩ / l̩ /
; -- NON-CORONALS
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; NON-CORONAL PLOSIVES
p / pʼ / / k / kʼ /
; NON-CORONAL FRICATIVES
f / v / ç / x
; NON-CORONAL NASAL OBSTRUENTS
m ɱ / ŋ
; NON-CORONAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; NON-CORONAL SYLLABIC CONSONANTS
m̩ /
; NON-CORONAL GLIDES
j / w
]
; -------- dorsal features
[dorsal
+=
; VOWELS
æ / e / ə / ɑ / ɔ / ɪ̞ / ɛ / ʌ / ʊ̞ / i / u̟ /
; DORSAL PLOSIVES
k / kʼ / /
; DORSAL FRICATIVES
ç / x /
; DORSAL NASAL OBSTRUENTS
ŋ /
; DORSAL RHOTIC LIQUIDS
ɹʲ ɹˤ
; DORSAL GLIDES
j
-=
; NON-DORSAL PLOSIVES
p / pʼ / / t / tʼ / ɾ /
; NON-DORSAL AFFRICATES
/ /
; NON-DORSAL FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ /
; NON-DORSAL NASALS
m ɱ / n /
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; NON-DORSAL GLIDES
w
]
[high
+=
; HIGH VOWELS
i / u̟ / ʊ̞ / ɪ̞
; HIGH DORSAL PLOSIVES
k / kʼ / /
; HIGH DORSAL FRICATIVES
ç / x /
; HIGH DORSAL NASAL OBSTRUENTS
ŋ /
; HIGH RHOTIC LIQUIDS
ɹʲ
; HIGH DORSAL GLIDES
j / w
-= χ / e / o / a
; NON-HIGH VOWELS
ɑ / æ / e / ə / ɛ / ʌ
; NON-HIGH RHOTIC LIQUIDS
ɹˤ
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p / pʼ / / t / tʼ / ɾ /
; NON-DORSAL AFFRICATES
/ /
; NON-DORSAL FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ /
; NON-DORSAL NASALS
m ɱ / n /
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; NON-DORSAL GLIDES
w
]
[low
+=
; LOW VOWELS
ɑ / æ / ɛ /
; LOW DORSAL RHOTIC LIQUIDS
ɹˤ
-= a / ɛ / ɔ
; NON-LOW VOWELS
i / u̟ / ʊ̞ / ɪ̞ / e / ə / ʌ
; NON-LOW DORSAL PLOSIVES
k / kʼ / /
; NON-LOW DORSAL FRICATIVES
ç / x /
; NON-LOW DORSAL NASAL OBSTRUENTS
ŋ /
; NON-LOW DORSAL RHOTIC LIQUIDS
ɹʲ
; DORSAL GLIDES
j
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p / pʼ / / t / tʼ / ɾ /
; NON-DORSAL AFFRICATES
/ /
; NON-DORSAL FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ /
; NON-DORSAL NASALS
m ɱ / n /
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; NON-DORSAL GLIDES
w
]
[back
+=
; BACK VOWELS
ɑ / ɔ / ʌ / ʊ̞ / u̟ /
; BACK DORSAL PLOSIVES
k / kʼ / /
; BACK DORSAL FRICATIVES
x /
; BACK DORSAL NASAL OBSTRUENTS
ŋ /
; BACK DORSAL RHOTIC LIQUIDS
ɹˤ
-=
; NON-BACK DORSAL FRICATIVES
ç /
; NON-BACK DORSAL RHOTIC LIQUIDS
ɹʲ
; NON-BACK DORSAL GLIDES
j
; NON-BACK VOWELS
æ / e / ə / ɪ̞ / ɛ / i
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p / pʼ / / t / tʼ / ɾ /
; NON-DORSAL AFFRICATES
/ /
; NON-DORSAL FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ /
; NON-DORSAL NASALS
m ɱ / n /
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; NON-DORSAL GLIDES
w
]
[tense ; compare to ATR or RTR
+=
; TENSE VOWELS
e / i / u̟ / ɑ
-=
; NON-TENSE VOWELS
æ / ə / ɪ̞ / ɛ / ʌ / ʊ̞ / ɔ /
; DORSAL PLOSIVES
k / kʼ / /
; DORSAL FRICATIVES
ç / x /
; DORSAL NASAL OBSTRUENTS
ŋ /
; DORSAL RHOTIC LIQUIDS
ɹʲ ɹˤ /
; DORSAL GLIDES
j
; -- NON-DORSALS
; NON-DORSAL PLOSIVES
p / pʼ / / t / tʼ / ɾ /
; NON-DORSAL AFFRICATES
/ /
; NON-DORSAL FRICATIVES
f / v / θ / ð / s / z / ʃ / ʒ /
; NON-DORSAL NASALS
m ɱ / n /
; NON-DORSAL LIQUIDS
l
; NON-DORSAL RHOTIC LIQUIDS
ɹ
; NON-DORSAL SYLLABIC CONSONANTS
m̩ / n̩ / l̩ / ɹ̩
; NON-DORSAL GLIDES
w
]
*PROTO
; -- Devoicing, all our z's become s's
[+ voice - continuant]>[- voice]/._.
; -- Reduction of schwa
ə>0/._.
|Gif Lang
*PROTO
; -- Ejectivization, all our pits become pit's
[+ spreadGlottis - continuant]>[+ constrictedGlottis - spreadGlottis]/._[+ constrictedGlottis]
[+ spreadGlottis - continuant]>[+ constrictedGlottis - spreadGlottis]/[+ constrictedGlottis]_.
[+ constrictedGlottis]>0/[+ constrictedGlottis - continuant]_.
[+ constrictedGlottis]>0/._[+ constrictedGlottis - continuant]
|Jif Lang
`


@@ -12,7 +12,7 @@ import type { resultsAction } from './reducer.results'
 import { initState } from './reducer.init';
 import type { initAction } from './reducer.init';
 import { clearOutput } from './reducer.clear';
-import { setLatl, parseLatl } from './reducer.latl';
+import { setLatl } from './reducer.latl';
 export type stateType = {
   lexicon: Array<{lexeme: string, epoch: epochType}>,
@@ -63,8 +63,6 @@ export const stateReducer = (state: stateType, action: actionType): stateType =>
     case 'SET_LATL': return setLatl(state, action);
-    case 'PARSE_LATL': return parseLatl(state, action);
     case 'CLEAR': return clearOutput(state, action);
     case 'RUN': return run(state, action);


@@ -1,529 +1,5 @@
-import { stateReducer } from './reducer';
 export const setLatl = (state, action) => {
-  let latl = action.value;
-  return {...state, latl, parseResults: ''};
+  return {...state};
 }
const getOneToken = (latl, tokens) => {
for (const [type, regEx] of tokenTypes) {
const newRegEx = new RegExp(`^(${regEx})`);
const match = latl.match(newRegEx) || null;
if (match) {
const newTokens = [...tokens, {type, value: match[0].trim()}]
const newLatl = latl.slice(match[0].length ,);
return [newLatl, newTokens]
}
}
throw `Unexpected token at ${latl.split('\n')[0]}`
}
export const tokenize = latl => {
let i = 0;
let tokens = [];
let newLatl = latl.trim();
try {
while(newLatl.length) {
[newLatl, tokens] = getOneToken(newLatl, tokens)
}
return tokens;
}
catch (err) {
return {errors: 'tokenization error', message: err, newLatl}
}
}
const parseLineBreak = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
if (!lastNode) return tree;
switch (lastNode.type) {
case 'rule': {
if (tree[tree.length - 2].type === 'ruleSet') {
const ruleValue = lastNode.value;
tree[tree.length - 2].value.push(ruleValue);
tree.pop()
return tree;
}
if (tree[tree.length - 2].type === 'epoch') {
const newNode = { type: 'ruleSet', value: [ lastNode.value ] }
tree[tree.length - 1] = newNode;
return tree;
}
}
case 'feature--plus': {
// tree[tree.length - 1].type === 'feature';
return tree;
}
case 'feature--minus': {
// tree[tree.length - 1].type === 'feature';
return tree;
}
default:
return tree;
}
}
const parseWhiteSpace = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule': {
tree[tree.length - 1] = {...lastNode, value: lastNode.value + ' ' }
return tree;
}
default:
return tree;
}
}
const parseStar = (tree, token, index, tokens) => {
const nextToken = tokens[index + 1];
if (nextToken.type === 'referent') {
return [...tree, { type: 'epoch-parent' }]
}
}
const parsePipe = (tree, token, index, tokens) => {
const nextToken = tokens[index + 1];
if (nextToken.type === 'referent') {
const ruleToken = tree[tree.length - 1];
const epochToken = tree[tree.length - 2];
if (ruleToken.type === 'rule' || ruleToken.type === 'ruleSet') {
if (epochToken.type === 'epoch') {
tree[tree.length - 2] = {
...epochToken,
changes: [...ruleToken.value],
type: 'epoch-name'
}
tree.pop();
return tree;
}
}
}
return [...tree, 'unexpected pipe']
}
const parseReferent = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'epoch-parent': {
tree[tree.length - 1] = {...lastNode, parent: token.value, type: 'epoch' }
return tree;
}
case 'epoch-name': {
tree[tree.length - 1] = {...lastNode, name: token.value, type: 'epoch' }
return [...tree, { type: 'main'}];
}
case 'epoch': {
return [...tree, { type: 'rule', value: token.value } ]
}
case 'rule': {
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value }
return tree;
}
case 'ruleSet': {
return [...tree, { type: 'rule', value: token.value }]
}
case 'feature': {
if (!lastNode.value) {
tree[tree.length - 1].value = token.value;
return tree;
}
}
case 'feature--plus': {
if (lastNode.value) {
lastNode.positivePhones = [...lastNode.positivePhones, token.value ]
}
else {
lastNode.value = token.value;
}
tree[tree.length - 1] = lastNode;
return [...tree]
}
case 'feature--minus': {
if (lastNode.value) {
lastNode.negativePhones = [...lastNode.negativePhones, token.value ]
}
else {
lastNode.value = token.value;
}
tree[tree.length - 1] = lastNode;
return [...tree]
}
case 'lexicon': {
if (!lastNode.epoch) {
tree[tree.length - 1].epoch = token.value;
}
else {
tree[tree.length - 1].value.push(token.value)
}
return tree;
}
default:
return [...tree, `unexpected referent ${token.value}`]
}
}
const parsePhone = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch(lastNode.type) {
case 'rule': {
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value }
return tree;
}
case 'ruleSet': {
return [...tree, { type: 'rule', value: token.value }]
}
case 'feature--plus':
lastNode.positivePhones = [...lastNode.positivePhones, token.value ];
tree[tree.length - 1] = lastNode;
return tree;
case 'feature--minus':
lastNode.negativePhones = [...lastNode.negativePhones, token.value ];
tree[tree.length - 1] = lastNode;
return tree;
default:
return [...tree, `unexpected phone ${token.value}`]
}
}
const parseOpenBracket = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
if (lastNode) {
switch (lastNode.type) {
case 'epoch':
return [...tree, {type: 'rule', value: token.value}]
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value }
return tree;
case 'ruleSet':
return [...tree, {type: 'rule', value: token.value}];
// case 'feature':
// return [{type: 'feature', positivePhones: [], negativePhones: []}];
case 'feature--plus':
return [...tree, {type: 'feature', positivePhones: [], negativePhones: []}];
case 'feature--minus':
return [...tree, {type: 'feature', positivePhones: [], negativePhones: []}];
case 'main':
return [...tree, {type: 'feature', positivePhones: [], negativePhones: []}];
default:
return [...tree, 'unexpected open bracket']
}
}
return [{type: 'feature', positivePhones: [], negativePhones: []}]
}
const parseCloseBracket = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value }
return tree;
case 'feature--plus':
return tree;
case 'feature--minus':
return tree;
default:
return [...tree, 'unexpected close bracket']
}
}
const parsePositiveAssignment = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'feature':
tree[tree.length - 1].type = 'feature--plus'
return tree;
default:
return [...tree, 'unexpected positive assignment']
}
}
const parseNegativeAssignment = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'feature':
tree[tree.length - 1].type = 'feature--minus'
return tree;
case 'feature--plus':
tree[tree.length - 1].type = 'feature--minus';
return tree;
default:
return [...tree, 'unexpected negative assignment']
}
}
const parsePlus = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
case 'feature':
tree[tree.length - 1] = {...lastNode, type: 'feature--plus'}
return tree;
case 'feature--minus':
tree[tree.length - 1] = {...lastNode, type: 'feature--minus'}
return tree;
default:
return [...tree, 'unexpected plus']
}
}
const parseMinus = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
case 'feature':
tree[tree.length - 1] = {...lastNode, type: 'feature--minus'}
return tree;
default:
return [...tree, 'unexpected minus']
}
}
const parseEqual = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'feature--plus':
return tree;
case 'feature--minus':
return tree;
default:
return [...tree, 'unexpected equal'];
}
}
const parseGreaterThan = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
default:
return [...tree, 'unexpected greater than']
}
}
const parseSlash = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
if (lastNode) {
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
case 'feature--plus':
return tree;
case 'feature--minus':
return tree;
case 'lexicon':
return [...tree, { }];
case 'main':
return [...tree, { type: 'lexicon', value: []}]
default:
return [...tree, 'unexpected slash']
}
}
return [...tree, { type: 'lexicon', value: []}]
}
const parseHash = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
default:
return [...tree, 'unexpected hash']
}
}
const parseDot = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
default:
return [...tree, 'unexpected dot']
}
}
const parseUnderScore = (tree, token, index, tokens) => {
const lastNode = tree[tree.length - 1];
switch (lastNode.type) {
case 'rule':
tree[tree.length - 1] = {...lastNode, value: lastNode.value + token.value}
return tree;
default:
return [...tree, 'unexpected underscore']
}
}
const generateNode = (tree, token, index, tokens) => {
switch (token.type) {
// if comment, consume without effect
case 'semicolon':
return [...tree]
case 'lineBreak':
return parseLineBreak(tree, token, index, tokens);
case 'whiteSpace':
return parseWhiteSpace(tree, token, index, tokens);
// if *PROTO consume token:* and add epochs: [ { parent: 'PROTO' } ]
case 'star':
return parseStar(tree, token, index, tokens);
case 'pipe':
return parsePipe(tree, token, index, tokens);
case 'referent':
return parseReferent(tree, token, index, tokens);
case 'phone':
return parsePhone(tree, token, index, tokens);
case 'openBracket':
return parseOpenBracket(tree, token, index, tokens);
case 'closeBracket':
return parseCloseBracket(tree, token, index, tokens);
case 'positiveAssignment':
return parsePositiveAssignment(tree, token, index, tokens);
case 'negativeAssignment':
return parseNegativeAssignment(tree, token, index, tokens);
case 'plus':
return parsePlus(tree, token, index, tokens);
case 'minus':
return parseMinus(tree, token, index, tokens);
case 'equal':
return parseEqual(tree, token, index, tokens);
case 'greaterThan':
return parseGreaterThan(tree, token, index, tokens);
case 'slash':
return parseSlash(tree, token, index, tokens);
case 'hash':
return parseHash(tree, token, index, tokens);
case 'dot':
return parseDot(tree, token, index, tokens);
case 'underscore':
return parseUnderScore(tree, token, index, tokens);
default:
return [...tree, { ...token }]
}
}
const addToken = (tree, token, index, tokens) => generateNode(tree, token, index, tokens);
const connectNodes = (tree, node, index, nodes) => {
switch (node.type) {
case 'epoch':
delete node.type;
return {...tree, epochs: [...tree.epochs, {...node, index: tree.epochs.length } ] }
case 'feature':
node.feature = node.value;
delete node.value;
delete node.type;
return {...tree, features: [...tree.features, {...node } ] }
case 'feature--minus':
node.feature = node.value;
delete node.value;
delete node.type;
if (tree.features.length && tree.features[tree.features.length - 1].feature === node.feature) {
tree.features[tree.features.length - 1].negativePhones = node.negativePhones
return tree;
}
return {...tree, features: [...tree.features, {...node} ] }
case 'feature--plus':
delete node.type;
node.feature = node.value;
delete node.value;
if (tree.features.length && tree.features[tree.features.length - 1].feature === node.feature) {
tree.features[tree.features.length - 1].positivePhones = node.positivePhones
return tree;
}
return {...tree, features: [...tree.features, {...node} ] }
case 'lexicon':
delete node.type;
return {...tree, lexicon: [...tree.lexicon, node]}
default:
return tree;
}
}
export const buildTree = tokens => {
const bareTree = {
epochs: [],
features: [],
lexicon: []
}
const nodes = tokens.reduce(addToken, []);
// return nodes
const tree = nodes.reduce(connectNodes, bareTree);
const filterProps = Object.entries(tree).filter(([key, value]) => !value.length)
.map(([key, value]) => key)
return filterProps.reduce((tree, badProp) => {
delete tree[badProp];
return tree;
}, tree);
}
export const generateAST = latl => {
// tokenize
const tokens = tokenize(latl.trim());
// build tree
const tree = buildTree(tokens);
return tree;
}
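A minimal usage sketch; the input and the expected shape are the epoch fixtures asserted in the test file later in this diff:
```
const tree = generateAST(`
; comment
*PROTO
[+ FEATURE]>[- FEATURE]/._.
n>m/#_.
|CHILD`);
// => { epochs: [ { parent: 'PROTO', name: 'CHILD', index: 0,
//        changes: [ '[+ FEATURE]>[- FEATURE]/._.', 'n>m/#_.' ] } ] }
```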
export const parseLatl = (state, action) => {
try {
const latl = state.latl;
const AST = generateAST(latl);
const features = AST.features;
if (features) {
if (state.features) {
state = Object.keys(state.features).reduce((state, feature) => {
return stateReducer(state, {type: 'DELETE_FEATURE', value: feature})
}, state)
}
state = features.reduce((state, feature) => stateReducer(state, {type:'ADD_FEATURE', value: feature}), state);
}
delete AST.features;
const lexicon = AST.lexicon;
if (lexicon) {
if (state.lexicon) {
state.lexicon = [];
}
state = lexicon.reduce((state, epoch) => {
return epoch.value.reduce((reducedState, lexeme) => {
return stateReducer(reducedState, {type: 'ADD_LEXEME', value: { lexeme, epoch: epoch.epoch }})
}, state)
}, state)
}
delete AST.lexicon;
Object.entries(AST).forEach(([key, value]) => state[key] = value);
return { ...state, parseResults: 'latl parsed successfully', results:[] }
}
catch (e) {
console.log(e)
return { ...state, parseResults: 'error parsing', errors: e}
}
}
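As exercised by the LATL tests below, parseLatl reads the source from state.latl, so a SET_LATL dispatch precedes it (a usage sketch, not original source):
```
// featureDefinitionLatl is one of the fixture strings in the test file below
const latlState = stateReducer(initState(), { type: 'SET_LATL', value: featureDefinitionLatl });
parseLatl(latlState, {});
// => { ...state, parseResults: 'latl parsed successfully', results: [] }
```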
const tokenTypes = [
['semicolon', ';.*\n'],
[`star`, `\\*`],
['pipe', `\\|`],
['openBracket', `\\[`],
['closeBracket', `\\]`],
['positiveAssignment', `\\+=`],
['negativeAssignment', `\\-=`],
['plus', `\\+`],
['minus', `\\-`],
['greaterThan', `\\>`],
['hash', `#`],
['slash', `\/`],
['dot', `\\.`],
['underscore', `\\_`],
[`referent`, `[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*`],
[`phone`, `[\u00c0-\u03FFA-Za-z0]+`],
['equal', `=`],
[`lineBreak`, `\\n`],
[`whiteSpace`, `\\s+`]
]
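tokenize (defined above in this file) presumably tries these [name, pattern] pairs in order at each position, which is why positiveAssignment and negativeAssignment precede plus and minus. Against the fixtures below, for example:
```
tokenize('*PROTO');
// => [ { type: 'star', value: '*' }, { type: 'referent', value: 'PROTO' } ]
```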
View file

@@ -1,520 +1,14 @@
import { stateReducer } from './reducer';
import { initState } from './reducer.init';
-import { tokenize, buildTree, parseLatl } from './reducer.latl';
describe('LATL', () => {
  it('returns state unaltered with no action body', () => {
    const state = initState();
    const action = {
      type: 'SET_LATL',
-      value: ''
+      value: {}
    }
    const returnedState = stateReducer(state, action)
    expect(returnedState).toStrictEqual(state);
  })
it('returns tokens from well-formed latl epoch definition', () => {
const tokens = tokenize(epochDefinitionLatl);
expect(tokens).toStrictEqual(tokenizedEpoch)
});
it('returns tokens from well-formed latl feature definition', () => {
const tokens = tokenize(featureDefinitionLatl);
expect(tokens).toStrictEqual(tokenizedFeature);
});
it('returns tokens from well-formed latl lexicon definition', () => {
const tokens = tokenize(lexiconDefinitionLatl);
expect(tokens).toStrictEqual(tokenizedLexicon);
});
it('returns tokens from well-formed latl epoch, feature, and lexicon definitions', () => {
const latl = epochDefinitionLatl + '\n' + featureDefinitionLatl + '\n' + lexiconDefinitionLatl;
const tokens = tokenize(latl);
const lineBreaks = [{ type: 'lineBreak', value: '' },{ type: 'lineBreak', value: '' },{ type: 'lineBreak', value: '' }]
const tokenizedLatl = [...tokenizedEpoch, ...lineBreaks, ...tokenizedFeature, ...lineBreaks, ...tokenizedLexicon];
expect(tokens).toStrictEqual(tokenizedLatl);
});
it('returns AST from well-formed epoch tokens', () => {
const tree = buildTree(tokenizedEpoch);
expect(tree).toStrictEqual(treeEpoch);
})
it('returns AST from well-formed feature tokens', () => {
const tree = buildTree(tokenizedFeature);
expect(tree).toStrictEqual(treeFeature);
})
it('returns AST from well-formed lexicon tokens', () => {
const tree = buildTree(tokenizedLexicon);
expect(tree).toStrictEqual(treeLexicon);
})
it('parse returns state from well-formed feature latl', () => {
const state = initState();
const setAction = {
type: 'SET_LATL',
value: featureDefinitionLatl
}
const latlState = stateReducer(state, setAction);
const parseState = parseLatl(latlState, {});
expect(parseState).toStrictEqual(featureState)
})
it('returns run from well-formed epoch latl', () => {
const state = initState();
const setAction = {
type: 'SET_LATL',
value: runEpochLatl
}
const latlState = stateReducer(state, setAction);
const parseState = parseLatl(latlState, {})
// expect(parseState).toStrictEqual(epochState);
parseState.lexicon[0].epoch = 'PROTO'
const runState = stateReducer(parseState, {type: 'RUN', value:{}})
expect(runState).toStrictEqual({...runState, results: runEpochResults})
})
it('returns state from well-formed lexicon latl', () => {
const state = initState();
const setAction = {
type: 'SET_LATL',
value: lexiconDefinitionLatl
}
const latlState = stateReducer(state, setAction);
const parseState = parseLatl(latlState, {});
expect(parseState).toStrictEqual(lexiconState)
})
// it('returns state from well formed latl', () => {
// const state = initState();
// const setAction = {
// type: 'SET_LATL',
// value: totalLatl
// }
// const latlState = stateReducer(state, setAction);
// const parseState = parseLatl(latlState, {});
// expect(parseState).toStrictEqual(totalLatlState)
// })
})
const epochDefinitionLatl = `
; comment
*PROTO
[+ FEATURE]>[- FEATURE]/._.
n>m/#_.
|CHILD
`
const runEpochLatl = `
; comment
*PROTO
a>u/._.
|epoch-1
`
const runEpochResults = [
{
pass: 'epoch-1',
parent: 'PROTO',
lexicon: [ 'untu', 'unut', 'unət', 'unnu', 'tun', 'əntu' ]
}
]
const tokenizedEpoch = [
{ type: "semicolon", value: "; comment" },
{ type: "star", value: "*" }, { type: "referent", value: "PROTO" }, { type: 'lineBreak', value: '' }, { type: "whiteSpace", value: "" },
{ type: "openBracket", value: "[" }, { type: "plus", value: "+" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "FEATURE" }, { type: "closeBracket", value: "]" },
{ type: "greaterThan", value: ">" }, { type: "openBracket", value: "[" }, { type: "minus", value: "-" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "FEATURE" }, { type: "closeBracket", value: "]" },
{ type: "slash", value: "/" }, { type: "dot", value: "." },
{ type: "underscore", value: "_" }, { type: "dot", value: "." }, { type: 'lineBreak', value: '' }, { type: "whiteSpace", value: "" },
{ type: "referent", value: "n" },
{ type: "greaterThan", value: ">" }, { type: "referent", value: "m" },
{ type: "slash", value: "/" }, { type: "hash", value: "#" },
{ type: "underscore", value: "_" }, { type: "dot", value: "." }, { type: 'lineBreak', value: '' },
{ type: "pipe", value: "|" }, { type: "referent", value: "CHILD" }
]
const treeEpoch = {
epochs: [
{
parent: 'PROTO',
name: 'CHILD',
index: 0,
changes: [
'[+ FEATURE]>[- FEATURE]/._.',
'n>m/#_.'
]
}
]
}
const epochState = {
...initState(),
epochs: treeEpoch.epochs,
latl: epochDefinitionLatl
}
const featureDefinitionLatl = `
[+ PLOSIVE] = kp/p/b/d/t/g/k
[- PLOSIVE] = m/n/s/z
[SONORANT
+= m/n
-= s/z/kp/p/b/d/t/g/k
]
`
const tokenizedFeature = [
{type: "openBracket", value: "[" }, { type: "plus", value: "+" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "PLOSIVE" }, { type: "closeBracket", value: "]" }, { type: "whiteSpace", value: "" },
{ type: "equal", value: "=" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "kp" }, { type: "slash", value: "/" }, { type: "referent", value: "p" }, { type: "slash", value: "/" }, { type: "referent", value: "b" }, { type: "slash", value: "/" }, { type: "referent", value: "d" }, { type: "slash", value: "/" }, { type: "referent", value: "t" }, { type: "slash", value: "/" }, { type: "referent", value: "g" }, { type: "slash", value: "/" }, { type: "referent", value: "k" }, { type: 'lineBreak', value: '' },
{type: "openBracket", value: "[" }, { type: "minus", value: "-" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "PLOSIVE" }, { type: "closeBracket", value: "]" }, { type: "whiteSpace", value: "" },
{ type: "equal", value: "=" }, { type: "whiteSpace", value: "" }, { type: "referent", value: "m" }, { type: "slash", value: "/" }, { type: "referent", value: "n" }, { type: "slash", value: "/" }, { type: "referent", value: "s" }, { type: "slash", value: "/" }, { type: "referent", value: "z" }, { type: 'lineBreak', value: '' },
{type: "openBracket", value: "[" }, { type: "referent", value: "SONORANT" }, { type: 'lineBreak', value: '' },
{ type: "whiteSpace", value: "" }, { type: "positiveAssignment", value: "+=" }, { type: "whiteSpace", value: "" },
{ type: "referent", value: "m" }, { type: "slash", value: "/" }, { type: "referent", value: "n" }, { type: 'lineBreak', value: '' },
{ type: "whiteSpace", value: "" }, { type: "negativeAssignment", value: "-=" }, { type: "whiteSpace", value: "" },
{ type: "referent", value: "s" }, { type: "slash", value: "/" }, { type: "referent", value: "z" }, { type: "slash", value: "/" }, { type: "referent", value: "kp" }, { type: "slash", value: "/" }, { type: "referent", value: "p" }, { type: "slash", value: "/" }, { type: "referent", value: "b" }, { type: "slash", value: "/" }, { type: "referent", value: "d" }, { type: "slash", value: "/" }, { type: "referent", value: "t" }, { type: "slash", value: "/" }, { type: "referent", value: "g" }, { type: "slash", value: "/" }, { type: "referent", value: "k" }, { type: 'lineBreak', value: '' },
{ type: "closeBracket", value: "]" },
]
const treeFeature = { features: [
{
feature: 'PLOSIVE',
positivePhones: ['kp', 'p', 'b', 'd', 't', 'g', 'k'],
negativePhones: ['m', 'n', 's', 'z']
},
{
feature: 'SONORANT',
positivePhones: ['m', 'n'],
negativePhones: ['s' ,'z' ,'kp' ,'p' ,'b' ,'d' ,'t' ,'g' ,'k']
}
]}
const featureState = {
...initState(),
features: {
PLOSIVE: {
negative: [
{
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "m",
},
{
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "n",
},
{
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "s",
},
{
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "z",
},
],
positive: [
{
features: {
PLOSIVE: true,
},
grapheme: "kp",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "p",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "b",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "d",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "t",
ʰ: {
features: {},
grapheme: "tʰ",
},
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "g",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "k",
p: {
features: {
SONORANT: false,
},
grapheme: "kp",
},
},
],
},
SONORANT: {
negative: [
{
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "s",
},
{
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "z",
},
{
features: {
SONORANT: false,
},
grapheme: "kp",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "p",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "b",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "d",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "t",
ʰ: {
features: {},
grapheme: "tʰ",
},
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "g",
},
{
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "k",
p: {
features: {
SONORANT: false,
},
grapheme: "kp",
},
},
],
positive: [
{
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "m",
},
{
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "n",
},
],
}, },
parseResults: 'latl parsed successfully',
latl: featureDefinitionLatl,
phones: {
a: {
features: {},
grapheme: "a",
},
b: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "b",
},
d: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "d",
},
g: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "g",
},
k: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "k",
p: {
features: {
SONORANT: false,
},
grapheme: "kp",
},
},
m: {
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "m",
},
n: {
features: {
PLOSIVE: false,
SONORANT: true,
},
grapheme: "n",
},
p: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "p",
},
s: {
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "s",
},
t: {
features: {
PLOSIVE: true,
SONORANT: false,
},
grapheme: "t",
ʰ: {
features: {},
grapheme: "tʰ",
},
},
u: {
features: {},
grapheme: "u",
},
z: {
features: {
PLOSIVE: false,
SONORANT: false,
},
grapheme: "z",
},
ə: {
features: {},
grapheme: "ə",
},
ɯ: {
features: {},
grapheme: "ɯ",
},
}
}
const lexiconDefinitionLatl = `
/PROTO
kpn
sm
/
`
const tokenizedLexicon = [
{ type: "slash", value: "/" }, { type: "referent", value: "PROTO" }, { type: 'lineBreak', value: '' },
{ type: "whiteSpace", value:"" }, { type: "referent", value: "kpn" }, { type: 'lineBreak', value: '' },
{ type: "whiteSpace", value:"" }, { type: "referent", value: "sm" }, { type: 'lineBreak', value: '' },
{ type: "slash", value: "/" }
]
const treeLexicon = {lexicon: [{epoch: "PROTO", value: ["kpn", "sm"]}]};
const lexiconState = {
...initState(),
latl: lexiconDefinitionLatl,
lexicon: [
{ lexeme: 'kpn', epoch: 'PROTO'},
{ lexeme: 'sm', epoch: 'PROTO'}
],
parseResults: 'latl parsed successfully'
}
const totalLatl = `${epochDefinitionLatl}\n\n${featureDefinitionLatl}\n\n${lexiconDefinitionLatl}`
const totalLatlState = {
...initState(),
latl: totalLatl,
phonemes: {},
features: featureState.features,
epochs: treeEpoch.epochs,
lexicon: lexiconState.lexicon,
parseResults: 'latl parsed successfully'
}
View file

@@ -20,10 +20,8 @@ const makeLexeme = (lexeme: string, epochName: ?string, state: stateType) => {
  const newLexeme = {lexeme: lexeme, epoch: state.epochs[0]};
  if (epochName) {
    const epochIndex = state.epochs.findIndex(epoch => epoch.name === epochName);
-    if (epochIndex > -1) {
+    if (epochIndex > 0) {
      newLexeme.epoch = state.epochs[epochIndex];
-    } else {
-      newLexeme.epoch = epochName;
    };
  }
  return newLexeme;
View file

@@ -3,8 +3,8 @@ import {stateReducer} from './reducer';
describe('Lexicon', () => {
  const state = {
    epochs: [
-      { name: 'epoch-1', changes:[''] },
-      { name: 'epoch-2', changes:[''] }
+      { name: 'epoch 1', changes:[''] },
+      { name: 'epoch 2', changes:[''] }
    ]
  }
  state.lexicon = [
@@ -28,16 +28,16 @@ describe('Lexicon', () => {
  });
  it('lexicon addition with epoch returns updated lexicon with correct epoch', () => {
-    const action = {type: 'ADD_LEXEME', value: {lexeme:'ntʰa', epoch: 'epoch-2'}}
+    const action = {type: 'ADD_LEXEME', value: {lexeme:'ntʰa', epoch: 'epoch 2'}}
    expect(stateReducer(state, action)).toEqual({...state, lexicon:[...state.lexicon, {lexeme:'ntʰa', epoch:state.epochs[1]}]});
  });
  it('lexicon set returns updated lexicon with correct epoch', () => {
    const newLexicon = [
-      {lexeme:'anta', epoch:'epoch-1'},
-      {lexeme:'anat', epoch:'epoch-1'},
-      {lexeme:'anət', epoch:'epoch-1'},
-      {lexeme:'anna', epoch:'epoch-1'}
+      {lexeme:'anta', epoch:'epoch 1'},
+      {lexeme:'anat', epoch:'epoch 1'},
+      {lexeme:'anət', epoch:'epoch 1'},
+      {lexeme:'anna', epoch:'epoch 1'}
    ]
    const action = {type: 'SET_LEXICON', value: newLexicon}
    expect(stateReducer(state, action)).toEqual({...state, lexicon:[
@@ -58,7 +58,7 @@ describe('Lexicon', () => {
    const inputLexicon = [
      {lexeme:'anta'},
      {lexeme:'anat'},
-      {lexeme:'anət', epoch:'epoch-2'},
+      {lexeme:'anət', epoch:'epoch 2'},
      {lexeme:'anna'}
    ]
    const action = {type: 'SET_LEXICON', value: inputLexicon}
View file

@@ -32,8 +32,7 @@ const findFeaturesFromLexeme = (phones: {}, lexeme:string): [] => {
  let lastIndex = lexeme.length - 1;
  let node = {};
  [...lexeme].forEach((graph, index) => {
-    try {
-      if (!index ) return node = phones[graph]
+    if (!index) return node = phones[graph]
    if (index === lastIndex) return node[graph]
      ? featureBundle.push(node[graph])
      : featureBundle.push(node, phones[graph])
@@ -41,12 +40,8 @@ const findFeaturesFromLexeme = (phones: {}, lexeme:string): [] => {
      featureBundle.push(node)
      return node = phones[graph]
    }
-    if (!node) return node = phones[graph]
+    if (!node[graph])
    return node = node[graph]
-    }
-    catch (e) {
-      throw {e, 'phones[graph]':phones[graph], index, lexeme }
-    }
  })
  return featureBundle;
}
@@ -216,7 +211,7 @@ const transformLexemeInitial = (newLexeme, pre, post, position, phoneme, index,
  if (!isEnvironmentBoundByRule(lexemeBundle.slice(index + position.length, index + post.length + position.length), post)) return [...newLexeme, phoneme];
  const newPhoneme = transformPhoneme(phoneme, newFeatures[0], features);
  // if deletion occurs
-  if (!newPhoneme || !newPhoneme.grapheme) return [ ...newLexeme] ;
+  if (!newPhoneme.grapheme) return [ ...newLexeme] ;
  return [...newLexeme, newPhoneme];
}
@@ -226,7 +221,7 @@ const transformLexemeCoda = (newLexeme, pre, post, position, phoneme, index, lex
  if (!isEnvironmentBoundByRule([phoneme], position)) return [...newLexeme, phoneme];
  const newPhoneme = transformPhoneme(phoneme, newFeatures[0], features);
  // if deletion occurs
-  if (!newPhoneme || !newPhoneme.grapheme) return [ ...newLexeme] ;
+  if (!newPhoneme.grapheme) return [ ...newLexeme] ;
  return [...newLexeme, newPhoneme];
}
@@ -269,14 +264,11 @@ export const run = (state: stateType, action: resultsAction): stateType => {
    const { phones, features, lexicon } = state;
    let lexiconBundle;
    if ( epoch.parent ) {
-      lexiconBundle = results.find(result => result.pass === epoch.parent)
+      lexiconBundle = results.find(result => result.pass === epoch.parent).lexicon
    }
-    if (!lexiconBundle) {
+    if (!epoch.parent) {
      lexiconBundle = formBundleFromLexicon(lexicon)(phones);
    }
-    else {
-      lexiconBundle = lexiconBundle.lexicon
-    }
    const ruleBundle = decomposeRules(epoch, phones);
    const passResults = transformLexicon(lexiconBundle)(ruleBundle)(features)
    const pass = { pass: epoch.name, lexicon: passResults }
@@ -285,9 +277,9 @@ export const run = (state: stateType, action: resultsAction): stateType => {
  }, []);
  const results = passResults.map(stringifyResults);
-  return {...state, results, errors: {}, parseResults: '' }
+  return {...state, results, errors: {} }
  } catch (err) {
    console.log(err)
-    return {...state, errors: err, results:[], parseResults: '' };
+    return {...state, errors: err, results:[] };
  }
}
View file

@@ -130,7 +130,7 @@ describe('Results', () => {
    state = initState(1)
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'anna', 'anat', 'anət', 'anna', 'tan', 'ənna'
        ]
@@ -143,7 +143,7 @@ describe('Results', () => {
    state = initState(2)
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annɯ', 'anat', 'anət', 'annɯ', 'tan', 'ənnɯ'
        ]
@@ -156,7 +156,7 @@ describe('Results', () => {
    state = initState(3)
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annɯ', 'anat', 'ant', 'annɯ', 'tan', 'nnɯ'
        ]
@@ -169,7 +169,7 @@ describe('Results', () => {
    state = initState(4)
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annɯ', 'anat', 'ant', 'annɯ', 'tʰan', 'nnɯ'
        ]
@@ -182,7 +182,7 @@ describe('Results', () => {
    state = initState(5)
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annu', 'anat', 'ant', 'annu', 'tʰan', 'nnu'
        ]
@@ -195,7 +195,7 @@ describe('Results', () => {
    // state = initState(6)
    // expect(stateReducer(state, action).results).toEqual([
    //   {
-    //     pass: 'epoch-1',
+    //     pass: 'epoch 1',
    //     lexicon: [
    //       'annu', 'anta', 'ant', 'annu', 'tʰan', 'nnu'
    //     ]
@@ -207,7 +207,7 @@ describe('Results', () => {
    const action = {type: 'RUN'};
    state = initState(5);
    const newEpoch = {
-      name: 'epoch-2',
+      name: 'epoch 2',
      changes: [
        '[+ sonorant ]>0/#_.',
        'n>0/#_n'
@@ -216,13 +216,13 @@ describe('Results', () => {
    state.epochs = [ ...state.epochs, newEpoch ]
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annu', 'anat', 'ant', 'annu', 'tʰan', 'nnu'
        ]
      },
      {
-        pass: 'epoch-2',
+        pass: 'epoch 2',
        lexicon: [
          'nta', 'nat', 'nət', 'na', 'tan', 'nta'
        ]
@@ -234,8 +234,8 @@ describe('Results', () => {
    const action = {type: 'RUN'};
    state = initState(5);
    const newEpoch = {
-      name: 'epoch-2',
-      parent: 'epoch-1',
+      name: 'epoch 2',
+      parent: 'epoch 1',
      changes: [
        '[+ sonorant ]>0/#_.'
      ]
@@ -243,14 +243,14 @@ describe('Results', () => {
    state.epochs = [ ...state.epochs, newEpoch ]
    expect(stateReducer(state, action).results).toEqual([
      {
-        pass: 'epoch-1',
+        pass: 'epoch 1',
        lexicon: [
          'annu', 'anat', 'ant', 'annu', 'tʰan', 'nnu'
        ]
      },
      {
-        pass: 'epoch-2',
-        parent: 'epoch-1',
+        pass: 'epoch 2',
+        parent: 'epoch 1',
        lexicon: [
          'nnu', 'nat', 'nt', 'nnu', 'tʰan', 'nu'
        ]
View file

@@ -1,75 +0,0 @@
# LATL specification
## Feature Definition
## Rule Definition
ex.
```
(
`Unmotivated A to C`
A -> B / _
A -> C / _
  ``A becomes C in all environments with an intermediate state of B``
)
```
### Rule Body
#### Sound Definition
#### Change Definition
#### Environment Definition
##### Null Environment
Valid syntaxes:
```
A -> B ; no indicated environment
A -> B / _ ; environment indicated wth underscore
A -> B / . _ . ; environment indicated with underscore and placeholder dots
```
### Rule Metadata
#### Rule Title
#### Rule Description
## Language Primitives
## Data Structures
### Sets
Sets are collections of pointers to phones. The GLOBAL set contains all phones, making all other sets subsets of GLOBAL.
#### Global Set
[ GLOBAL ] is a shorthand for [ GLOBAL.SETS ]
#### Set Definition
Sets are defined with the set keyword followed by an equal sign and a set expression:
```
set SHORT_VOWELS = [ a, i, u ]
```
A single alias can be provided to the set during definition:
```
; the alias N can be used to refer to this set
set NASAL_PULMONIC_CONSONANTS, N = [ m, ɱ, n̼, n, ɳ, ɲ, ŋ, ɴ ]
```
Multiple sets can be defined in a single statement, separated by a comma followed by whitespace:
```
set PLOSIVES = [ p, t, k ],
FRICATIVES = [ f, s, x ],
LABIALIZED_PLOSIVES = { PLOSIVES yield [ X concat ʷ ] }
```
#### Set Usage
#### Set Operations
##### 'and' Operation
##### 'or' Operation
##### 'not' Operation
##### 'nor' Operation
##### 'in' Operation
##### 'yield' Operation
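The assertionData fixtures at the end of this diff (setOperationsJoin, setDefinitionYield) exercise these operators; a composite sketch assembled from those fixtures, not original spec text:
```
; join operations are non-mutable
set SET_C = { SET_A not SET_B },      ; left anti join
    SET_D = { SET_A and SET_B },      ; inner join
    SET_E = { SET_A or SET_B },       ; full outer join
    SET_F = { not SET_A },            ; = { GLOBAL not SET_A }
    SET_G = { not SET_A nor SET_B }   ; = { GLOBAL not { SET_A or SET_B } }

; 'in' filters against another set, 'yield' maps the matched phones
set NASAL_VOWELS = { [ V ] in ORAL_VOWELS yield [ Ṽ ] }
```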
### Lexemes
#### Lexeme Operations
### Phone
For the set of phones 'a', 'b', and 'ab':
```
GLOBAL ┬▻ <Key: a> ┬▻ <Key: b> ┬▻ { feature: <Boolean>, ... }
│ │ └▻ grapheme: <String: 'ab'>
│ └┬▻ { feature: <Boolean>, ... }
│ └▻ grapheme: <String: 'a'>
└┬▻ { feature: <Boolean>, ... }
└▻ grapheme: <String: 'b'>
```
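In the reducers earlier in this diff this structure is a plain nested object (compare the `phones` fixture in the LATL test file); a sketch for the phones 'a', 'b', and 'ab':
```
const phones = {
  a: {
    features: { /* feature: <Boolean>, ... */ },
    grapheme: 'a',
    // the multi-character phone 'ab' nests under its leading grapheme
    b: { features: {}, grapheme: 'ab' }
  },
  b: { features: {}, grapheme: 'b' }
};
```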
#### Phone Operations
### Epochs
View file

@@ -1,19 +0,0 @@
import { parser } from './parser';
export const codeGenerator = (latl) => {
const results = parser().feed(latl).results;
const nodeReader = (code, node) => {
if (node.length) {
      return node.reduce(nodeReader, code)
}
if (!node) return code;
if (node.main) {
return nodeReader(code, node.main)
}
return code + node;
}
return nodeReader('', results)
}
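Per the assertionData fixtures at the end of this diff, both a bare comment and a simple set definition currently generate an empty string, so a minimal call looks like:
```
codeGenerator('; comment'); // => '' (assertionData.simpleComment.code is "")
```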
View file

@@ -1,120 +0,0 @@
// Generated automatically by nearley, version 2.19.1
// http://github.com/Hardmath123/nearley
(function () {
function id(x) { return x[0]; }
const { lexer } = require('./lexer.js');
const getTerminal = d => d ? d[0] : null;
const getAll = d => d.map((item, i) => ({ [i]: item }));
const flag = token => d => d.map(item => ({ [token]: item }))
const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
const flagIndex = d => d.map((item, i) => ({[i]: item}))
const remove = _ => null;
const append = d => d.join('');
const constructSet = d => d.reduce((acc, t) => {
if (t && t.type === 'setIdentifier') acc.push({set: t});
if (t && t.length) acc[acc.length - 1].phones = t;
return acc;
}, []);
const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
var grammar = {
Lexer: lexer,
ParserRules: [
{"name": "main$ebnf$1", "symbols": []},
{"name": "main$ebnf$1$subexpression$1", "symbols": ["_", "statement"]},
{"name": "main$ebnf$1", "symbols": ["main$ebnf$1", "main$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "main", "symbols": ["main$ebnf$1", "_"], "postprocess": pipe(
clearNull,
// recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
flag('main'),
getTerminal,
) },
{"name": "_$ebnf$1$subexpression$1", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)]},
{"name": "_$ebnf$1", "symbols": ["_$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "_$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "_", "symbols": ["_$ebnf$1"], "postprocess": remove},
{"name": "__", "symbols": [(lexer.has("whiteSpace") ? {type: "whiteSpace"} : whiteSpace)], "postprocess": remove},
{"name": "equal", "symbols": [(lexer.has("equal") ? {type: "equal"} : equal)], "postprocess": remove},
{"name": "statement", "symbols": ["comment"]},
{"name": "statement", "symbols": ["definition"], "postprocess": pipe(
d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
        // recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
// may split from other definition statements
d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
) },
{"name": "comment", "symbols": [(lexer.has("comment") ? {type: "comment"} : comment)], "postprocess": pipe(getTerminal, remove)},
{"name": "definition$ebnf$1", "symbols": []},
{"name": "definition$ebnf$1$subexpression$1", "symbols": ["setDefinition", (lexer.has("comma") ? {type: "comma"} : comma), "__"]},
{"name": "definition$ebnf$1", "symbols": ["definition$ebnf$1", "definition$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "definition", "symbols": [(lexer.has("kwSet") ? {type: "kwSet"} : kwSet), "__", "definition$ebnf$1", "setDefinition"], "postprocess": pipe(
// not yet sure why this call is required twice
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
clearNull,
) },
{"name": "setDefinition$ebnf$1$subexpression$1", "symbols": ["setAlias"]},
{"name": "setDefinition$ebnf$1", "symbols": ["setDefinition$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "setDefinition$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "setDefinition", "symbols": [(lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier), "setDefinition$ebnf$1", "__", "equal", "__", "setExpression"], "postprocess":
pipe(
d => d.filter(t => !!t && t.length !== 0),
d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
d => d.map(t => t.length ?
        // pretty ugly ([ { type: 'alias', alias: [ string ] }] ) => { setAlias: str }
{ setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
: t),
)
},
{"name": "setExpression", "symbols": [(lexer.has("openSquareBracket") ? {type: "openSquareBracket"} : openSquareBracket), "_", "phoneList", "_", (lexer.has("closeSquareBracket") ? {type: "closeSquareBracket"} : closeSquareBracket)]},
{"name": "setExpression$ebnf$1$subexpression$1", "symbols": ["setOperation"]},
{"name": "setExpression$ebnf$1", "symbols": ["setExpression$ebnf$1$subexpression$1"], "postprocess": id},
{"name": "setExpression$ebnf$1", "symbols": [], "postprocess": function(d) {return null;}},
{"name": "setExpression", "symbols": [(lexer.has("openCurlyBracket") ? {type: "openCurlyBracket"} : openCurlyBracket), "_", "setExpression$ebnf$1", "_", (lexer.has("closeCurlyBracket") ? {type: "closeCurlyBracket"} : closeCurlyBracket)], "postprocess":
pipe(
// filters commas and whitespace
d => d.filter(t => t && t.length),
d => d.map(t => t.map(u => u[0])),
flag('setExpression')
) },
{"name": "setAlias", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_", (lexer.has("setIdentifier") ? {type: "setIdentifier"} : setIdentifier)], "postprocess": pipe(
d => d && d.length ? d.filter(t => !!t) : d,
d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
d => d.filter(t => !!t),
d => ({type: 'alias', alias: d }),
) },
{"name": "phoneList$ebnf$1", "symbols": []},
{"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": []},
{"name": "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1", "symbols": [(lexer.has("comma") ? {type: "comma"} : comma), "_"]},
{"name": "phoneList$ebnf$1$subexpression$1$ebnf$1", "symbols": ["phoneList$ebnf$1$subexpression$1$ebnf$1", "phoneList$ebnf$1$subexpression$1$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "phoneList$ebnf$1$subexpression$1", "symbols": [(lexer.has("phone") ? {type: "phone"} : phone), "phoneList$ebnf$1$subexpression$1$ebnf$1"]},
{"name": "phoneList$ebnf$1", "symbols": ["phoneList$ebnf$1", "phoneList$ebnf$1$subexpression$1"], "postprocess": function arrpush(d) {return d[0].concat([d[1]]);}},
{"name": "phoneList", "symbols": ["phoneList$ebnf$1"], "postprocess":
pipe(
d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
)
},
{"name": "setOperation", "symbols": ["orOperation"]},
{"name": "setOperation", "symbols": [(lexer.has("identifier") ? {type: "identifier"} : identifier)], "postprocess": pipe(
d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
)},
{"name": "orOperation", "symbols": ["_", "setOperation", "__", (lexer.has("kwSetOr") ? {type: "kwSetOr"} : kwSetOr), "__", "setOperation", "_"], "postprocess": pipe(
d => d.filter(d => !!d),
d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
) }
]
, ParserStart: "main"
}
if (typeof module !== 'undefined'&& typeof module.exports !== 'undefined') {
module.exports = grammar;
} else {
window.grammar = grammar;
}
})();
View file

@@ -1,109 +0,0 @@
@{%
const { lexer } = require('./lexer.js');
const getTerminal = d => d ? d[0] : null;
const getAll = d => d.map((item, i) => ({ [i]: item }));
const flag = token => d => d.map(item => ({ [token]: item }))
const clearNull = d => d.filter(t => !!t && (t.length !== 1 || t[0])).map(t => t.length ? clearNull(t) : t);
const flagIndex = d => d.map((item, i) => ({[i]: item}))
const remove = _ => null;
const append = d => d.join('');
const constructSet = d => d.reduce((acc, t) => {
if (t && t.type === 'setIdentifier') acc.push({set: t});
if (t && t.length) acc[acc.length - 1].phones = t;
return acc;
}, []);
const pipe = (...funcs) => d => funcs.reduce((acc, func) => func(acc), d);
const objFromArr = d => d.reduce((obj, item) => ({ ...obj, ...item }), {});
%}
@lexer lexer
main -> (_ statement):* _
{% pipe(
clearNull,
// recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
flag('main'),
getTerminal,
) %}
_ -> (%whiteSpace):?
{% remove %}
__ -> %whiteSpace
{% remove %}
equal -> %equal
{% remove %}
statement -> comment | definition
{% pipe(
d => d.flatMap(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
  // recursive call to fix repeat?
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
d => d.map(t => t && t.length === 1 && t[0] ? t[0] : t),
// may split from other definition statements
d => d.map(t => t && t.length > 1 ? ({ type: 'set', ...objFromArr(t) }) : null)
) %}
comment -> %comment
{% pipe(getTerminal, remove) %}
# SETS
definition -> %kwSet __ (setDefinition %comma __):* setDefinition
{% pipe(
// not yet sure why this call is required twice
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet') : u),
d => d.map(u => u && u.length ? u.map(v => v.length ? v.filter(t => t && t.type !== 'comma' && t.type !== 'kwSet')[0] : v) : u),
clearNull,
) %}
setDefinition -> %setIdentifier (setAlias):? __ equal __ setExpression
{%
pipe(
d => d.filter(t => !!t && t.length !== 0),
d => d.map(u => u && u.length ? u.map(t => t && t.length ? t.filter(v => v && v.type !== 'comma') : t) : u),
d => d.map(t => t.type === 'setIdentifier' ? { setIdentifier: t.toString() } : t),
d => d.map(t => t && t.length && t[0].hasOwnProperty('setExpression') ? t[0] : t),
d => d.map(t => t.length ?
  // pretty ugly ([ { type: 'alias', alias: [ string ] }] ) => { setAlias: str }
{ setAlias: t.reduce((aliases, token) => token && token.type === 'alias' ? [...aliases, ...token.alias] : aliases, [])[0] }
: t),
)
%}
setExpression -> %openSquareBracket _ phoneList _ %closeSquareBracket
| %openCurlyBracket _ (setOperation):? _ %closeCurlyBracket
{%
pipe(
// filters commas and whitespace
d => d.filter(t => t && t.length),
d => d.map(t => t.map(u => u[0])),
flag('setExpression')
) %}
setAlias -> %comma _ %setIdentifier
{% pipe(
d => d && d.length ? d.filter(t => !!t) : d,
d => d.map(t => t.type === 'setIdentifier' ? t.toString() : null),
d => d.filter(t => !!t),
d => ({type: 'alias', alias: d }),
) %}
phoneList -> (%phone (%comma _):* ):*
{%
pipe(
d => d ? d[0].map(t => t.filter(u => u.type === 'phone').map(u => u.toString())) : d
)
%}
setOperation -> orOperation
| %identifier
{% pipe(
d => d.type ? d : ({ identifier: d.toString(), type: 'identifier' })
)%}
orOperation -> _ setOperation __ %kwSetOr __ setOperation _
{% pipe(
d => d.filter(d => !!d),
d => ({ type: 'operator', operator: 'or', operands: [ d[0], d[2] ] }),
) %}
View file

@@ -1,124 +0,0 @@
const moo = require("moo");
const lexer = moo.states({
main: {
comment: /;.*$/,
star: { match: /\*/, push: "epoch" },
slash: { match: /\//, push: "lexicon" },
// change so that identifiers are always upper, keywords are always lower, phones are always lower
kwSet: {
match: "set",
type: moo.keywords({ kwSet: "set " }),
push: "setDefinition",
},
identifier: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
openBracket: { match: /\[/, push: "feature" },
whiteSpace: { match: /\s+/, lineBreaks: true },
newLine: { match: /\n+/, lineBreaks: true },
},
epoch: {
identifier: {
match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/,
push: "rule",
},
openParen: { match: /\(/, push: "ruleDefinition" },
pipe: { match: /\|/, pop: true },
greaterThan: /\>/,
arrow: /\-\>/,
hash: /#/,
slash: /\//,
dot: /\./,
underscore: /\_/,
newLine: { match: /\n/, lineBreaks: true },
},
ruleDefinition: {
doubleTick: { match: /``/, push: "ruleName" },
singleTick: { match: /`/, push: "ruleDescription" },
// push rule
closeParen: { match: /\)/, pop: true },
newLine: { match: /\n/, lineBreaks: true },
},
ruleName: {
ruleName: { match: /.+(?=``)/ },
doubleTick: { match: /``/, pop: true },
},
ruleDescription: {
ruleDescription: { match: /.+(?=`)/ },
singleTick: { match: /`/, pop: true },
},
rule: {
openSquareBracket: { match: /\[/, push: "ruleFeature" },
// whiteSpace: { match: /\s/ },
newLine: { match: /\n/, pop: true, lineBreaks: true },
},
ruleFeature: {
ruleFeature: { match: /[A-Za-z]+[\u00c0-\u03FFA-Za-z0-9\\-\\_]*/ },
closeBracket: { match: /\]/, pop: true },
newLine: { match: /\n/, lineBreaks: true },
},
lexicon: {
slash: { match: /\//, pop: true },
newLine: { match: /\n/, lineBreaks: true },
},
feature: {
closeBracket: { match: /\]/, pop: true },
positiveAssignment: /\+=/,
negativeAssignment: /\-=/,
newLine: { match: /\n/, lineBreaks: true },
},
setDefinition: {
comment: /;.*$/,
setIdentifier: { match: /[A-Z]+[A-Z_]*/ },
openCurlyBracket: { match: /\{/, push: "setOperation" },
equal: /=/,
openSquareBracket: /\[/,
phone: /[\u00c0-\u03FFa-z]+/,
closeSquareBracket: { match: /\]/ },
comma: { match: /,/, push: "commaOperation" },
whiteSpace: { match: /[\t ]+/ },
newLine: { match: /\n/, pop: true, lineBreaks: true },
},
setOperation: {
closeCurlyBracket: { match: /\}/, pop: true },
// ! restrict identifiers
keyword: {
match: ["not", "and", "or", "nor", "in", "yield", "concat", "dissoc"],
type: moo.keywords({
kwSetNot: "not",
kwSetAnd: "and",
kwSetOr: "or",
kwSetNor: "nor",
kwSetIn: "in",
kwSetYield: "yield",
kwSetConcat: "concat",
kwSetDissoc: "dissoc",
}),
},
    identifier: /[A-Z]+[A-Z_]*/,
    whiteSpace: { match: /\s+/, lineBreaks: true },
    openSquareBracket: /\[/,
    closeSquareBracket: /\]/,
    phone: /[\u00c0-\u03FFa-z]+/,
},
commaOperation: {
// if comma is detected during a definition, the commaOperation consumes all white space and pops back to definition
// this prevents popping back to main
comment: /\s*;.*$/,
whiteSpace: { match: /\s+/, lineBreaks: true, pop: true },
newLine: { match: /\n/, lineBreaks: true, pop: true },
},
});
module.exports = { lexer };
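The states lexer is consumed like any moo lexer: reset it with source text, then iterate. A sketch whose expected stream matches assertionData.simpleSetDefinition below:
```
const { lexer } = require('./lexer.js');
lexer.reset('set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ ]');
for (const token of lexer) {
  console.log(token.type, token.value);
  // kwSet 'set', whiteSpace ' ', setIdentifier 'NASAL_PULMONIC_CONSONANTS', ...
}
```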
View file

@@ -1,4 +0,0 @@
const nearley = require("nearley");
const grammar = require("./grammar.js");
export const parser = () => new nearley.Parser(nearley.Grammar.fromCompiled(grammar));
View file

@@ -1,810 +0,0 @@
export const assertionData = {
simpleComment: {
latl: `; comment`,
tokens: [{ type: "comment", value: "; comment" }],
AST: {
main: [],
},
code: "",
},
simpleSetDefinition: {
latl: `set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ ]`,
tokens: [
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "NASAL_PULMONIC_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m̥" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɱ" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
],
AST: {
main: [
{
type: "set",
setIdentifier: "NASAL_PULMONIC_CONSONANTS",
setExpression: ["m̥", "m", "ɱ"],
},
],
},
code: "",
},
commaSetDefinition: {
latl: `
set NASAL_PULMONIC_CONSONANTS = [ m̥, m, ɱ, n̼, n̥, n, ɳ̊, ɳ, ɲ̊, ɲ, ŋ, ̊ŋ, ɴ ],
STOP_PULMONIC_CONSONANTS = [ p, b, p̪, b̪, t̼, d̼, t, d, ʈ, ɖ, c, ɟ, k, ɡ, q, ɢ, ʡ, ʔ ]`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "NASAL_PULMONIC_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m̥" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɱ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n̼" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n̥" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɳ̊" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɳ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɲ̊" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɲ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ŋ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "̊ŋ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɴ" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "STOP_PULMONIC_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "p" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "b" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "p̪" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "b̪" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "t̼" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "d̼" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "t" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "d" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ʈ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɖ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "c" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɟ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "k" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɡ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "q" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɢ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ʡ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ʔ" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
],
AST: {
main: [
{
type: "set",
setIdentifier: "NASAL_PULMONIC_CONSONANTS",
setExpression: [
"m̥",
"m",
"ɱ",
"n̼",
"n̥",
"n",
"ɳ̊",
"ɳ",
"ɲ̊",
"ɲ",
"ŋ",
"̊ŋ",
"ɴ",
],
},
{
type: "set",
setIdentifier: "STOP_PULMONIC_CONSONANTS",
setExpression: [
"p",
"b",
"p̪",
"b̪",
"t̼",
"d̼",
"t",
"d",
"ʈ",
"ɖ",
"c",
"ɟ",
"k",
"ɡ",
"q",
"ɢ",
"ʡ",
"ʔ",
],
},
],
},
},
setAliasDefinition: {
latl: `
set NASAL_PULMONIC_CONSONANTS, N = [ m̥, m, ɱ, n̼, n̥, n, ɳ̊, ɳ, ɲ̊, ɲ, ŋ, ̊ŋ, ɴ ]`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "NASAL_PULMONIC_CONSONANTS" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "N" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m̥" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "m" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɱ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n̼" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n̥" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "n" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɳ̊" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɳ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɲ̊" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɲ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ŋ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "̊ŋ" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "ɴ" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
],
AST: {
main: [
{
type: "set",
setIdentifier: "NASAL_PULMONIC_CONSONANTS",
setAlias: "N",
setExpression: [
"m̥",
"m",
"ɱ",
"n̼",
"n̥",
"n",
"ɳ̊",
"ɳ",
"ɲ̊",
"ɲ",
"ŋ",
"̊ŋ",
"ɴ",
],
},
],
},
},
setDefinitionJoin: {
latl: `
set CLICK_CONSONANTS = { TENUIS_CLICK_CONSONANTS or VOICED_CLICK_CONSONANTS }`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "TENUIS_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetOr", value: "or" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "VOICED_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
],
AST: {
main: [
{
type: "set",
setIdentifier: "CLICK_CONSONANTS",
setExpression: [
{
type: "operator",
operator: "or",
operands: [
{
type: "identifier",
identifier: "TENUIS_CLICK_CONSONANTS",
},
{
type: "identifier",
identifier: "VOICED_CLICK_CONSONANTS",
},
],
},
],
},
],
},
},
setDefinitionMultiJoin: {
latl: `
set CLICK_CONSONANTS = { TENUIS_CLICK_CONSONANTS or VOICED_CLICK_CONSONANTS
or NASAL_CLICK_CONSONANTS or L_CLICK_CONSONANTS
}`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "TENUIS_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetOr", value: "or" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "VOICED_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: "\n " },
{ type: "kwSetOr", value: "or" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "NASAL_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetOr", value: "or" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "L_CLICK_CONSONANTS" },
{ type: "whiteSpace", value: " \n " },
{ type: "closeCurlyBracket", value: "}" },
],
AST: {
main: [
{
type: "set",
setIdentifier: "CLICK_CONSONANTS",
setExpression: [
{
type: "operator",
operator: "or ",
operands: [
{
type: "identifier",
identifier: "TENUIS_CLICK_CONSONANTS",
},
{
type: "operator",
operator: "or",
operands: [
{
type: "identifier",
identifier: "VOICED_CLICK_CONSONANTS",
},
{
type: "operator",
operator: "or",
operands: [
{
type: "identifier",
identifier: "NASAL_CLICK_CONSONANTS",
},
{
type: "identifier",
operands: "L_CLICK_CONSONANTS",
},
],
},
],
},
],
},
],
},
],
},
},
setDefinitionYield: {
latl: `
set NASAL_VOWELS = { [ V ] in ORAL_VOWELS yield [ Ṽ ] },
SHORT_NASAL_VOWELS = { [ Vː ] in NASAL_VOWELS yield [ V ]ː },
LONG_NASAL_VOWELS = { [ Vː ] in NASAL_VOWELS }`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "NASAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "V" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetIn", value: "in" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "ORAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "V" },
{ type: "phone", value: "̃" }, // test display for COMBINING TILDE OVERLAY is deceiving
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SHORT_NASAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "V" },
{ type: "phone", value: "ː" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetIn", value: "in" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "NASAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "V" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "phone", value: "ː" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "LONG_NASAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "V" },
{ type: "phone", value: "ː" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetIn", value: "in" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "NASAL_VOWELS" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
],
},
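  // In these fixtures, `[ pattern ] in SOURCE` filters SOURCE to members
  // matching the pattern, and an optional `yield` maps each match to a
  // transformed form (here, oral vowels to their nasalized counterparts).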
setOperationsJoin: {
latl: `
; ---- set join operations non-mutable!
set SET_C = { SET_A not SET_B }, ; left anti join
SET_D = { SET_A and SET_B }, ; inner join
SET_E = { SET_A or SET_B }, ; full outer join
SET_F = { not SET_A }, ; = { GLOBAL not SET_A }
SET_G = { not SET_A nor SET_B } ; = { GLOBAL not { SET_A or SET_B } }`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{ type: "comment", value: "; ---- set join operations non-mutable! " },
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "SET_C" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetNot", value: "not" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_B" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "comment", value: " ; left anti join" },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_D" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetAnd", value: "and" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_B" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "comment", value: " ; inner join" },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_E" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetOr", value: "or" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_B" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "comment", value: " ; full outer join" },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_F" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetNot", value: "not" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "comment", value: " ; = { GLOBAL not SET_A }" },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_G" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetNot", value: "not" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetNor", value: "nor" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_B" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "whiteSpace", value: " " },
{ type: "comment", value: "; = { GLOBAL not { SET_A or SET_B } }" },
],
},
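  // Worked illustration (hypothetical data, not from the fixtures): with
  // SET_A = { p, t, k } and SET_B = { t, k, q }, the joins above give
  // SET_C = { p }, SET_D = { t, k }, and SET_E = { p, t, k, q }.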
setOperations: {
latl: `
; ---- set character operations - non-mutable!
set SET_B = { [ Xy ] in SET_A }, ; FILTER: where X is any character and y is a filtering character
SET_C = { SET_A yield [ Xy ] }, ; CONCATENATE: performs transformation with (prepended or) appended character
SET_D = { SET_A yield [ X concat y ] },
SET_E = { SET_A yield [ y concat X ] },
SET_F = { SET_A yield y[ X ] }, ; DISSOCIATE: performs transformation removing prepended (or appended) character
SET_G = { SET_A yield y dissoc [ X ] },
SET_H = { SET_A yield [ X ] dissoc y },
SET_I = { [ Xy ] in SET_A yield [ X ]y } ; combined FILTER and DISSOCIATE`,
tokens: [
{ type: "whiteSpace", value: "\n" },
{
type: "comment",
value: "; ---- set character operations - non-mutable!",
},
{ type: "whiteSpace", value: "\n" },
{ type: "kwSet", value: "set" },
{ type: "whiteSpace", value: " " },
{ type: "setIdentifier", value: "SET_B" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetIn", value: "in" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{
type: "comment",
value:
" ; FILTER: where X is any character and y is a filtering character",
},
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_C" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{
type: "comment",
value:
" ; CONCATENATE: performs transformation with (prepended or) appended character",
},
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_D" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetConcat", value: "concat" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_E" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetConcat", value: "concat" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_F" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "y" },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{
type: "comment",
value:
" ; DISSOCIATE: performs transformation removing prepended (or appended) character",
},
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_G" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetDissoc", value: "dissoc" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_H" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetDissoc", value: "dissoc" },
{ type: "whiteSpace", value: " " },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "comma", value: "," },
{ type: "whiteSpace", value: "\n " },
{ type: "setIdentifier", value: "SET_I" },
{ type: "whiteSpace", value: " " },
{ type: "equal", value: "=" },
{ type: "whiteSpace", value: " " },
{ type: "openCurlyBracket", value: "{" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetIn", value: "in" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "SET_A" },
{ type: "whiteSpace", value: " " },
{ type: "kwSetYield", value: "yield" },
{ type: "whiteSpace", value: " " },
{ type: "openSquareBracket", value: "[" },
{ type: "whiteSpace", value: " " },
{ type: "identifier", value: "X" },
{ type: "whiteSpace", value: " " },
{ type: "closeSquareBracket", value: "]" },
{ type: "phone", value: "y" },
{ type: "whiteSpace", value: " " },
{ type: "closeCurlyBracket", value: "}" },
{ type: "whiteSpace", value: " " },
{ type: "comment", value: "; combined FILTER and DISSOCIATE" },
],
},
};
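
// A minimal runnable sketch of the set semantics exercised above, using plain
// JS Sets. The helper names (setNot, setAnd, setOr, concatEnd, dissocStart)
// and the GLOBAL complement are illustrative assumptions, not LATL runtime APIs.
const setNot = (a, b) => new Set([...a].filter(x => !b.has(x))); // left anti join
const setAnd = (a, b) => new Set([...a].filter(x => b.has(x)));  // inner join
const setOr = (a, b) => new Set([...a, ...b]);                   // full outer join
// { not SET_A } complements against the global inventory, so { not SET_A nor SET_B }
// would be setNot(GLOBAL, setOr(SET_A, SET_B)) for some global phone set.
const concatEnd = (set, y) => new Set([...set].map(x => x + y)); // SET_A yield [ X concat y ]
const dissocStart = (set, y) =>                                  // SET_A yield y dissoc [ X ]
  new Set([...set].map(x => (x.startsWith(y) ? x.slice(y.length) : x)));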

View file

@ -1,10 +0,0 @@
import { assertionData } from './assertionData';
import { codeGenerator } from '../codeGenerator';

describe('codeGenerator', () => {
  it('parses simple comment', () => {
    const { latl, code } = assertionData.simpleComment;
    const generatedCode = codeGenerator(latl);
    expect(generatedCode).toEqual(code);
  });
})

View file

@ -1,71 +0,0 @@
import { lexer } from '../lexer';
import { assertionData } from './assertionData';

describe('lexer', () => {
  const getToken = obj => obj ? formatToken(obj) : null;
  const formatToken = obj => ({ type: obj.type, value: obj.value });

  const getStream = latl => {
    lexer.reset(latl);
    const stream = [];
    // drain the lexer; next() returns undefined once the input is exhausted
    let token = getToken(lexer.next());
    while (token) {
      stream.push(token);
      token = getToken(lexer.next());
    }
    return stream;
  }

  it('lexes simple comment', () => {
    const { latl, tokens } = assertionData.simpleComment;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  // it('lexes simple * and identifier', () => {
  //   lexer.reset('*proto');
  //   const stream = [ getToken(lexer.next()), getToken(lexer.next()) ];
  //   expect(stream).toStrictEqual([ { type: 'star', value: '*' }, { type: 'identifier', value: 'proto' } ]);
  // })

  it('lexes set and identifier', () => {
    const { latl, tokens } = assertionData.simpleSetDefinition;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  })

  it('lexes multiple set definitions with comma operator', () => {
    const { latl, tokens } = assertionData.commaSetDefinition;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  it('lexes set definition with alias', () => {
    const { latl, tokens } = assertionData.setAliasDefinition;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  it('lexes set definition with set join', () => {
    const { latl, tokens } = assertionData.setDefinitionJoin;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  it('lexes set definition with yield operation', () => {
    const { latl, tokens } = assertionData.setDefinitionYield;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  it('lexes all set join operations', () => {
    const { latl, tokens } = assertionData.setOperationsJoin;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  });

  it('lexes set filter, concat, and dissoc operations', () => {
    const { latl, tokens } = assertionData.setOperations;
    const stream = getStream(latl);
    expect(stream).toStrictEqual(tokens);
  })
})

View file

@ -1,180 +0,0 @@
import { lexer } from "../lexer";
import { parser } from "../parser";
import { assertionData } from "./assertionData";
describe("parser", () => {
it("parses simple comment", () => {
const { latl, AST } = assertionData.simpleComment;
const feedResults = parser().feed(latl).results;
expect(feedResults.length).toBe(1);
expect(feedResults[0]).toStrictEqual(AST);
});
it("parses simple set definition", () => {
const { latl, AST } = assertionData.simpleSetDefinition;
const feedResults = parser().feed(latl).results;
expect(feedResults.length).toBe(1);
expect(feedResults[0]).toStrictEqual(AST);
});
it("parses multiple set definitions with comma operator", () => {
const { latl, AST } = assertionData.commaSetDefinition;
const feedResults = parser().feed(latl).results;
expect(feedResults.length).toBe(1);
expect(feedResults[0]).toStrictEqual(AST);
});
it("lexes set definition with alias", () => {
const { latl, AST } = assertionData.setAliasDefinition;
const feedResults = parser().feed(latl).results;
expect(feedResults[0]).toStrictEqual(AST);
});
it.skip("lexes set definition with set join", () => {
const { latl, AST } = assertionData.setDefinitionJoin;
const feedResults = parser().feed(latl).results;
expect(feedResults[0]).toStrictEqual(AST);
});
it.todo(
"lexes set definition with yield operation"
// , () => {
// const { latl, tokens } = assertionData.setDefinitionYield;
// const stream = getStream(latl);
// expect(stream).toStrictEqual(tokens);
// }
);
it.todo(
"lexes all set join operations"
// , () => {
// const { latl, tokens } = assertionData.setOperationsJoin;
// const stream = getStream(latl);
// expect(stream).toStrictEqual(tokens);
// }
);
it.todo(
"lexes set filter, concat, and dissoc operations"
// , () => {
// const { latl, tokens } = assertionData.setOperations;
// const stream = getStream(latl);
// expect(stream).toStrictEqual(tokens);
// }
);
});
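
// The commented-out block below appears to be a raw nearley `results` dump,
// kept for reference while debugging the multi-set (comma) definition case.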
// {
// "set":
// [
// [
// [
// {
// "col": 5,
// "line": 2,
// "lineBreaks": 0,
// "offset": 5,
// "text": "NASAL_PULMONIC_CONSONANTS",
// "toString": [tokenToString],
// "type": "setIdentifier",
// "value": "NASAL_PULMONIC_CONSONANTS",
// },
// null,
// {
// "col": 45,
// "line": 2,
// "lineBreaks": 0,
// "offset": 45,
// "text": "=",
// "toString": [tokenToString],
// "type": "equal",
// "value": "=",
// },
// null,
// [
// [
// {
// "col": 49,
// "line": 2,
// "lineBreaks": 0,
// "offset": 49,
// "text": "m̥",
// "toString": [tokenToString],
// "type": "phone",
// "value": "m̥",
// },
// {
// "col": 91,
// "line": 2,
// "lineBreaks": 0,
// "offset": 91,
// "text": "ɴ",
// "toString": [tokenToString],
// "type": "phone",
// "value": "ɴ",
// },
// ],
// ],
// {
// "col": 94,
// "line": 2,
// "lineBreaks": 0,
// "offset": 94,
// "text": ",",
// "toString": [tokenToString],
// "type": "comma",
// "value": ",",
// },
// null,
// ],
// ],
// - "setIdentifier": "STOP_PULMONIC_CONSONANTS",
// {
// "col": 5,
// "line": 3,
// "lineBreaks": 0,
// "offset": 100,
// "text": "STOP_PULMONIC_CONSONANTS",
// "toString": [tokenToString],
// "type": "setIdentifier",
// "value": "STOP_PULMONIC_CONSONANTS",
// },
// null,
// {
// "col": 45,
// "line": 3,
// "lineBreaks": 0,
// "offset": 140,
// "text": "=",
// "toString": [tokenToString],
// "type": "equal",
// "value": "=",
// },
// null,
// [
// [
// {
// "col": 49,
// "line": 3,
// "lineBreaks": 0,
// "offset": 144,
// "text": "p",
// "toString": [tokenToString],
// "type": "phone",
// "value": "p",
// },
// {
// "col": 104,
// "line": 3,
// "lineBreaks": 0,
// "offset": 199,
// "text": "ʔ",
// "toString": [tokenToString],
// "type": "phone",
// "value": "ʔ",
// },
// ],
// ],
// ],
// "token": "kwSet",
// }