refactor rule decomposition for cleanliness
parent 6885aeba2f
commit 471759a7cf

3 changed files with 47 additions and 52 deletions
@@ -16,7 +16,7 @@ export const initState = (changesArgument: number): stateType => {
       '[+ sonorant - low rounded high back]>0/._.',
       '[+ obstruent]>[+ obstruent aspirated ]/#_.',
       '[+ sonorant - rounded]>[+ sonorant + rounded]/._#',
-      'nn>nun/._.'
+      'at>ta/._#'
     ]
   }
 ],
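For context, each string in this array is a sound-change rule written as target>result/environment, where '_' marks the target's slot, '#' appears to stand for a word boundary and '.' for any segment; lintRule and decomposeRule below split on exactly these three operators. A hypothetical sketch of how the new rule 'at>ta/._#' breaks apart (splitRule is not a function in this repo, it only mirrors that decomposition):

// Hypothetical illustration of the rule notation used above.
const splitRule = (rule) => {
  const [target, rest] = rule.split('>');        // 'at'  | 'ta/._#'
  const [result, environment] = rest.split('/'); // 'ta'  | '._#'
  const [pre, post] = environment.split('_');    // '.'   | '#'
  return { target, result, pre, post };
};
console.log(splitRule('at>ta/._#')); // { target: 'at', result: 'ta', pre: '.', post: '#' }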
@@ -67,9 +67,9 @@ const findFeaturesFromGrapheme = (phones: {}, lexeme:string): [] => {
 const errorMessage = ([prefix, separator], location, err) => `${prefix}${location}${separator}${err}`
 
 const lintRule = (rule) => {
-  if (rule.match(/>/g) === null) throw `Insert '>' operator between target and result`
-  if (rule.match(/\//g) === null) throw `Insert '/' operator between change and environment`
-  if (rule.match(/_/g) === null) throw `Insert '_' operator in environment`
+  if (!rule.match(/>/g)) throw `Insert '>' operator between target and result`
+  if (!rule.match(/\//g)) throw `Insert '/' operator between change and environment`
+  if (!rule.match(/_/g)) throw `Insert '_' operator in environment`
   if (rule.match(/>/g).length > 1) throw `Too many '>' operators`
   if (rule.match(/\//g).length > 1) throw `Too many '/' operators`
   if (rule.match(/_/g).length > 1) throw `Too many '_' operators`
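The linter now uses a plain falsy check instead of '=== null'. That is safe because String.prototype.match with a /g regex returns either null or a non-empty array, never an empty one, so both forms reject exactly the same rules. A quick sketch:

// match with a global regex: null when the operator is missing, a non-empty
// array otherwise, so !rule.match(/>/g) behaves like rule.match(/>/g) === null.
console.log('a>b/._.'.match(/>/g)); // [ '>' ]  -> truthy, check passes
console.log('ab/._.'.match(/>/g));  // null     -> falsy, rule is rejected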
@@ -77,55 +77,50 @@ const lintRule = (rule) => {
 }
 
 const decomposeRule = (rule: string, index: number): ruleBundle => {
-  // splits rule at '>' '/' and '_' substrings resulting in array of length 4
   try {
+    // splits rule at '>' '/' and '_' substrings resulting in array of length 4
     const [position, newFeatures, pre, post] = lintRule(rule);
-    return {
-      environment: { pre, position, post },
-      newFeatures
-    }
+    return { environment: { pre, position, post }, newFeatures }
   } catch (err) {
     throw errorMessage`Error in line ${index + 1}: ${err}`;
   }
 }
 
+const isUnknownFeatureToken = token => token !== '-' && token !== '+' && token !== ']' && token !== '[' && token !== ' ';
+
 const doesFeatureRuleContainUnknownToken = features => {
   const unknownTokens = features
     .match(/\W/g)
-    .filter(v => v !== '-' && v !== '+' && v !== ']' && v !== '[' && v !== ' ')
+    .filter(isUnknownFeatureToken)
   if (unknownTokens.length) throw `Unknown token '${unknownTokens[0]}'`;
   return true
 }
 
-const getPositiveFeatures = phoneme => {
-  try {
-    const positiveFeatures = phoneme.match(/(?=\+.).*(?<=\-)|(?=\+.).*(?!\-).*(?<=\])/g)
-    if (positiveFeatures) doesFeatureRuleContainUnknownToken(positiveFeatures[0])
-    return positiveFeatures ? positiveFeatures[0]
-      .trim().match(/\w+/g)
-      .reduce((map, feature) => ({...map, [feature]: true}), {})
-    : {}
-  } catch (err) {
-    throw err;
-  }
-}
+const reduceFeaturesToBoolean = bool => (map, feature) => ({...map, [feature]: bool})
 
-const getNegativeFeatures = phoneme => {
+const getFeatures = (phoneme: string, featureBoolean): {} => {
   try {
-    const negativeFeatures = phoneme.match(/(?=\-.).*(?<=\+)|(?=\-.).*(?!\+).*(?<=\])/g)
-    if (negativeFeatures) doesFeatureRuleContainUnknownToken(negativeFeatures[0])
-    return negativeFeatures ? negativeFeatures[0]
+    const featureMatch = featureBoolean
+      // regEx to pull positive features
+      ? /(?=\+.).*(?<=\-)|(?=\+.).*(?!\-).*(?<=\])/g
+      // regEx to pull negative features
+      : /(?=\-.).*(?<=\+)|(?=\-.).*(?!\+).*(?<=\])/g
+    const [ features ] = phoneme.match(featureMatch) || [ null ];
+    if (features) {
+      doesFeatureRuleContainUnknownToken(features)
+      return features
       .trim()
       .match(/\w+/g)
-      .reduce((map, feature) => ({...map, [feature]: false}), {})
-    : {}
+      .reduce(reduceFeaturesToBoolean(featureBoolean), {})
+    }
+    return {}
   } catch (err) {
     throw err;
   }
 }
 
 const mapToPositiveAndNegativeFeatures = phoneme => (
-  { ...getPositiveFeatures(phoneme), ...getNegativeFeatures(phoneme) } )
+  { ...getFeatures(phoneme, true), ...getFeatures(phoneme, false) } )
 
 const mapStringToFeatures = (ruleString, phones) => {
   if (ruleString) {
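getPositiveFeatures and getNegativeFeatures collapse here into a single getFeatures(phoneme, featureBoolean): the boolean selects the positive- or negative-feature regex, and reduceFeaturesToBoolean folds the matched feature names into a map of that boolean. A rough sketch of the expected behaviour on one of the bracketed feature strings used in the rules above (outputs inferred from the regexes, not taken from the test suite):

// Assumed inputs and outputs for the refactored helpers:
getFeatures('[+ sonorant - rounded]', true)   // -> { sonorant: true }
getFeatures('[+ sonorant - rounded]', false)  // -> { rounded: false }
// mapToPositiveAndNegativeFeatures merges the two passes into one feature map:
mapToPositiveAndNegativeFeatures('[+ sonorant - rounded]')
// -> { sonorant: true, rounded: false }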
@@ -194,7 +189,7 @@ const isEnvironmentBoundByRule = (phonemeFeatures, ruleFeatures) => {
     ? true : false;
 }
 
-const swapPhoneme = (phoneme, newFeatures, features) => {
+const transformPhoneme = (phoneme, newFeatures, features) => {
   if (!newFeatures) return {}
   const newPhonemeFeatures = Object.entries(newFeatures)
     .reduce((newPhoneme, [newFeature, newValue]) => ({ ...newPhoneme, [newFeature]: newValue })
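The rename aside, the reduce over Object.entries(newFeatures) shown here is simply a left-to-right object merge; for whatever seed object the accumulator starts from (the seed sits below the visible hunk), it is equivalent to a spread:

// 'seed' is a stand-in name for the reduce's initial value, not a repo identifier.
const newPhonemeFeatures = { ...seed, ...newFeatures };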
@@ -210,7 +205,7 @@ const transformLexemeInitial = (newLexeme, pre, post, position, phoneme, index,
   if (index !== pre.length - 1) return [...newLexeme, phoneme];
   if (!isEnvironmentBoundByRule([phoneme], position)) return [...newLexeme, phoneme];
   if (!isEnvironmentBoundByRule(lexemeBundle.slice(index, index + post.length), post)) return [...newLexeme, phoneme];
-  const newPhoneme = swapPhoneme(phoneme, newFeatures[0], features);
+  const newPhoneme = transformPhoneme(phoneme, newFeatures[0], features);
   return [...newLexeme, newPhoneme];
 }
 
@@ -218,7 +213,7 @@ const transformLexemeCoda = (newLexeme, pre, post, position, phoneme, index, lex
   if (index + post.length !== lexemeBundle.length) return [...newLexeme, phoneme];
   if (!isEnvironmentBoundByRule(lexemeBundle.slice(index - pre.length, index), pre)) return [...newLexeme, phoneme];
   if (!isEnvironmentBoundByRule([phoneme], position)) return [...newLexeme, phoneme];
-  const newPhoneme = swapPhoneme(phoneme, newFeatures[0], features);
+  const newPhoneme = transformPhoneme(phoneme, newFeatures[0], features);
   return [...newLexeme, newPhoneme];
 }
 
@@ -231,7 +226,7 @@ export const transformLexeme = (lexemeBundle, rule, features) => {
     if (!isEnvironmentBoundByRule(lexemeBundle.slice(index - pre.length, index), pre)) return [...newLexeme, phoneme];
     if (!isEnvironmentBoundByRule([phoneme], position)) return [...newLexeme, phoneme];
     if (!isEnvironmentBoundByRule(lexemeBundle.slice(index, index + post.length), post)) return [...newLexeme, phoneme];
-    const newPhoneme = swapPhoneme(phoneme, rule.newFeatures[0], features);
+    const newPhoneme = transformPhoneme(phoneme, rule.newFeatures[0], features);
     // if deletion occurs
     if (!newPhoneme.grapheme) return [ ...newLexeme] ;
     return [...newLexeme, newPhoneme];
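The '// if deletion occurs' branch is what lets rules with a 0 result side (such as '[+ sonorant - low rounded high back]>0/._.' above) drop a segment: transformPhoneme presumably returns a phoneme without a grapheme in that case, and transformLexeme skips it instead of appending. A minimal sketch of that branch with stand-in phoneme objects (appendOrDrop is hypothetical, not a repo function):

// A phoneme with no grapheme is dropped from the rebuilt lexeme; anything else is appended.
const appendOrDrop = (newLexeme, newPhoneme) =>
  !newPhoneme.grapheme ? [...newLexeme] : [...newLexeme, newPhoneme];
console.log(appendOrDrop([{ grapheme: 'a' }], {}));                // [ { grapheme: 'a' } ]
console.log(appendOrDrop([{ grapheme: 'a' }], { grapheme: 'n' })); // [ { grapheme: 'a' }, { grapheme: 'n' } ]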
@@ -69,7 +69,7 @@ describe('Results', () => {
     expect(transformLexeme(lexemeBundle, rule, initState().features)).toEqual(resultsLexeme)
   })
 
-  it('results returned from first sound change rule', () => {
+  it('results returned from first sound change rule (feature matching)', () => {
     const action = {type: 'RUN'};
     state = initState(1)
     expect(stateReducer(state, action).results).toEqual([
@@ -82,7 +82,7 @@ describe('Results', () => {
     ]);
   });
 
-  it('results returned through second sound change rule', () => {
+  it('results returned through second sound change rule (phoneme matching)', () => {
     const action = {type: 'RUN'};
     state = initState(2)
     expect(stateReducer(state, action).results).toEqual([
@@ -95,7 +95,7 @@ describe('Results', () => {
     ]);
   });
 
-  it('results returned through third sound change rule', () => {
+  it('results returned through third sound change rule (phoneme dropping)', () => {
     const action = {type: 'RUN'};
     state = initState(3)
     expect(stateReducer(state, action).results).toEqual([
@@ -108,7 +108,7 @@ describe('Results', () => {
     ]);
   });
 
-  it('results returned through fourth sound change rule', () => {
+  it('results returned through fourth sound change rule (lexeme initial environment)', () => {
     const action = {type: 'RUN'};
     state = initState(4)
     expect(stateReducer(state, action).results).toEqual([
@@ -121,7 +121,7 @@ describe('Results', () => {
     ]);
   });
 
-  it('results returned through fifth sound change rule', () => {
+  it('results returned through fifth sound change rule (lexeme final environment)', () => {
     const action = {type: 'RUN'};
     state = initState(5)
     expect(stateReducer(state, action).results).toEqual([
@@ -135,18 +135,18 @@ describe('Results', () => {
   });
 
 
-  // it('results returned through sixth sound change rule', () => {
-  //   const action = {type: 'RUN'};
-  //   state = initState(5)
-  //   expect(stateReducer(state, action).results).toEqual([
-  //     {
-  //       pass: 'epoch 1',
-  //       lexicon: [
-  //         'anunu', 'anat', 'ant', 'anunu', 'tʰan', 'nunu'
-  //       ]
-  //     }
-  //   ]);
-  // });
+  it('results returned through sixth sound change rule (multi-phoneme target)', () => {
+    const action = {type: 'RUN'};
+    state = initState(5)
+    expect(stateReducer(state, action).results).toEqual([
+      {
+        pass: 'epoch 1',
+        lexicon: [
+          'annu', 'anta', 'ant', 'annu', 'tʰan', 'nnu'
+        ]
+      }
+    ]);
+  });
 
 });
 