refactor(assembler/core): minor simplify
exuanbo committed Dec 11, 2023
1 parent 2bde142 commit ee38719
Showing 3 changed files with 33 additions and 33 deletions.
src/features/assembler/core/exceptions.ts (2 changes: 1 addition & 1 deletion)
@@ -111,7 +111,7 @@ export class AddressError extends ParseError {
 }
 
 export class OperandTypeError extends ParseError {
-  constructor({ raw, range }: Token, ...expectedTypes: OperandType[]) {
+  constructor({ raw, range }: Token, expectedTypes: OperandType[]) {
    const types = expectedTypes
      .map((type) => type.replace(/[A-Z]/g, (char) => ` ${char.toLowerCase()}`).trimStart())
      .reduce((acc, cur, idx) => {
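The change above swaps a rest parameter for a plain array parameter, so call sites pass the list of expected types as one value instead of spreading it (the parser.ts hunks below show exactly that). A minimal sketch of the two calling conventions, using simplified stand-in types rather than the project's real Token and ParseError:

```ts
// Stand-ins for illustration only; the real OperandType and error
// classes live in the files shown in this commit.
type OperandType = 'Register' | 'Number' | 'Address'

// Before: rest parameter, so callers enumerate or spread the types.
class OperandTypeErrorBefore extends Error {
  constructor(raw: string, ...expectedTypes: OperandType[]) {
    super(`unexpected '${raw}', expected ${expectedTypes.join(' or ')}`)
  }
}

// After: one array parameter, so callers pass the list directly.
class OperandTypeErrorAfter extends Error {
  constructor(raw: string, expectedTypes: OperandType[]) {
    super(`unexpected '${raw}', expected ${expectedTypes.join(' or ')}`)
  }
}

new OperandTypeErrorBefore('AX', 'Register', 'Number')
new OperandTypeErrorAfter('AX', ['Register', 'Number'])
```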
src/features/assembler/core/parser.ts (44 changes: 22 additions & 22 deletions)
@@ -222,13 +222,13 @@ const parseSingleOperand =
       }
       break
    }
-    throw new OperandTypeError(token, ...expectedTypes)
+    throw new OperandTypeError(token, expectedTypes)
  }
 
 const parseDoubleOperands =
   (tokenizer: Tokenizer) =>
   <T1 extends OperandType, T2 extends OperandType>(
-    ...expectedTypePairs: Array<[firstOperandType: T1, secondOperandType: T2]>
+    expectedTypePairs: Array<[firstOperandType: T1, secondOperandType: T2]>,
   ): [firstOperand: Operand<T1>, secondOperand: Operand<T2>] => {
     const possibleFirstOperandTypes: T1[] = []
     expectedTypePairs.forEach(([firstOperandType]) => {
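The same rest-to-array change lands in the curried parseDoubleOperands helper, and its generics still infer the operand types from the literal pairs. A trimmed-down sketch of that shape, with hypothetical simplified types standing in for the project's real Operand and tokenizer plumbing:

```ts
type OperandType = 'Register' | 'Number' | 'Address'

interface Operand<T extends OperandType = OperandType> {
  type: T
  value: string
}

// Curried like the original: first the input, then the allowed pairs.
const parseDoubleOperands =
  (tokens: string[]) =>
  <T1 extends OperandType, T2 extends OperandType>(
    expectedTypePairs: Array<[firstOperandType: T1, secondOperandType: T2]>,
  ): [Operand<T1>, Operand<T2>] => {
    // The real parser validates the tokens against every allowed pair;
    // this sketch just fabricates operands from the first pair.
    const [firstType, secondType] = expectedTypePairs[0]
    return [
      { type: firstType, value: tokens[0] },
      { type: secondType, value: tokens[1] },
    ]
  }

// After the refactor, call sites pass a single array literal, which also
// allows a trailing comma after the last pair:
const [dst, src] = parseDoubleOperands(['AL', '01'])([
  ['Register', 'Register'],
  ['Register', 'Number'],
])
```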
@@ -382,10 +382,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
 
   switch (mnemonic as MnemonicWithTwoOperands) {
     case Mnemonic.ADD:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.ADD_REG_TO_REG
@@ -396,10 +396,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.SUB:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.SUB_REG_FROM_REG
@@ -410,10 +410,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.MUL:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.MUL_REG_BY_REG
@@ -424,10 +424,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.DIV:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.DIV_REG_BY_REG
@@ -438,10 +438,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.MOD:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.MOD_REG_BY_REG
@@ -452,10 +452,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.AND:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.AND_REG_WITH_REG
@@ -466,10 +466,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.OR:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.OR_REG_WITH_REG
@@ -480,10 +480,10 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.XOR:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.XOR_REG_WITH_REG
@@ -494,13 +494,13 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.MOV:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Number],
         [OperandType.Register, OperandType.Address],
         [OperandType.Address, OperandType.Register],
         [OperandType.Register, OperandType.RegisterAddress],
         [OperandType.RegisterAddress, OperandType.Register],
-      )
+      ])
       switch (firstOperand.type) {
         case OperandType.Register:
           switch (secondOperand.type) {
@@ -526,11 +526,11 @@ const parseStatement = (tokenizer: Tokenizer): Statement => {
       }
       break
     case Mnemonic.CMP:
-      ;[firstOperand, secondOperand] = parseOperands(
+      ;[firstOperand, secondOperand] = parseOperands([
         [OperandType.Register, OperandType.Register],
         [OperandType.Register, OperandType.Number],
         [OperandType.Register, OperandType.Address],
-      )
+      ])
       switch (secondOperand.type) {
         case OperandType.Register:
           opcode = Opcode.CMP_REG_WITH_REG
src/features/assembler/core/tokenizer.ts (20 changes: 10 additions & 10 deletions)
@@ -40,7 +40,7 @@ const tokenRules: readonly TokenRule[] = [
 ]
 
 const tokenRegExpSource = tokenRules.map(({ pattern }) => `(${pattern.source})`).join('|')
-const createTokenRegExp = (): RegExp => new RegExp(tokenRegExpSource, 'y')
+const createTokenRegExp = () => new RegExp(tokenRegExpSource, 'y')
 
 export interface Token {
   type: TokenType
@@ -73,6 +73,9 @@ const createToken = (type: TokenType, value: string, index: number): Token => {
   }
 }
 
+const shouldSkip = (token: Token) =>
+  token.type === TokenType.Whitespace || token.type === TokenType.Comment
+
 interface TokenStream {
   next: () => Token | null
 }
@@ -83,12 +86,12 @@ const createTokenStream = (source: string): TokenStream => {
     next: () => {
       const startIndex = regexp.lastIndex
       const match = regexp.exec(source)
-      if (match !== null && match.index === startIndex) {
+      if (match) {
         for (let ruleIndex = 0; ; ruleIndex++) {
           const value = match[ruleIndex + 1]
-          if (value !== undefined) {
+          if (value) {
             const { type } = tokenRules[ruleIndex]
-            return createToken(type, value, match.index)
+            return createToken(type, value, startIndex)
           }
         }
       }
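Both simplifications in this hunk lean on the sticky ('y') flag that createTokenRegExp already sets: a sticky regex matches only at lastIndex, so whenever exec succeeds, match.index necessarily equals the saved startIndex. That makes the old match.index === startIndex guard redundant and the two index values interchangeable. (The switch from value !== undefined to if (value) additionally assumes that no token rule can capture an empty string, since '' is falsy.) A standalone sketch of the sticky behavior:

```ts
// Sticky ('y') vs global ('g'): only the global regex scans forward.
const sticky = /\w+/y
const searching = /\w+/g

sticky.lastIndex = 3 // points at the space in 'foo bar'
searching.lastIndex = 3

console.log(sticky.exec('foo bar')) // null: sticky will not skip ahead
console.log(searching.exec('foo bar')?.index) // 4: global scans to 'bar'

sticky.lastIndex = 4
const match = sticky.exec('foo bar')
// A successful sticky match starts exactly at lastIndex, so
// match.index === startIndex holds by construction.
console.log(match?.index === 4) // true
```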
@@ -133,26 +136,23 @@ export const createTokenizer = (source: string): Tokenizer => {
     let token: Token | null = null
     do {
       token = stream.next()
-    } while (
-      token !== null &&
-      (token.type === TokenType.Whitespace || token.type === TokenType.Comment)
-    )
+    } while (token && shouldSkip(token))
     return token
   }
 
   let current: Token | null = streamNext()
   let next: Token | null = streamNext()
 
   const assertCurrent = (): Token => {
-    if (current === null) {
+    if (!current) {
       throw new EndOfTokenStreamError()
     }
     return current
   }
 
   const tokenizer: Tokenizer = {
     hasMore: () => {
-      return current !== null
+      return !!current
     },
     peek: () => {
       return current
