diff --git a/Examples/transformers-cli/Sources/transformers-cli/Transformers.swift b/Examples/transformers-cli/Sources/transformers-cli/Transformers.swift
index cb24e3ca..0ff53b5d 100644
--- a/Examples/transformers-cli/Sources/transformers-cli/Transformers.swift
+++ b/Examples/transformers-cli/Sources/transformers-cli/Transformers.swift
@@ -125,7 +125,7 @@ enum ComputeUnits: String, ExpressibleByArgument, CaseIterable {
 
 /// Returns a cleaned and formatted version of the response.
 ///
-/// - Parameter respone: The response to clean and format.
+/// - Parameter response: The response to clean and format.
 /// - Returns: A 'user friendly' representation of the generated response.
 private func formatResponse(_ response: String) -> String {
     response
diff --git a/Sources/Models/LanguageModel.swift b/Sources/Models/LanguageModel.swift
index 59203c6a..044fd198 100644
--- a/Sources/Models/LanguageModel.swift
+++ b/Sources/Models/LanguageModel.swift
@@ -236,8 +236,7 @@ public extension LanguageModel {
 
     /// Determines the type of KV Cache available for the model, if any.
     ///
-    /// - Parameters:
-    ///   - model: The Core ML model
+    /// - Parameter model: The Core ML model
     /// - Returns: The type of KV Cache available.
     fileprivate static func kvCacheAvailability(for model: MLModel) -> KVCacheAvailability? {
         func isStatefulKVCacheAvailable(for model: MLModel) -> Bool {
diff --git a/Sources/Models/LanguageModelTypes.swift b/Sources/Models/LanguageModelTypes.swift
index 0e7e1909..328772d8 100644
--- a/Sources/Models/LanguageModelTypes.swift
+++ b/Sources/Models/LanguageModelTypes.swift
@@ -57,9 +57,9 @@ public extension LanguageModelProtocol {
     /// This provides a more convenient syntax for calling `predictNextTokenScores`.
     ///
     /// - Parameters:
-    ///   - tokens: The input token sequence
-    ///   - config: The generation configuration containing model parameters
-    /// - Returns: A shaped array containing the logits for the next token prediction
+    ///   - input: The input sequence tensor.
+    ///   - config: The generation configuration containing model parameters.
+    /// - Returns: MLTensor with the raw scores of the next token.
     func callAsFunction(_ input: MLTensor, config: GenerationConfig) async -> MLTensor {
         await predictNextTokenScores(input, config: config)
     }
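
For context, a minimal usage sketch of the `callAsFunction` convenience documented in the last hunk. The wrapper name `scoresForNextToken` and the surrounding setup are illustrative assumptions, not part of the library; only `callAsFunction` and `predictNextTokenScores` come from the diff above.

```swift
import CoreML
// Also import the swift-transformers module that exposes LanguageModelProtocol
// and GenerationConfig (e.g. `import Models`), depending on your package setup.

/// Hypothetical helper showing the call-as-function sugar in use.
func scoresForNextToken(
    model: some LanguageModelProtocol,
    inputIds: MLTensor,
    config: GenerationConfig
) async -> MLTensor {
    // Calling the model like a function forwards to `predictNextTokenScores`,
    // so this is equivalent to:
    //     await model.predictNextTokenScores(inputIds, config: config)
    await model(inputIds, config: config)
}
```

Note that the doc fix renames the documented parameter from `tokens` to `input`, matching the actual signature the sketch relies on.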