diff --git a/Package.swift b/Package.swift
index f925f7ac..e82fb606 100644
--- a/Package.swift
+++ b/Package.swift
@@ -9,6 +9,8 @@ let package = Package(
     platforms: [
         .iOS(.v16),
         .macOS(.v13),
+        .watchOS(.v10),
+        .visionOS(.v1)
     ],
     products: [
         .library(
diff --git a/Sources/WhisperKit/Core/Audio/AudioChunker.swift b/Sources/WhisperKit/Core/Audio/AudioChunker.swift
index 9092aff5..79e286ff 100644
--- a/Sources/WhisperKit/Core/Audio/AudioChunker.swift
+++ b/Sources/WhisperKit/Core/Audio/AudioChunker.swift
@@ -6,12 +6,10 @@ import AVFoundation
 import Foundation
 
 /// Responsible for chunking audio into smaller pieces
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol AudioChunking {
     func chunkAll(audioArray: [Float], maxChunkLength: Int, decodeOptions: DecodingOptions?) async throws -> [AudioChunk]
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioChunking {
     func updateSeekOffsetsForResults(
         chunkedResults: [Result<[TranscriptionResult], Swift.Error>],
@@ -42,7 +40,6 @@ public extension AudioChunking {
 }
 
 /// A audio chunker that splits audio into smaller pieces based on voice activity detection
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class VADAudioChunker: AudioChunking {
     /// prevent hallucinations at the end of the clip by stopping up to 1.0s early
     private let windowPadding: Int
diff --git a/Sources/WhisperKit/Core/Audio/AudioProcessor.swift b/Sources/WhisperKit/Core/Audio/AudioProcessor.swift
index 4645d2d8..c5bd97c6 100644
--- a/Sources/WhisperKit/Core/Audio/AudioProcessor.swift
+++ b/Sources/WhisperKit/Core/Audio/AudioProcessor.swift
@@ -123,7 +123,6 @@ public extension AudioProcessing {
     /// Loads and converts audio data from a specified file paths.
     /// - Parameter audioPaths: The file paths of the audio files.
     /// - Returns: `AVAudioPCMBuffer` containing the audio data.
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     static func loadAudioAsync(fromPath audioFilePath: String) async throws -> AVAudioPCMBuffer {
         return try await Task {
             try AudioProcessor.loadAudio(fromPath: audioFilePath)
@@ -191,7 +190,6 @@ public extension AudioProcessing {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class AudioProcessor: NSObject, AudioProcessing {
     private var lastInputDevice: DeviceID?
     public var audioEngine: AVAudioEngine?
@@ -886,7 +884,6 @@ open class AudioProcessor: NSObject, AudioProcessing {
 
 // MARK: - Streaming
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioProcessor {
     /// We have a new buffer, process and store it.
     /// NOTE: Assumes audio is 16khz mono
diff --git a/Sources/WhisperKit/Core/Audio/AudioStreamTranscriber.swift b/Sources/WhisperKit/Core/Audio/AudioStreamTranscriber.swift
index 6a197590..c3bf7709 100644
--- a/Sources/WhisperKit/Core/Audio/AudioStreamTranscriber.swift
+++ b/Sources/WhisperKit/Core/Audio/AudioStreamTranscriber.swift
@@ -3,7 +3,6 @@
 
 import Foundation
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioStreamTranscriber {
     struct State {
         public var isRecording: Bool = false
@@ -18,11 +17,9 @@ public extension AudioStreamTranscriber {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public typealias AudioStreamTranscriberCallback = (AudioStreamTranscriber.State, AudioStreamTranscriber.State) -> Void
 
 /// Responsible for streaming audio from the microphone, processing it, and transcribing it in real-time.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public actor AudioStreamTranscriber {
     private var state: AudioStreamTranscriber.State = .init() {
         didSet {
diff --git a/Sources/WhisperKit/Core/Audio/EnergyVAD.swift b/Sources/WhisperKit/Core/Audio/EnergyVAD.swift
index b9b9251e..0228d9a0 100644
--- a/Sources/WhisperKit/Core/Audio/EnergyVAD.swift
+++ b/Sources/WhisperKit/Core/Audio/EnergyVAD.swift
@@ -4,7 +4,6 @@
 
 import Foundation
 
 /// Voice activity detection based on energy threshold
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public final class EnergyVAD: VoiceActivityDetector {
     public let energyThreshold: Float
diff --git a/Sources/WhisperKit/Core/Audio/VoiceActivityDetector.swift b/Sources/WhisperKit/Core/Audio/VoiceActivityDetector.swift
index 4f3c62a6..bb00e722 100644
--- a/Sources/WhisperKit/Core/Audio/VoiceActivityDetector.swift
+++ b/Sources/WhisperKit/Core/Audio/VoiceActivityDetector.swift
@@ -5,7 +5,6 @@ import Foundation
 
 /// A base class for Voice Activity Detection (VAD), used to identify and separate segments of audio that contain human speech from those that do not.
 /// Subclasses must implement the `voiceActivity(in:)` method to provide specific voice activity detection functionality.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class VoiceActivityDetector {
     /// The sample rate of the audio signal, in samples per second.
     public let sampleRate: Int
diff --git a/Sources/WhisperKit/Core/AudioEncoder.swift b/Sources/WhisperKit/Core/AudioEncoder.swift
index 9205a5c7..64b083d2 100644
--- a/Sources/WhisperKit/Core/AudioEncoder.swift
+++ b/Sources/WhisperKit/Core/AudioEncoder.swift
@@ -7,7 +7,6 @@ public protocol AudioEncoderOutputType {}
 extension MLMultiArray: AudioEncoderOutputType {}
 
 /// AudioEncoding protocol defines the requirements for an audio encoding implementation.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol AudioEncoding {
     /// The size of the embedding produced by the encoder.
     var embedSize: Int? { get }
@@ -19,7 +18,6 @@ public protocol AudioEncoding {
 }
 
 /// Backwards-compatible AudioEncoder implementation
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoder: AudioEncoding, WhisperMLModel {
     public var model: MLModel?
diff --git a/Sources/WhisperKit/Core/Configurations.swift b/Sources/WhisperKit/Core/Configurations.swift
index eb0fbd76..7271ae84 100644
--- a/Sources/WhisperKit/Core/Configurations.swift
+++ b/Sources/WhisperKit/Core/Configurations.swift
@@ -4,7 +4,6 @@ import Foundation
 
 /// Configuration to initialize WhisperKit
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class WhisperKitConfig {
     /// Name for whisper model to use
     public var model: String?
@@ -126,7 +125,6 @@ open class WhisperKitConfig {
 ///   - firstTokenLogProbThreshold: If the log probability over the first sampled token is below this value, treat as failed.
 ///   - noSpeechThreshold: If the no speech probability is higher than this value AND the average log
 ///     probability over sampled tokens is below `logProbThreshold`, consider the segment as silent.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingOptions: Codable, Sendable {
     public var verbose: Bool
     public var task: DecodingTask
diff --git a/Sources/WhisperKit/Core/FeatureExtractor.swift b/Sources/WhisperKit/Core/FeatureExtractor.swift
index 66160271..c589f413 100644
--- a/Sources/WhisperKit/Core/FeatureExtractor.swift
+++ b/Sources/WhisperKit/Core/FeatureExtractor.swift
@@ -16,7 +16,6 @@ public protocol FeatureExtracting {
     func logMelSpectrogram(fromAudio inputAudio: any AudioProcessorOutputType) async throws -> (any FeatureExtractorOutputType)?
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class FeatureExtractor: FeatureExtracting, WhisperMLModel {
     public var model: MLModel?
diff --git a/Sources/WhisperKit/Core/Models.swift b/Sources/WhisperKit/Core/Models.swift
index 15e7914f..496c1f09 100644
--- a/Sources/WhisperKit/Core/Models.swift
+++ b/Sources/WhisperKit/Core/Models.swift
@@ -134,7 +134,6 @@ public enum ModelState: CustomStringConvertible {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct ModelComputeOptions: Sendable {
     public var melCompute: MLComputeUnits
     public var audioEncoderCompute: MLComputeUnits
@@ -252,7 +251,6 @@ public struct ModelSupportConfig: Codable, Sendable {
         computeDisabledModels()
     }
 
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     public func modelSupport(for deviceIdentifier: String = WhisperKit.deviceName()) -> ModelSupport {
         // Find the support with the longest matching identifier prefix
         // i.e. `iPad13,16` should match exact `iPad13,16` instead of first prefix like `iPad13,1`
@@ -409,7 +407,6 @@ public enum ChunkingStrategy: String, Codable, CaseIterable {
     case vad
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingFallback: Sendable {
     public var needsFallback: Bool
     public var fallbackReason: String
@@ -420,7 +417,6 @@ public struct DecodingFallback: Sendable {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension DecodingFallback {
     init?(
         options: DecodingOptions,
@@ -447,7 +443,6 @@ public extension DecodingFallback {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingResult {
     public var language: String
     public var languageProbs: [String: Float]
@@ -900,7 +895,6 @@ public class MelSpectrogramInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class MelSpectrogramOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -937,7 +931,6 @@ public class MelSpectrogramOutput: MLFeatureProvider {
 // MARK: AudioEncoder
 
 /// Model Prediction Input Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoderInput: MLFeatureProvider {
     /// melspectrogram_features as 1 × {80,128} × 1 × 3000 4-dimensional array of floats
     public var melspectrogram_features: MLMultiArray
@@ -963,7 +956,6 @@ public class AudioEncoderInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoderOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -1000,7 +992,6 @@ public class AudioEncoderOutput: MLFeatureProvider {
 // MARK: TextDecoder
 
 /// Model Prediction Input Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderInput: MLFeatureProvider {
     /// input_ids as 1 element vector of 32-bit integers
     public var input_ids: MLMultiArray
@@ -1068,7 +1059,6 @@ public class TextDecoderInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -1175,7 +1165,6 @@ public class TextDecoderCachePrefillInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderCachePrefillOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
diff --git a/Sources/WhisperKit/Core/Text/LogitsFilter.swift b/Sources/WhisperKit/Core/Text/LogitsFilter.swift
index 174bb50f..b3f18f19 100644
--- a/Sources/WhisperKit/Core/Text/LogitsFilter.swift
+++ b/Sources/WhisperKit/Core/Text/LogitsFilter.swift
@@ -10,7 +10,6 @@ public protocol LogitsFiltering {
     func filterLogits(_ logits: MLMultiArray, withTokens tokens: [Int]) -> MLMultiArray
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class SuppressTokensFilter: LogitsFiltering {
     let suppressTokens: [Int]
     private let suppressTokenIndexes: [[NSNumber]]
@@ -26,7 +25,6 @@ open class SuppressTokensFilter: LogitsFiltering {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class SuppressBlankFilter: LogitsFiltering {
     let specialTokens: SpecialTokens
     let sampleBegin: Int
@@ -54,7 +52,6 @@ open class SuppressBlankFilter: LogitsFiltering {
 }
 
 /// Implementation based on https://github.com/openai/whisper/blob/master/whisper/decoding.py#L441
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class TimestampRulesFilter: LogitsFiltering {
     let specialTokens: SpecialTokens
     let sampleBegin: Int
@@ -246,7 +243,6 @@ open class TimestampRulesFilter: LogitsFiltering {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class LanguageLogitsFilter: LogitsFiltering {
     let allLanguageTokens: Set
     let logitsDim: Int
diff --git a/Sources/WhisperKit/Core/Text/SegmentSeeker.swift b/Sources/WhisperKit/Core/Text/SegmentSeeker.swift
index 459d85eb..fbc4379f 100644
--- a/Sources/WhisperKit/Core/Text/SegmentSeeker.swift
+++ b/Sources/WhisperKit/Core/Text/SegmentSeeker.swift
@@ -6,7 +6,6 @@ import CoreML
 import Foundation
 import Tokenizers
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol SegmentSeeking {
     func findSeekPointAndSegments(
         decodingResult: DecodingResult,
@@ -34,7 +33,6 @@ public protocol SegmentSeeking {
     ) throws -> [TranscriptionSegment]?
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class SegmentSeeker: SegmentSeeking {
     public init() {}
diff --git a/Sources/WhisperKit/Core/Text/TokenSampler.swift b/Sources/WhisperKit/Core/Text/TokenSampler.swift
index aaa7010c..8d710518 100644
--- a/Sources/WhisperKit/Core/Text/TokenSampler.swift
+++ b/Sources/WhisperKit/Core/Text/TokenSampler.swift
@@ -26,7 +26,6 @@ public struct SamplingResult: Sendable {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class GreedyTokenSampler: TokenSampling {
     public var temperature: FloatType
     public var eotToken: Int
diff --git a/Sources/WhisperKit/Core/TextDecoder.swift b/Sources/WhisperKit/Core/TextDecoder.swift
index c110f550..0b826a11 100644
--- a/Sources/WhisperKit/Core/TextDecoder.swift
+++ b/Sources/WhisperKit/Core/TextDecoder.swift
@@ -57,7 +57,6 @@ public protocol DecodingInputsType {
     func reset(prefilledCacheSize: Int, maxTokenContext: Int)
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol TextDecoding {
     var tokenizer: WhisperTokenizer? { get set }
     var prefillData: WhisperMLModel? { get set }
@@ -131,7 +130,6 @@ public protocol TextDecoding {
     )
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension TextDecoding {
     @available(*, deprecated, message: "Subject to removal in a future version. Use `decodeText(from:using:sampler:options:callback:) async throws -> DecodingResult` instead.")
     func decodeText(
@@ -484,7 +482,6 @@ public class TextDecoderContextPrefill: WhisperMLModel {
     public var model: MLModel?
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class TextDecoder: TextDecoding, WhisperMLModel {
     public var model: MLModel?
     public var tokenizer: WhisperTokenizer?
diff --git a/Sources/WhisperKit/Core/TranscribeTask.swift b/Sources/WhisperKit/Core/TranscribeTask.swift
index c9751481..1f35f7a4 100644
--- a/Sources/WhisperKit/Core/TranscribeTask.swift
+++ b/Sources/WhisperKit/Core/TranscribeTask.swift
@@ -5,7 +5,6 @@ import CoreML
 import Foundation
 
 /// Responsible for transcribing audio chunk to text using the provided models and configurations.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 final class TranscribeTask {
     private var timings: TranscriptionTimings
     private let progress: Progress
diff --git a/Sources/WhisperKit/Core/WhisperKit.swift b/Sources/WhisperKit/Core/WhisperKit.swift
index 5a5fd38b..b9f51c28 100644
--- a/Sources/WhisperKit/Core/WhisperKit.swift
+++ b/Sources/WhisperKit/Core/WhisperKit.swift
@@ -9,7 +9,6 @@ import Hub
 import TensorUtils
 import Tokenizers
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class WhisperKit {
     /// Models
     public private(set) var modelVariant: ModelVariant = .tiny
diff --git a/Sources/WhisperKit/Utilities/Concurrency.swift b/Sources/WhisperKit/Utilities/Concurrency.swift
index 58c94d60..25f41c07 100644
--- a/Sources/WhisperKit/Utilities/Concurrency.swift
+++ b/Sources/WhisperKit/Utilities/Concurrency.swift
@@ -4,7 +4,6 @@ import Foundation
 
 /// An actor that provides thread-safe early stopping functionality using UUIDs as keys
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public actor EarlyStopActor {
     private var shouldStop = [UUID: Bool]()
diff --git a/Sources/WhisperKit/Utilities/Extensions+Internal.swift b/Sources/WhisperKit/Utilities/Extensions+Internal.swift
index e2cd7045..1af90efa 100644
--- a/Sources/WhisperKit/Utilities/Extensions+Internal.swift
+++ b/Sources/WhisperKit/Utilities/Extensions+Internal.swift
@@ -131,7 +131,6 @@ extension AudioProcessing {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 extension DecodingOptions {
     func prepareSeekClips(contentFrames: Int) -> [(start: Int, end: Int)] {
         var seekPoints: [Int] = clipTimestamps.map { Int(round($0 * Float(WhisperKit.sampleRate))) }
diff --git a/Sources/WhisperKit/Utilities/Extensions+Public.swift b/Sources/WhisperKit/Utilities/Extensions+Public.swift
index ac621f60..40839891 100644
--- a/Sources/WhisperKit/Utilities/Extensions+Public.swift
+++ b/Sources/WhisperKit/Utilities/Extensions+Public.swift
@@ -10,7 +10,6 @@ public extension Array where Element == TranscriptionSegment {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension WhisperKit {
     static var isRunningOnSimulator: Bool {
         #if targetEnvironment(simulator)
@@ -56,7 +55,6 @@ public extension String {
 // MARK: CoreML
 
 public extension MLMultiArray {
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     convenience init(shape: [NSNumber], dataType: MLMultiArrayDataType, initialValue: Any) throws {
         switch dataType {
             case .float16:
@@ -300,7 +298,6 @@ public func resolveAbsolutePath(_ inputPath: String) -> String {
 
 @available(*, deprecated, message: "Subject to removal in a future version. Use `ModelUtilities.formatModelFiles(_:)` instead.")
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension WhisperKit {
     static func formatModelFiles(_ modelFiles: [String]) -> [String] {
         return ModelUtilities.formatModelFiles(modelFiles)
diff --git a/Sources/WhisperKit/Utilities/ModelUtilities.swift b/Sources/WhisperKit/Utilities/ModelUtilities.swift
index 9292eb6b..5af4d950 100644
--- a/Sources/WhisperKit/Utilities/ModelUtilities.swift
+++ b/Sources/WhisperKit/Utilities/ModelUtilities.swift
@@ -11,7 +11,6 @@ public struct ModelUtilities {
     // MARK: Public
 
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     public static func modelSupport(for deviceName: String, from config: ModelSupportConfig? = nil) -> ModelSupport {
         let config = config ?? Constants.fallbackModelSupportConfig
         let modelSupport = config.modelSupport(for: deviceName)
@@ -274,14 +273,12 @@ public func loadTokenizer(
 }
 
 @available(*, deprecated, message: "Subject to removal in a future version. Use ModelUtilities.modelSupport(for:from:) -> ModelSupport instead.")
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public func modelSupport(for deviceName: String, from config: ModelSupportConfig? = nil) -> ModelSupport {
     return ModelUtilities.modelSupport(for: deviceName, from: config)
 }
 
 @available(*, deprecated, message: "Subject to removal in a future version. Use ModelUtilities.modelSupport(for:from:) -> ModelSupport instead.")
 @_disfavoredOverload
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public func modelSupport(for deviceName: String, from config: ModelSupportConfig? = nil) -> (default: String, disabled: [String]) {
     let modelSupport = ModelUtilities.modelSupport(for: deviceName, from: config)
     return (modelSupport.default, modelSupport.disabled)
diff --git a/Sources/WhisperKit/Utilities/TranscriptionUtilities.swift b/Sources/WhisperKit/Utilities/TranscriptionUtilities.swift
index 9d53ff54..1c1dcf61 100644
--- a/Sources/WhisperKit/Utilities/TranscriptionUtilities.swift
+++ b/Sources/WhisperKit/Utilities/TranscriptionUtilities.swift
@@ -52,7 +52,6 @@ public struct TranscriptionUtilities {
     ///   - segment: The transcription segment to update
     ///   - seekTime: The time offset to add to all timings
     /// - Returns: Updated transcription segment with adjusted timings
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     public static func updateSegmentTimings(segment: TranscriptionSegment, seekTime: Float) -> TranscriptionSegment {
         var updatedSegment = segment
         let seekOffsetIndex = Int(seekTime * Float(WhisperKit.sampleRate))
@@ -74,7 +73,6 @@ public struct TranscriptionUtilities {
     ///   - results: Array of transcription results to merge
     ///   - confirmedWords: Optional array of confirmed word timings to use instead of merging text
     /// - Returns: A single merged transcription result
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     public static func mergeTranscriptionResults(_ results: [TranscriptionResult?], confirmedWords: [WordTiming]? = nil) -> TranscriptionResult {
         var mergedText = ""
         if let words = confirmedWords {
@@ -176,13 +174,11 @@ public func findLongestDifferentSuffix(_ words1: [WordTiming], _ words2: [WordTi
 }
 
 @available(*, deprecated, message: "Subject to removal in a future version. Use `TranscriptionUtilities.mergeTranscriptionResults(_:confirmedWords:)` instead.")
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public func mergeTranscriptionResults(_ results: [TranscriptionResult?], confirmedWords: [WordTiming]? = nil) -> TranscriptionResult {
     return TranscriptionUtilities.mergeTranscriptionResults(results, confirmedWords: confirmedWords)
 }
 
 @available(*, deprecated, message: "Subject to removal in a future version. Use `TranscriptionUtilities.updateSegmentTimings(segment:seekTime:)` instead.")
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public func updateSegmentTimings(segment: TranscriptionSegment, seekTime: Float) -> TranscriptionSegment {
     return TranscriptionUtilities.updateSegmentTimings(segment: segment, seekTime: seekTime)
 }
diff --git a/Sources/WhisperKitCLI/Server/OpenAIHandler.swift b/Sources/WhisperKitCLI/Server/OpenAIHandler.swift
index f9825b26..aafe7913 100644
--- a/Sources/WhisperKitCLI/Server/OpenAIHandler.swift
+++ b/Sources/WhisperKitCLI/Server/OpenAIHandler.swift
@@ -7,7 +7,6 @@ import OpenAPIRuntime
 import OpenAPIVapor
 @preconcurrency import WhisperKit
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 struct OpenAIHandler: APIProtocol {
     let whisperKit: WhisperKit
     private let logger: Logger
diff --git a/Sources/WhisperKitCLI/Server/ServeCLI.swift b/Sources/WhisperKitCLI/Server/ServeCLI.swift
index 6aac9c7c..26ceee1d 100644
--- a/Sources/WhisperKitCLI/Server/ServeCLI.swift
+++ b/Sources/WhisperKitCLI/Server/ServeCLI.swift
@@ -10,7 +10,6 @@ import OpenAPIRuntime
 import OpenAPIVapor
 import AVFoundation
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 struct ServeCLI: AsyncParsableCommand {
     static let configuration = CommandConfiguration(
         commandName: "serve",
diff --git a/Sources/WhisperKitCLI/TranscribeCLI.swift b/Sources/WhisperKitCLI/TranscribeCLI.swift
index 62b15404..0ce09a4f 100644
--- a/Sources/WhisperKitCLI/TranscribeCLI.swift
+++ b/Sources/WhisperKitCLI/TranscribeCLI.swift
@@ -6,7 +6,6 @@ import CoreML
 import Foundation
 import WhisperKit
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 struct TranscribeCLI: AsyncParsableCommand {
     static let configuration = CommandConfiguration(
         commandName: "transcribe",
diff --git a/Sources/WhisperKitCLI/TranscribeCLIUtils.swift b/Sources/WhisperKitCLI/TranscribeCLIUtils.swift
index 4e88787d..98dbfe41 100644
--- a/Sources/WhisperKitCLI/TranscribeCLIUtils.swift
+++ b/Sources/WhisperKitCLI/TranscribeCLIUtils.swift
@@ -5,7 +5,6 @@ import Foundation
 import CoreML
 @preconcurrency import WhisperKit
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 internal class TranscribeCLIUtils {
     /// Creates WhisperKit configuration from CLI arguments
diff --git a/Sources/WhisperKitCLI/WhisperKitCLI.swift b/Sources/WhisperKitCLI/WhisperKitCLI.swift
index 2d29fb8d..3ceec64b 100644
--- a/Sources/WhisperKitCLI/WhisperKitCLI.swift
+++ b/Sources/WhisperKitCLI/WhisperKitCLI.swift
@@ -6,7 +6,6 @@ import Foundation
 
 let VERSION: String = "development"
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 var subcommands: [ParsableCommand.Type] {
     #if BUILD_SERVER_CLI
     [TranscribeCLI.self, ServeCLI.self]
@@ -15,7 +14,6 @@ var subcommands: [ParsableCommand.Type] {
     #endif
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 @main
 struct WhisperKitCLI: AsyncParsableCommand {
     static let configuration = CommandConfiguration(
diff --git a/Tests/WhisperKitTests/FunctionalTests.swift b/Tests/WhisperKitTests/FunctionalTests.swift
index 28915ea5..c6987633 100644
--- a/Tests/WhisperKitTests/FunctionalTests.swift
+++ b/Tests/WhisperKitTests/FunctionalTests.swift
@@ -5,7 +5,6 @@ import CoreML
 import WhisperKit
 import XCTest
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 final class FunctionalTests: XCTestCase {
     func testInitLarge() async throws {
         try await XCTAssertNoThrowAsync(
diff --git a/Tests/WhisperKitTests/RegressionTestUtils.swift b/Tests/WhisperKitTests/RegressionTestUtils.swift
index f3aac3e2..f31dfa9c 100644
--- a/Tests/WhisperKitTests/RegressionTestUtils.swift
+++ b/Tests/WhisperKitTests/RegressionTestUtils.swift
@@ -20,7 +20,6 @@ import WatchKit
 
 // MARK: RegressionStats
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 class RegressionStats: JSONCodable {
     let testInfo: TestInfo
     let memoryStats: MemoryStats
@@ -159,7 +158,6 @@ class Stats: JSONCodable {
 
 // MARK: StaticAttributes
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 class StaticAttributes: Codable {
     let osVersion: String
     let isLowPowerMode: String
@@ -265,7 +263,6 @@ extension Data {
 
 // MARK: - SystemMemoryChecker
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 class AppMemoryChecker: NSObject {
     static func getMemoryUsed() -> UInt64 {
         // The `TASK_VM_INFO_COUNT` and `TASK_VM_INFO_REV1_COUNT` macros are too
@@ -292,7 +289,6 @@ class AppMemoryChecker: NSObject {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 class SystemMemoryCheckerAdvanced: NSObject {
     static func getMemoryUsage() -> SystemMemoryUsage {
         // Get total and available memory using host_statistics64
@@ -487,7 +483,6 @@ private extension MLComputeUnits {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 actor TranscriptionTestState {
     private var aggregatedCount: Double = 0
     private var cumulativeTokenCount: Double = 0
diff --git a/Tests/WhisperKitTests/RegressionTests.swift b/Tests/WhisperKitTests/RegressionTests.swift
index 119d0d3a..6a7fa88e 100644
--- a/Tests/WhisperKitTests/RegressionTests.swift
+++ b/Tests/WhisperKitTests/RegressionTests.swift
@@ -12,7 +12,6 @@ import XCTest
 import WatchKit
 #endif
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 class RegressionTests: XCTestCase {
     var audioFileURLs: [URL]?
     var remoteFileURLs: [URL]?
diff --git a/Tests/WhisperKitTests/TestUtils.swift b/Tests/WhisperKitTests/TestUtils.swift
index ade3c33b..a207d5d7 100644
--- a/Tests/WhisperKitTests/TestUtils.swift
+++ b/Tests/WhisperKitTests/TestUtils.swift
@@ -77,7 +77,6 @@ func XCTAssertNoThrowAsync(
 
 // MARK: Helpers
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 extension Bundle {
     static func current(for classObject: AnyObject? = nil) -> Bundle {
         #if SWIFT_PACKAGE
@@ -93,7 +92,6 @@ extension Bundle {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 extension FileManager {
     func allocatedSizeOfDirectory(at url: URL) throws -> Int64 {
         guard let enumerator = enumerator(at: url, includingPropertiesForKeys: [.totalFileAllocatedSizeKey, .fileAllocatedSizeKey]) else {
@@ -109,7 +107,6 @@ extension FileManager {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 extension MLMultiArray {
     /// Create `MLMultiArray` of shape [1, 1, arr.count] and fill up the last
     /// dimension with with values from arr.
@@ -137,7 +134,6 @@ extension MLMultiArray {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 extension XCTestCase {
     func transcribe(
         with variant: ModelVariant,
diff --git a/Tests/WhisperKitTests/UnitTests.swift b/Tests/WhisperKitTests/UnitTests.swift
index b9480bb6..34437ae9 100644
--- a/Tests/WhisperKitTests/UnitTests.swift
+++ b/Tests/WhisperKitTests/UnitTests.swift
@@ -10,7 +10,6 @@ import Tokenizers
 @testable import WhisperKit
 import XCTest
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 final class UnitTests: XCTestCase {
     override func setUp() async throws {
         Logging.shared.logLevel = .debug