
Commit 341279b

Set watchOS and visionOS minimum deployment targets in Package manifest instead
1 parent b01d157 commit 341279b

31 files changed: 2 additions, 69 deletions

Package.swift

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,8 @@ let package = Package(
     platforms: [
         .iOS(.v16),
         .macOS(.v13),
+        .watchOS(.v10),
+        .visionOS(.v1)
     ],
     products: [
         .library(
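With these two entries, the manifest's `platforms` array declares the minimum deployment target for every platform the package supports, so the per-declaration `@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)` attributes removed in the files below only restated that floor and are no longer needed. A minimal sketch of how the manifest header could read after this change; the tools version, package name, and product/target layout are assumptions for illustration, not taken from the diff:

    // swift-tools-version: 5.9
    import PackageDescription

    let package = Package(
        name: "WhisperKit",
        platforms: [
            // Minimum deployment targets applied to every target in the package.
            .iOS(.v16),
            .macOS(.v13),
            .watchOS(.v10),
            .visionOS(.v1)
        ],
        products: [
            // Assumed library product; the diff only shows that a `.library(` entry follows.
            .library(name: "WhisperKit", targets: ["WhisperKit"])
        ],
        targets: [
            .target(name: "WhisperKit")
        ]
    )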

Sources/WhisperKit/Core/Audio/AudioChunker.swift

Lines changed: 0 additions & 3 deletions
@@ -6,12 +6,10 @@ import AVFoundation
 import Foundation
 
 /// Responsible for chunking audio into smaller pieces
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol AudioChunking {
     func chunkAll(audioArray: [Float], maxChunkLength: Int, decodeOptions: DecodingOptions?) async throws -> [AudioChunk]
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioChunking {
     func updateSeekOffsetsForResults(
         chunkedResults: [Result<[TranscriptionResult], Swift.Error>],
@@ -42,7 +40,6 @@ public extension AudioChunking {
 }
 
 /// A audio chunker that splits audio into smaller pieces based on voice activity detection
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class VADAudioChunker: AudioChunking {
     /// prevent hallucinations at the end of the clip by stopping up to 1.0s early
     private let windowPadding: Int
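The same pattern repeats in every source file below: the deleted attribute listed exactly the platforms and versions the manifest now declares, so removing it does not change which OS versions can use the symbol. A minimal before/after sketch of that effect, with simplified placeholder types so it stands alone (the real protocol uses WhisperKit's own AudioChunk and DecodingOptions types):

    import Foundation

    // Placeholder type so this sketch compiles on its own.
    public struct AudioChunk {
        public var seekOffsetIndex: Int
        public var audioSamples: [Float]
    }

    // Before: the platform floor was repeated on the declaration.
    // @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
    // public protocol AudioChunking { ... }

    // After: the manifest's `platforms` entry applies to the whole module,
    // so the plain declaration carries the same minimums implicitly.
    public protocol AudioChunking {
        func chunkAll(audioArray: [Float], maxChunkLength: Int) async throws -> [AudioChunk]
    }

    // An explicit attribute is still required when a symbol needs something newer
    // than the package floor (hypothetical example, not part of this commit):
    @available(macOS 14, iOS 17, watchOS 10.2, visionOS 1, *)
    public struct HypotheticalNewerFeature {}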

Sources/WhisperKit/Core/Audio/AudioProcessor.swift

Lines changed: 0 additions & 3 deletions
@@ -123,7 +123,6 @@ public extension AudioProcessing {
     /// Loads and converts audio data from a specified file paths.
     /// - Parameter audioPaths: The file paths of the audio files.
     /// - Returns: `AVAudioPCMBuffer` containing the audio data.
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     static func loadAudioAsync(fromPath audioFilePath: String) async throws -> AVAudioPCMBuffer {
         return try await Task {
             try AudioProcessor.loadAudio(fromPath: audioFilePath)
@@ -191,7 +190,6 @@ public extension AudioProcessing {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class AudioProcessor: NSObject, AudioProcessing {
     private var lastInputDevice: DeviceID?
     public var audioEngine: AVAudioEngine?
@@ -886,7 +884,6 @@ open class AudioProcessor: NSObject, AudioProcessing {
 
 // MARK: - Streaming
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioProcessor {
     /// We have a new buffer, process and store it.
     /// NOTE: Assumes audio is 16khz mono

Sources/WhisperKit/Core/Audio/AudioStreamTranscriber.swift

Lines changed: 0 additions & 3 deletions
@@ -3,7 +3,6 @@
 
 import Foundation
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension AudioStreamTranscriber {
     struct State {
         public var isRecording: Bool = false
@@ -18,11 +17,9 @@ public extension AudioStreamTranscriber {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public typealias AudioStreamTranscriberCallback = (AudioStreamTranscriber.State, AudioStreamTranscriber.State) -> Void
 
 /// Responsible for streaming audio from the microphone, processing it, and transcribing it in real-time.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public actor AudioStreamTranscriber {
     private var state: AudioStreamTranscriber.State = .init() {
         didSet {

Sources/WhisperKit/Core/Audio/EnergyVAD.swift

Lines changed: 0 additions & 1 deletion
@@ -4,7 +4,6 @@
 import Foundation
 
 /// Voice activity detection based on energy threshold
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public final class EnergyVAD: VoiceActivityDetector {
     public let energyThreshold: Float
 
Sources/WhisperKit/Core/Audio/VoiceActivityDetector.swift

Lines changed: 0 additions & 1 deletion
@@ -5,7 +5,6 @@ import Foundation
 
 /// A base class for Voice Activity Detection (VAD), used to identify and separate segments of audio that contain human speech from those that do not.
 /// Subclasses must implement the `voiceActivity(in:)` method to provide specific voice activity detection functionality.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class VoiceActivityDetector {
     /// The sample rate of the audio signal, in samples per second.
     public let sampleRate: Int

Sources/WhisperKit/Core/AudioEncoder.swift

Lines changed: 0 additions & 2 deletions
@@ -7,7 +7,6 @@ public protocol AudioEncoderOutputType {}
 extension MLMultiArray: AudioEncoderOutputType {}
 
 /// AudioEncoding protocol defines the requirements for an audio encoding implementation.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public protocol AudioEncoding {
     /// The size of the embedding produced by the encoder.
     var embedSize: Int? { get }
@@ -19,7 +18,6 @@ public protocol AudioEncoding {
 }
 
 /// Backwards-compatible AudioEncoder implementation
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoder: AudioEncoding, WhisperMLModel {
     public var model: MLModel?
 

Sources/WhisperKit/Core/Configurations.swift

Lines changed: 0 additions & 2 deletions
@@ -4,7 +4,6 @@
 import Foundation
 
 /// Configuration to initialize WhisperKit
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class WhisperKitConfig {
     /// Name for whisper model to use
     public var model: String?
@@ -126,7 +125,6 @@ open class WhisperKitConfig {
 ///   - firstTokenLogProbThreshold: If the log probability over the first sampled token is below this value, treat as failed.
 ///   - noSpeechThreshold: If the no speech probability is higher than this value AND the average log
 ///     probability over sampled tokens is below `logProbThreshold`, consider the segment as silent.
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingOptions: Codable {
     public var verbose: Bool
     public var task: DecodingTask

Sources/WhisperKit/Core/FeatureExtractor.swift

Lines changed: 0 additions & 1 deletion
@@ -16,7 +16,6 @@ public protocol FeatureExtracting {
     func logMelSpectrogram(fromAudio inputAudio: any AudioProcessorOutputType) async throws -> (any FeatureExtractorOutputType)?
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 open class FeatureExtractor: FeatureExtracting, WhisperMLModel {
     public var model: MLModel?
 

Sources/WhisperKit/Core/Models.swift

Lines changed: 0 additions & 11 deletions
@@ -134,7 +134,6 @@ public enum ModelState: CustomStringConvertible {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct ModelComputeOptions {
     public var melCompute: MLComputeUnits
     public var audioEncoderCompute: MLComputeUnits
@@ -252,7 +251,6 @@ public struct ModelSupportConfig: Codable {
         computeDisabledModels()
     }
 
-    @available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
     public func modelSupport(for deviceIdentifier: String = WhisperKit.deviceName()) -> ModelSupport {
         // Find the support with the longest matching identifier prefix
         // i.e. `iPad13,16` should match exact `iPad13,16` instead of first prefix like `iPad13,1`
@@ -409,7 +407,6 @@ public enum ChunkingStrategy: String, Codable, CaseIterable {
     case vad
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingFallback {
     public var needsFallback: Bool
     public var fallbackReason: String
@@ -420,7 +417,6 @@ public struct DecodingFallback {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public extension DecodingFallback {
     init?(
         options: DecodingOptions,
@@ -447,7 +443,6 @@ public extension DecodingFallback {
     }
 }
 
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public struct DecodingResult {
     public var language: String
     public var languageProbs: [String: Float]
@@ -900,7 +895,6 @@ public class MelSpectrogramInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class MelSpectrogramOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -937,7 +931,6 @@ public class MelSpectrogramOutput: MLFeatureProvider {
 // MARK: AudioEncoder
 
 /// Model Prediction Input Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoderInput: MLFeatureProvider {
     /// melspectrogram_features as 1 × {80,128} × 1 × 3000 4-dimensional array of floats
     public var melspectrogram_features: MLMultiArray
@@ -963,7 +956,6 @@ public class AudioEncoderInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class AudioEncoderOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -1000,7 +992,6 @@ public class AudioEncoderOutput: MLFeatureProvider {
 // MARK: TextDecoder
 
 /// Model Prediction Input Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderInput: MLFeatureProvider {
     /// input_ids as 1 element vector of 32-bit integers
     public var input_ids: MLMultiArray
@@ -1068,7 +1059,6 @@ public class TextDecoderInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
@@ -1175,7 +1165,6 @@ public class TextDecoderCachePrefillInput: MLFeatureProvider {
 }
 
 /// Model Prediction Output Type
-@available(macOS 13, iOS 16, watchOS 10, visionOS 1, *)
 public class TextDecoderCachePrefillOutput: MLFeatureProvider {
     /// Source provided by CoreML
     private let provider: MLFeatureProvider
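One consequence for downstream users: an app or package that depends on WhisperKit must build with deployment targets at least as high as the minimums declared in the manifest, otherwise SwiftPM reports that the product requires a newer platform version. A minimal sketch of a consumer manifest; the repository URL, version requirement, and package/target names are assumptions for illustration:

    // swift-tools-version: 5.9
    import PackageDescription

    let package = Package(
        name: "MyTranscriptionTool",
        platforms: [
            // Must be at least as new as WhisperKit's declared minimums.
            .iOS(.v16),
            .macOS(.v13),
            .watchOS(.v10),
            .visionOS(.v1)
        ],
        dependencies: [
            // URL and version are assumed for this sketch.
            .package(url: "https://github.com/argmaxinc/WhisperKit.git", from: "0.9.0")
        ],
        targets: [
            .executableTarget(
                name: "MyTranscriptionTool",
                dependencies: [.product(name: "WhisperKit", package: "WhisperKit")]
            )
        ]
    )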
