-
Notifications
You must be signed in to change notification settings - Fork 558
Speech macOS xcode26.4 b2
#Speech.framework
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h 2025-11-09 04:42:48
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h 2026-02-13 11:00:40
@@ -15,7 +15,8 @@
Pass this object to ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:completion:`` to indicate where that method should create the custom language model file, and to ``SFSpeechRecognitionRequest/customizedLanguageModel`` or ``DictationTranscriber/ContentHint/customizedLanguage(modelConfiguration:)`` to indicate where the system should find that model to use.
*/
-API_AVAILABLE(ios(17), macos(14), tvos(18))
+API_AVAILABLE(ios(17), macos(14))
+API_UNAVAILABLE(tvos)
NS_SWIFT_SENDABLE
NS_SWIFT_NAME(SFSpeechLanguageModel.Configuration)
@interface SFSpeechLanguageModelConfiguration : NSObject <NSCopying, NSSecureCoding>
@@ -27,7 +28,9 @@
@property (nonatomic, readonly, nullable, copy) NSURL *vocabulary;
/** The relative weight of the language model customization. Value must be between 0.0 and 1.0 inclusive. */
-@property (nonatomic, readonly, nullable, copy) NSNumber *weight API_AVAILABLE(ios(26.0), macos(26.0), tvos(26.0));
+@property (nonatomic, readonly, nullable, copy) NSNumber *weight
+API_AVAILABLE(ios(26.0), macos(26.0))
+API_UNAVAILABLE(tvos);
/** Creates a configuration with the location of a language model file. */
- (instancetype)initWithLanguageModel:(NSURL *)languageModel;
@@ -36,7 +39,9 @@
- (instancetype)initWithLanguageModel:(NSURL *)languageModel vocabulary:(NSURL * __nullable)vocabulary;
/** Creates a configuration with the locations of language model and vocabulary files, and custom weight. */
-- (instancetype)initWithLanguageModel:(NSURL *)languageModel vocabulary:(NSURL * __nullable)vocabulary weight:(NSNumber * __nullable)weight API_AVAILABLE(ios(26.0), macos(26.0), tvos(26.0));
+- (instancetype)initWithLanguageModel:(NSURL *)languageModel vocabulary:(NSURL * __nullable)vocabulary weight:(NSNumber * __nullable)weight
+API_AVAILABLE(ios(26.0), macos(26.0))
+API_UNAVAILABLE(tvos);
@end
@@ -45,7 +50,8 @@
Create this object using ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:completion:`` or ``SFSpeechLanguageModel/prepareCustomLanguageModelForUrl:configuration:ignoresCache:completion:``.
*/
-API_AVAILABLE(ios(17), macos(14), tvos(18))
+API_AVAILABLE(ios(17), macos(14))
+API_UNAVAILABLE(tvos)
@interface SFSpeechLanguageModel : NSObject
+ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h 2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h 2026-02-14 01:17:59
@@ -13,7 +13,8 @@
/**
The metadata of speech in the audio of a speech recognition request.
*/
-API_AVAILABLE(ios(14.5), macos(11.3), tvos(18))
+API_AVAILABLE(ios(14.5), macos(11.3))
+API_UNAVAILABLE(tvos)
@interface SFSpeechRecognitionMetadata : NSObject <NSCopying, NSSecureCoding>
/**
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h 2025-11-09 04:40:32
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h 2026-02-13 13:55:18
@@ -17,7 +17,8 @@
Don't create ``SFSpeechRecognitionRequest`` objects directly. Create an ``SFSpeechURLRecognitionRequest`` or ``SFSpeechAudioBufferRecognitionRequest`` object instead. Use the properties of this class to configure various aspects of your request object before you start the speech recognition process. For example, use the ``shouldReportPartialResults`` property to specify whether you want partial results or only the final result of speech recognition.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechRecognitionRequest : NSObject
/**
@@ -61,16 +62,16 @@
> Note:
> The request only honors this setting if the ``SFSpeechRecognizer/supportsOnDeviceRecognition`` (``SFSpeechRecognizer``) property is also `true`.
*/
-@property (nonatomic) BOOL requiresOnDeviceRecognition API_AVAILABLE(ios(13), macos(10.15), tvos(18));
+@property (nonatomic) BOOL requiresOnDeviceRecognition API_AVAILABLE(ios(13), macos(10.15));
/**
A Boolean value that indicates whether to add punctuation to speech recognition results.
Set this property to `true` for the speech framework to automatically include punctuation in the recognition results. Punctuation includes a period or question mark at the end of a sentence, and a comma within a sentence.
*/
-@property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13), tvos(18));
+@property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13));
-@property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14), tvos(18));
+@property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14));
@end
@@ -113,7 +114,8 @@
}*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechURLRecognitionRequest : SFSpeechRecognitionRequest
- (instancetype)init NS_UNAVAILABLE;
@@ -141,7 +143,8 @@
For a complete example of how to use audio buffers with speech recognition, see SpeakToMe: Using Speech Recognition with AVAudioEngine.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechAudioBufferRecognitionRequest : SFSpeechRecognitionRequest
/**
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h	2026-02-14 01:17:59
@@ -20,7 +20,8 @@
If you requested partial results from the speech recognizer, the transcriptions may represent only part of the total audio content. Use the isFinal property to determine if the request contains partial or final results.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechRecognitionResult : NSObject <NSCopying, NSSecureCoding>
/**
@@ -45,7 +46,7 @@
/**
An object that contains the metadata results for a speech recognition request.
*/
-@property (nonatomic, nullable, readonly) SFSpeechRecognitionMetadata *speechRecognitionMetadata API_AVAILABLE(ios(14.0), macos(11.0), tvos(18));
+@property (nonatomic, nullable, readonly) SFSpeechRecognitionMetadata *speechRecognitionMetadata API_AVAILABLE(ios(14.0), macos(11.0));
@end
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h	2026-02-13 11:14:38
@@ -30,7 +30,9 @@
/// Delivery of recognition requests has finished and audio recording has stopped.
SFSpeechRecognitionTaskStateCompleted = 4,
-} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
+}
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos);
/**
A task object for monitoring the speech recognition progress.
@@ -39,7 +41,8 @@
You don't create speech recognition task objects directly. Instead, you receive one of these objects after calling SFSpeechRecognizer/recognitionTask(with:resultHandler:) or SFSpeechRecognizer/recognitionTask(with:delegate:) on your SFSpeechRecognizer object.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechRecognitionTask : NSObject
/**
@@ -114,7 +117,8 @@
Adopt the methods of this protocol in an object and pass that object in to the delegate parameter of SFSpeechRecognizer/recognitionTask(with:delegate:) when starting your speech recognition task.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@protocol SFSpeechRecognitionTaskDelegate
@optional
@@ -183,7 +187,7 @@
- task: The speech recognition task (an SFSpeechRecognitionTask object) that represents the request.
- duration: The seconds of audio input that the recognizer has processed.
*/
-- (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didProcessAudioDuration:(NSTimeInterval)duration API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
+- (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didProcessAudioDuration:(NSTimeInterval)duration API_AVAILABLE(ios(10.0), macos(10.15));
@end
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h	2026-02-14 01:17:59
@@ -27,4 +27,6 @@
///
/// Use this hint type when you are using speech recognition to handle confirmation commands, such as "yes," "no," or "maybe."
SFSpeechRecognitionTaskHintConfirmation = 3,
-} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
+}
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos);
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h	2025-11-09 05:06:25
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h	2026-02-13 13:55:18
@@ -36,7 +36,9 @@
/// The user granted your app's request to perform speech recognition.
SFSpeechRecognizerAuthorizationStatusAuthorized,
-} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
+}
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos);
/**
An object you use to check for the availability of the speech recognition service, and to initiate the speech recognition process.
@@ -71,7 +73,8 @@
- Remind the user when your app is recording. For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process.
- Do not perform speech recognition on private or sensitive information. Some speech is not appropriate for recognition. Don't send passwords, health or financial data, and other sensitive speech for recognition.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFSpeechRecognizer : NSObject
/**
@@ -158,7 +161,7 @@
An SFSpeechRecognitionRequest can only honor its SFSpeechRecognitionRequest/requiresOnDeviceRecognition property if supportsOnDeviceRecognition is true. If supportsOnDeviceRecognition is false, the SFSpeechRecognizer requires a network in order to recognize speech.
*/
-@property (nonatomic) BOOL supportsOnDeviceRecognition API_AVAILABLE(ios(13), tvos(18));
+@property (nonatomic) BOOL supportsOnDeviceRecognition API_AVAILABLE(ios(13));
/**
The delegate object that handles changes to the availability of speech recognition services.
@@ -223,7 +226,8 @@
A speech recognizer's availability can change due to the device's Internet connection or other factors. Use this protocol's optional method to track those changes and provide an appropriate response. For example, when speech recognition becomes unavailable, you might disable related features in your app.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@protocol SFSpeechRecognizerDelegate
@optional
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h	2026-02-14 01:17:59
@@ -22,7 +22,8 @@
An SFTranscription represents only a potential version of the speech. It might not be an accurate representation of the utterances.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFTranscription : NSObject <NSCopying, NSSecureCoding>
/**
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h	2026-02-14 01:17:59
@@ -23,7 +23,8 @@
- A `confidence` value, indicating how likely it is that the specified string matches the audible speech.
- A `timestamp` and `duration` value, indicating the position of the segment within the provided audio stream.
*/
-API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
+API_AVAILABLE(ios(10.0), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFTranscriptionSegment : NSObject <NSCopying, NSSecureCoding>
/**
diff -ruN /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h
--- /Applications/Xcode_26.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h	2025-11-09 05:16:05
+++ /Applications/Xcode_26.4.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h	2026-02-14 01:17:59
@@ -9,7 +9,8 @@
/**
The value of a voice analysis metric.
*/
-API_AVAILABLE(ios(13), macos(10.15), tvos(18))
+API_AVAILABLE(ios(13), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFAcousticFeature : NSObject <NSCopying, NSSecureCoding>
/**
@@ -36,7 +37,8 @@
These results are part of the SFTranscriptionSegment object and are available when the system sends the SFSpeechRecognitionResult/isFinal flag.
*/
-API_AVAILABLE(ios(13), macos(10.15), tvos(18))
+API_AVAILABLE(ios(13), macos(10.15))
+API_UNAVAILABLE(tvos)
@interface SFVoiceAnalytics : NSObject <NSCopying, NSSecureCoding>
/**