diff --git a/Package.swift b/Package.swift
index a82452c..d1d9733 100644
--- a/Package.swift
+++ b/Package.swift
@@ -4,7 +4,7 @@ import PackageDescription
 
 let package = Package(
   name: "SCNRecorder",
-  platforms: [ .iOS(.v12) ],
+  platforms: [ .iOS(.v13) ],
   products: [
     .library(
       name: "SCNRecorder",
diff --git a/README.md b/README.md
index 9f63386..cf42768 100644
--- a/README.md
+++ b/README.md
@@ -138,6 +138,8 @@ override func viewDidLoad() {
 }
 ```
 
+**Known issue:** Capturing audio requires a valid NSCameraUsageDescription key in your Info.plist when you submit the app to App Store Connect. If you are not using the audio recorder, you can exclude the `AVCaptureSession+BaseRecorder.swift` extension from the Target Membership; App Store Connect will then stop asking for the NSCameraUsageDescription key.
+
 ### Music Overlay
 
 Instead of capturing audio using microphone you can play music and add it to video at the same time.
diff --git a/SCNRecorder.podspec b/SCNRecorder.podspec
index cdc1f50..6d6534d 100644
--- a/SCNRecorder.podspec
+++ b/SCNRecorder.podspec
@@ -10,6 +10,7 @@ Pod::Spec.new do |s|
   s.module_name = 'SCNRecorder'
   s.swift_version = '5.0'
   s.source_files = 'Sources/**/*.{swift}'
+  s.exclude_files = 'Sources/Extensions/AVCaptureSession+BaseRecorder.swift'
   s.dependency 'MTDMulticastDelegate'
 
   s.app_spec 'Example' do |app_spec|
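The podspec change above handles CocoaPods via `exclude_files`. For Swift Package Manager users, a hypothetical equivalent would be an `exclude` entry on the target; the target layout and dependency name below are assumptions, since the targets section of Package.swift is not part of this diff:

```swift
// Hypothetical SPM counterpart of the podspec's exclude_files entry.
// Paths in `exclude` are relative to the target's `path`.
.target(
    name: "SCNRecorder",
    dependencies: ["MTDMulticastDelegate"],  // assumed dependency name
    path: "Sources",                         // assumed target path
    exclude: ["Extensions/AVCaptureSession+BaseRecorder.swift"]
)
```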
diff --git a/Sources/AudioEngine/AudioEngine.AVPlayerTapper.swift b/Sources/AudioEngine/AudioEngine.AVPlayerTapper.swift
new file mode 100644
index 0000000..0f11086
--- /dev/null
+++ b/Sources/AudioEngine/AudioEngine.AVPlayerTapper.swift
@@ -0,0 +1,232 @@
+import Foundation
+import AVFoundation
+
+extension AudioEngine {
+
+    public final class AVPlayerTapper {
+
+        private weak var player: AVPlayer?
+
+        private var audioFormat: AVAudioFormat? {
+            AVAudioFormat(commonFormat: .pcmFormatFloat32,
+                          sampleRate: 44100,
+                          channels: 2,
+                          interleaved: false)
+        }
+
+        @SCNObservable public internal(set) var error: Swift.Error?
+
+        public weak var recorder: BaseRecorder? {
+            didSet {
+                oldValue?.audioInput.audioFormat = nil
+                guard let recorder = recorder else {
+                    removeAudioTap()
+                    return
+                }
+
+                recorder.audioInput.audioFormat = audioFormat
+
+                guard oldValue == nil else { return }
+                removeAudioTap()
+                setupAudioTap()
+            }
+        }
+
+        deinit {
+            recorder = nil
+        }
+
+        public init(player: AVPlayer) {
+            self.player = player
+            guard player.currentItem != nil else {
+                fatalError("FATAL: Player item is not initialized.")
+            }
+        }
+
+        private func setupAudioTap() {
+            // Create an AVMutableAudioMix
+            let audioMix = AVMutableAudioMix()
+
+            // Get the first audio track
+            guard let audioTrack = player?.currentItem?.asset.tracks(withMediaType: .audio).first else {
+                print("ERROR: No audio track found")
+                return
+            }
+
+            // Create AVMutableAudioMixInputParameters for the track
+            let inputParams = AVMutableAudioMixInputParameters(track: audioTrack)
+
+            // Install tap
+            inputParams.setVolume(1.0, at: .zero)
+            inputParams.audioTapProcessor = createAudioTapProcessor()
+
+            audioMix.inputParameters = [inputParams]
+
+            // Set the audio mix to the player item
+            player?.currentItem?.audioMix = audioMix
+        }
+
+        private func removeAudioTap() {
+            player?.currentItem?.audioMix = nil
+        }
+
+        private func createAudioTapProcessor() -> MTAudioProcessingTap {
+            var callbacks = MTAudioProcessingTapCallbacks(
+                version: kMTAudioProcessingTapCallbacksVersion_0,
+                clientInfo: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque()),
+                init: tapInitCallback,
+                finalize: tapFinalizeCallback,
+                prepare: tapPrepareCallback,
+                unprepare: tapUnprepareCallback,
+                process: tapProcessCallback
+            )
+
+            var tap: MTAudioProcessingTap?
+            let status = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks, kMTAudioProcessingTapCreationFlag_PostEffects, &tap)
+
+            if status != noErr {
+                fatalError("FATAL: creating MTAudioProcessingTap: \(status)")
+            }
+
+            return tap!
+        }
+
+        // Define the TapContext class
+        class TapContext {
+            var processingFormat: AVAudioFormat?
+            weak var selfInstance: AudioEngine.AVPlayerTapper?
+        }
+
+        // MTAudioProcessingTap callbacks
+        private let tapInitCallback: MTAudioProcessingTapInitCallback = { (tap, clientInfo, tapStorageOut) in
+            // Initialization code
+            let context = TapContext()
+            context.selfInstance = Unmanaged<AVPlayerTapper>.fromOpaque(clientInfo!).takeUnretainedValue()
+            tapStorageOut.pointee = Unmanaged.passRetained(context).toOpaque()
+        }
+
+        private let tapFinalizeCallback: MTAudioProcessingTapFinalizeCallback = { (tap) in
+            // Finalization code
+            let storage = MTAudioProcessingTapGetStorage(tap)
+            Unmanaged<TapContext>.fromOpaque(storage).release()
+        }
+
+        private let tapPrepareCallback: MTAudioProcessingTapPrepareCallback = { (tap, maxFrames, processingFormat) in
+            // Prepare code
+            let storage = MTAudioProcessingTapGetStorage(tap)
+            let context = Unmanaged<TapContext>.fromOpaque(storage).takeUnretainedValue()
+            // Save the processing format
+            var asbd = processingFormat.pointee
+            context.processingFormat = AVAudioFormat(streamDescription: &asbd)
+        }
+
+        private let tapUnprepareCallback: MTAudioProcessingTapUnprepareCallback = { (tap) in
+            // Unprepare code if needed
+        }
+
+        private let tapProcessCallback: MTAudioProcessingTapProcessCallback = {
+            (tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut) in
+
+            let storage = MTAudioProcessingTapGetStorage(tap)
+            let context = Unmanaged<TapContext>.fromOpaque(storage).takeUnretainedValue()
+
+            guard let selfInstance = context.selfInstance else {
+                print("ERROR: tapProcessCallback: selfInstance not available")
+                return
+            }
+
+            var status = noErr
+            var tapFlags: MTAudioProcessingTapFlags = 0
+            var numFrames = numberFrames
+            var timeRangeOut = CMTimeRange()
+
+            // Get source audio
+            status = MTAudioProcessingTapGetSourceAudio(
+                tap,
+                numberFrames,
+                bufferListInOut,
+                &tapFlags,
+                &timeRangeOut,
+                &numFrames
+            )
+
+            if status != noErr {
+                print("ERROR: tapProcessCallback: getting source audio, status: \(status)")
+                return
+            }
+
+            numberFramesOut.pointee = numFrames
+            flagsOut.pointee = tapFlags
+
+            let bufferListPtr = UnsafeMutableAudioBufferListPointer(bufferListInOut)
+
+            // MARK: - SAFE FORMAT HANDLING
+            let processingFormat: AVAudioFormat
+
+            if let format = context.processingFormat {
+                processingFormat = format
+            } else {
+                let channels = min(2, bufferListPtr.count)
+
+                guard let fallback = AVAudioFormat(
+                    commonFormat: .pcmFormatFloat32,
+                    sampleRate: 44100,
+                    channels: AVAudioChannelCount(channels),
+                    interleaved: false
+                ) else {
+                    print("ERROR: cannot create fallback format")
+                    return
+                }
+
+                context.processingFormat = fallback
+                processingFormat = fallback
+            }
+
+            let audioTime = AVAudioTime(hostTime: mach_absolute_time())
+
+            guard let pcmBuffer = AVAudioPCMBuffer(
+                pcmFormat: processingFormat,
+                frameCapacity: AVAudioFrameCount(numFrames)
+            ) else {
+                return
+            }
+
+            pcmBuffer.frameLength = AVAudioFrameCount(numFrames)
+
+            // MARK: - SAFE CHANNEL COPY (FIXED CRASH HERE)
+            let dstChannels = Int(processingFormat.channelCount)
+            let srcChannels = bufferListPtr.count
+            let channelsToCopy = min(dstChannels, srcChannels)
+
+            for i in 0..<channelsToCopy {
+                guard let src = bufferListPtr[i].mData,
+                      let dst = pcmBuffer.floatChannelData?[i] else { continue }
+
+                let byteSize = min(
+                    Int(bufferListPtr[i].mDataByteSize),
+                    Int(numFrames) * MemoryLayout<Float>.size
+                )
+
+                memcpy(dst, src, byteSize)
+            }
+
+            // MARK: - SEND TO RECORDER
+            do {
+                let sampleBuffer = try AudioEngine.createAudioSampleBuffer(
+                    from: pcmBuffer,
+                    time: audioTime
+                )
+
+                selfInstance.recorder?.audioInput.audioEngine(
+                    didOutputAudioSampleBuffer: sampleBuffer
+                )
+
+            } catch {
+                print("ERROR: tapProcessCallback: failed to create sample buffer: \(error)")
+                selfInstance.error = error
+            }
+        }
+    }
+}
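For reviewers, a minimal usage sketch of the new tapper. `musicURL` and `recorder` are placeholders for an asset URL and an existing `BaseRecorder` from the host app:

```swift
import AVFoundation

// Sketch only: feed an AVPlayer's audio into an existing SCNRecorder recorder.
let player = AVPlayer(url: musicURL)                     // musicURL: placeholder
let tapper = AudioEngine.AVPlayerTapper(player: player)  // item must already exist

tapper.recorder = recorder  // installs the MTAudioProcessingTap via an audio mix
player.play()

// Setting recorder to nil removes the audio mix (and the tap) from the item.
tapper.recorder = nil
```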
diff --git a/Sources/AudioEngine/AudioEngine.swift b/Sources/AudioEngine/AudioEngine.swift
index da81933..b0e089d 100644
--- a/Sources/AudioEngine/AudioEngine.swift
+++ b/Sources/AudioEngine/AudioEngine.swift
@@ -29,283 +29,283 @@ import UIKit
 
 @available(iOS 13.0, *)
 public final class AudioEngine {
-
-  enum State {
-
-    case normal
-
-    case interrupted(playing: Bool)
-
-    var isInterrupted: Bool {
-      if case .interrupted = self { return true }
-      return false
-    }
-
-    var shouldResume: Bool {
-      if case .interrupted(true) = self { return true }
-      return false
-    }
-  }
-
-  let queue = DispatchQueue(label: "AudioEngine.Processing", qos: .userInitiated)
-
-  lazy var engine: AVAudioEngine = {
-    let engine = AVAudioEngine()
-    engine.attach(playerNode)
-    engine.attach(raterNode)
-    return engine
-  }()
-
-  lazy var playerNode = AVAudioPlayerNode()
-
-  lazy var raterNode = AVAudioUnitTimePitch()
-
-  let audioSession = AVAudioSession.sharedInstance()
-
-  let notificationCenter = NotificationCenter.default
-
-  var observers = [NSObjectProtocol]()
-
-  var state: State = .normal
-
-  public var player: Player? {
-    didSet {
-      guard player !== oldValue else { return }
-      if let oldPlayer = oldValue { oldPlayer.stop() }
-
-      recorder?.audioInput.audioFormat = nil
-      guard let player = player else { return }
-      recorder?.audioInput.audioFormat = player.audioFormat
-
-      engine.connect(playerNode, to: raterNode, format: player.audioFormat)
-      engine.connect(raterNode, to: engine.mainMixerNode, format: player.audioFormat)
-
-      player.attach(
-        playerNode: playerNode,
-        raterNode: raterNode,
-        queue: queue
-      )
-
-      player.willStart = { [weak self] in
-        guard let self = self else { return false }
-
-        let isInterrupted = DispatchQueue.main.sync { () -> Bool in
-          if self.state.isInterrupted { self.state = .interrupted(playing: true) }
-          return self.state.isInterrupted
-        }
-        guard !isInterrupted else { return false }
-
-        do {
-          try self.activateAudioSession()
-          try self.engine.start()
-          return true
-        }
-        catch {
-          self.error = error
-          return false
-        }
-      }
-
-      player.didPause = { [weak self] in
-        guard let self = self else { return }
-        self.engine.pause()
-        self.deactivateAudioSession()
-      }
-
-      player.didStop = { [weak self] in
-        guard let self = self else { return }
-
-        self.engine.stop()
-        self.engine.reset()
-        self.deactivateAudioSession()
-      }
-    }
-  }
-
-  public weak var recorder: BaseRecorder? {
-    didSet {
-      oldValue?.audioInput.audioFormat = nil
-      guard let recorder = recorder else {
-        engine.mainMixerNode.removeTap(onBus: 0)
-        return
-      }
-
-      recorder.audioInput.audioFormat = player?.audioFormat
-
-      guard oldValue == nil else { return }
-      engine.mainMixerNode.removeTap(onBus: 0)
-      engine.mainMixerNode.installTap(
-        onBus: 0,
-        bufferSize: 4096,
-        format: nil
-      ) { [weak self] (buffer, time) in
-        guard let self = self else { return }
-        guard let recorder = self.recorder else {
-          self.engine.mainMixerNode.removeTap(onBus: 0)
-          return
-        }
-
-        do {
-          let sampleBuffer = try Self.createAudioSampleBuffer(from: buffer, time: time)
-          recorder.audioInput.audioEngine(self, didOutputAudioSampleBuffer: sampleBuffer)
-        }
-        catch {
-          self.error = error
-        }
-      }
-    }
-  }
-
-  let canDeactivateAudioSession: Bool
-
-  @SCNObservable public internal(set) var error: Swift.Error?
-
-  public init(canDeactivateAudioSession: Bool = true) {
-    self.canDeactivateAudioSession = canDeactivateAudioSession
-    setupObservers()
-  }
-
-  deinit {
-    recorder?.audioInput.audioFormat = nil
-    player?.stop()
-    observers.forEach { notificationCenter.removeObserver($0) }
-    deactivateAudioSession()
-  }
-
-  func activateAudioSession() throws {
-    try audioSession.setCategory(.playAndRecord, options: .defaultToSpeaker)
-    try audioSession.setActive(true)
-  }
-
-  func deactivateAudioSession() {
-    guard canDeactivateAudioSession else { return }
-
-    do { try audioSession.setActive(false) }
-    catch { self.error = error }
-  }
+
+    enum State {
+
+        case normal
+
+        case interrupted(playing: Bool)
+
+        var isInterrupted: Bool {
+            if case .interrupted = self { return true }
+            return false
+        }
+
+        var shouldResume: Bool {
+            if case .interrupted(true) = self { return true }
+            return false
+        }
+    }
+
+    let queue = DispatchQueue(label: "AudioEngine.Processing", qos: .userInteractive)
+
+    lazy var engine: AVAudioEngine = {
+        let engine = AVAudioEngine()
+        engine.attach(playerNode)
+        engine.attach(raterNode)
+        return engine
+    }()
+
+    lazy var playerNode = AVAudioPlayerNode()
+
+    lazy var raterNode = AVAudioUnitTimePitch()
+
+    let audioSession = AVAudioSession.sharedInstance()
+
+    let notificationCenter = NotificationCenter.default
+
+    var observers = [NSObjectProtocol]()
+
+    var state: State = .normal
+
+    public var player: Player? {
+        didSet {
+            guard player !== oldValue else { return }
+            if let oldPlayer = oldValue { oldPlayer.stop() }
+
+            recorder?.audioInput.audioFormat = nil
+            guard let player = player else { return }
+            recorder?.audioInput.audioFormat = player.audioFormat
+
+            engine.connect(playerNode, to: raterNode, format: player.audioFormat)
+            engine.connect(raterNode, to: engine.mainMixerNode, format: player.audioFormat)
+
+            player.attach(
+                playerNode: playerNode,
+                raterNode: raterNode,
+                queue: queue
+            )
+
+            player.willStart = { [weak self] in
+                guard let self = self else { return false }
+
+                let isInterrupted = DispatchQueue.main.sync { () -> Bool in
+                    if self.state.isInterrupted { self.state = .interrupted(playing: true) }
+                    return self.state.isInterrupted
+                }
+                guard !isInterrupted else { return false }
+
+                do {
+                    try self.activateAudioSession()
+                    try self.engine.start()
+                    return true
+                }
+                catch {
+                    self.error = error
+                    return false
+                }
+            }
+
+            player.didPause = { [weak self] in
+                guard let self = self else { return }
+                self.engine.pause()
+                self.deactivateAudioSession()
+            }
+
+            player.didStop = { [weak self] in
+                guard let self = self else { return }
+
+                self.engine.stop()
+                self.engine.reset()
+                self.deactivateAudioSession()
+            }
+        }
+    }
+
+    public weak var recorder: BaseRecorder? {
+        didSet {
+            oldValue?.audioInput.audioFormat = nil
+            guard let recorder = recorder else {
+                engine.mainMixerNode.removeTap(onBus: 0)
+                return
+            }
+
+            recorder.audioInput.audioFormat = player?.audioFormat
+
+            guard oldValue == nil else { return }
+            engine.mainMixerNode.removeTap(onBus: 0)
+            engine.mainMixerNode.installTap(
+                onBus: 0,
+                bufferSize: 4096,
+                format: engine.mainMixerNode.outputFormat(forBus: 0)
+            ) { [weak self] (buffer, time) in
+                guard let self = self else { return }
+                guard let recorder = self.recorder else {
+                    self.engine.mainMixerNode.removeTap(onBus: 0)
+                    return
+                }
+
+                do {
+                    let sampleBuffer = try Self.createAudioSampleBuffer(from: buffer, time: time)
+                    recorder.audioInput.audioEngine(didOutputAudioSampleBuffer: sampleBuffer)
+                }
+                catch {
+                    self.error = error
+                }
+            }
+        }
+    }
+
+    let canDeactivateAudioSession: Bool
+
+    @SCNObservable public internal(set) var error: Swift.Error?
+
+    public init(canDeactivateAudioSession: Bool = true) {
+        self.canDeactivateAudioSession = canDeactivateAudioSession
+        setupObservers()
+    }
+
+    deinit {
+        recorder?.audioInput.audioFormat = nil
+        player?.stop()
+        observers.forEach { notificationCenter.removeObserver($0) }
+        deactivateAudioSession()
+    }
+
+    func activateAudioSession() throws {
+        try audioSession.setCategory(.playAndRecord, options: .defaultToSpeaker)
+        try audioSession.setActive(true)
+    }
+
+    func deactivateAudioSession() {
+        guard canDeactivateAudioSession else { return }
+
+        do { try audioSession.setActive(false) }
+        catch { self.error = error }
+    }
 }
 
 // MARK: - Observers
 
 @available(iOS 13.0, *)
 extension AudioEngine {
-
-  func setupObservers() {
-    observers.append(
-      notificationCenter.addObserver(
-        forName: AVAudioSession.interruptionNotification,
-        object: nil,
-        queue: nil,
-        using: { [weak self] in self?.handleInterruption($0) }
-      )
-    )
-
-    observers.append(
-      notificationCenter.addObserver(
-        forName: AVAudioSession.mediaServicesWereResetNotification,
-        object: nil,
-        queue: nil,
-        using: { [weak self] in self?.handleMediaServicesWereReset($0) }
-      )
-    )
-
-    observers.append(
-      notificationCenter.addObserver(
-        forName: UIApplication.willResignActiveNotification,
-        object: nil,
-        queue: nil,
-        using: { [weak self] in self?.handleApplicationWillResignActiveNotification($0) }
-      )
-    )
-
-    observers.append(
-      notificationCenter.addObserver(
-        forName: UIApplication.didBecomeActiveNotification,
-        object: nil,
-        queue: nil,
-        using: { [weak self] in self?.handleApplicationDidBecomeActive($0) })
-    )
-  }
-
-  func handleApplicationWillResignActiveNotification(_ notification: Notification) {
-    state = .interrupted(playing: queue.sync { player?.state.isPlaying ?? false })
-    player?.pause()
-  }
-
-  func handleApplicationDidBecomeActive(_ notification: Notification) {
-    let shouldResume = state.shouldResume
-    state = .normal
-    if shouldResume { player?.play() }
-  }
-
-  func handleInterruption(_ notification: Notification) {
-    guard let userInfo = notification.userInfo,
-          let typeValue = userInfo[AVAudioSessionInterruptionTypeKey] as? UInt,
-          let type = AVAudioSession.InterruptionType(rawValue: typeValue)
-    else { return }
-
-    switch type {
-    case .began:
-      state = .interrupted(playing: queue.sync { player?.state.isPlaying ?? false })
-      player?.pause()
-
-    case .ended:
-      let shouldResume = state.shouldResume
-      state = .normal
-
-      guard shouldResume,
-            let optionsValue = userInfo[AVAudioSessionInterruptionOptionKey] as? UInt,
-            AVAudioSession.InterruptionOptions(rawValue: optionsValue).contains(.shouldResume)
-      else { return }
-      player?.play()
-
-    @unknown default:
-      break
-    }
-  }
-
-  func handleMediaServicesWereReset(_ notification: Notification) {
-    let player = self.player
-    let recorder = self.recorder
-
-    self.player = nil
-    self.recorder = nil
-
-    playerNode = AVAudioPlayerNode()
-    raterNode = AVAudioUnitTimePitch()
-    engine = AVAudioEngine()
-    engine.attach(playerNode)
-    engine.attach(raterNode)
-
-    self.player = player
-    self.recorder = recorder
-  }
+
+    func setupObservers() {
+        observers.append(
+            notificationCenter.addObserver(
+                forName: AVAudioSession.interruptionNotification,
+                object: nil,
+                queue: nil,
+                using: { [weak self] in self?.handleInterruption($0) }
+            )
+        )
+
+        observers.append(
+            notificationCenter.addObserver(
+                forName: AVAudioSession.mediaServicesWereResetNotification,
+                object: nil,
+                queue: nil,
+                using: { [weak self] in self?.handleMediaServicesWereReset($0) }
+            )
+        )
+
+        observers.append(
+            notificationCenter.addObserver(
+                forName: UIApplication.willResignActiveNotification,
+                object: nil,
+                queue: nil,
+                using: { [weak self] in self?.handleApplicationWillResignActiveNotification($0) }
+            )
+        )
+
+        observers.append(
+            notificationCenter.addObserver(
+                forName: UIApplication.didBecomeActiveNotification,
+                object: nil,
+                queue: nil,
+                using: { [weak self] in self?.handleApplicationDidBecomeActive($0) })
+        )
+    }
+
+    func handleApplicationWillResignActiveNotification(_ notification: Notification) {
+        state = .interrupted(playing: queue.sync { player?.state.isPlaying ?? false })
+        player?.pause()
+    }
+
+    func handleApplicationDidBecomeActive(_ notification: Notification) {
+        let shouldResume = state.shouldResume
+        state = .normal
+        if shouldResume { player?.play() }
+    }
+
+    func handleInterruption(_ notification: Notification) {
+        guard let userInfo = notification.userInfo,
+              let typeValue = userInfo[AVAudioSessionInterruptionTypeKey] as? UInt,
+              let type = AVAudioSession.InterruptionType(rawValue: typeValue)
+        else { return }
+
+        switch type {
+        case .began:
+            state = .interrupted(playing: queue.sync { player?.state.isPlaying ?? false })
+            player?.pause()
+
+        case .ended:
+            let shouldResume = state.shouldResume
+            state = .normal
+
+            guard shouldResume,
+                  let optionsValue = userInfo[AVAudioSessionInterruptionOptionKey] as? UInt,
+                  AVAudioSession.InterruptionOptions(rawValue: optionsValue).contains(.shouldResume)
+            else { return }
+            player?.play()
+
+        @unknown default:
+            break
+        }
+    }
+
+    func handleMediaServicesWereReset(_ notification: Notification) {
+        let player = self.player
+        let recorder = self.recorder
+
+        self.player = nil
+        self.recorder = nil
+
+        playerNode = AVAudioPlayerNode()
+        raterNode = AVAudioUnitTimePitch()
+        engine = AVAudioEngine()
+        engine.attach(playerNode)
+        engine.attach(raterNode)
+
+        self.player = player
+        self.recorder = recorder
+    }
 }
 
 @available(iOS 13.0, *)
 extension AudioEngine {
-
-  static func createAudioSampleBuffer(from buffer: AVAudioPCMBuffer, time: AVAudioTime) throws -> CMSampleBuffer {
-    let audioBufferList = buffer.mutableAudioBufferList
-    let streamDescription = buffer.format.streamDescription.pointee
-    let timescale = CMTimeScale(streamDescription.mSampleRate)
-    let format = try CMAudioFormatDescription(audioStreamBasicDescription: streamDescription)
-    let sampleBuffer = try CMSampleBuffer(
-      dataBuffer: nil,
-      formatDescription: format,
-      numSamples: CMItemCount(buffer.frameLength),
-      sampleTimings: [
-        CMSampleTimingInfo(
-          duration: CMTime(value: 1, timescale: timescale),
-          presentationTimeStamp: CMTime(
-            seconds: AVAudioTime.seconds(forHostTime: time.hostTime),
-            preferredTimescale: timescale
-          ),
-          decodeTimeStamp: .invalid
-        )
-      ],
-      sampleSizes: []
-    )
-    try sampleBuffer.setDataBuffer(fromAudioBufferList: audioBufferList)
-    return sampleBuffer
-  }
+
+    static func createAudioSampleBuffer(from buffer: AVAudioPCMBuffer, time: AVAudioTime) throws -> CMSampleBuffer {
+        let audioBufferList = buffer.mutableAudioBufferList
+        let streamDescription = buffer.format.streamDescription.pointee
+        let timescale = CMTimeScale(streamDescription.mSampleRate)
+        let format = try CMAudioFormatDescription(audioStreamBasicDescription: streamDescription)
+        let sampleBuffer = try CMSampleBuffer(
+            dataBuffer: nil,
+            formatDescription: format,
+            numSamples: CMItemCount(buffer.frameLength),
+            sampleTimings: [
+                CMSampleTimingInfo(
+                    duration: CMTime(value: 1, timescale: timescale),
+                    presentationTimeStamp: CMTime(
+                        seconds: AVAudioTime.seconds(forHostTime: time.hostTime),
+                        preferredTimescale: timescale
+                    ),
+                    decodeTimeStamp: .invalid
+                )
+            ],
+            sampleSizes: []
+        )
+        try sampleBuffer.setDataBuffer(fromAudioBufferList: audioBufferList)
+        return sampleBuffer
+    }
 }
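Because tap installation and removal live in the `recorder` property's `didSet`, wiring the README's music-overlay path stays short. A sketch, under the assumption that `musicPlayer` is an `AudioEngine.Player` built as the README shows and `recorder` is an existing `BaseRecorder`:

```swift
// Assumed instances: musicPlayer (AudioEngine.Player), recorder (BaseRecorder).
let audioEngine = AudioEngine()
audioEngine.recorder = recorder   // installs the tap on mainMixerNode
audioEngine.player = musicPlayer  // routes player -> time pitch -> main mixer
audioEngine.player?.play()
```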
diff --git a/Sources/Outputs/VideoOutput/VideoOutput.State.swift b/Sources/Outputs/VideoOutput/VideoOutput.State.swift
index a6a6942..838a3a3 100644
--- a/Sources/Outputs/VideoOutput/VideoOutput.State.swift
+++ b/Sources/Outputs/VideoOutput/VideoOutput.State.swift
@@ -155,8 +155,8 @@ public enum VideoOutputState: Equatable {
       handler()
       return .canceled
 
-    case .recording,
-         .paused:
+    case let .recording(currentTime, _, _), let .paused(currentTime, _):
+      videoOutput.endSession(at: currentTime)
       videoOutput.finishWriting(completionHandler: handler)
       return .finished
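The hunk above makes `finish` close the sample session at the last known time before finishing the writer. For context, a sketch of the `AVAssetWriter` lifecycle this relies on; `url`, `input`, and `lastTime` are illustrative stand-ins for `VideoOutput`'s internals:

```swift
import AVFoundation

let writer = try AVAssetWriter(outputURL: url, fileType: .mov)
let input = AVAssetWriterInput(mediaType: .video, outputSettings: nil) // passthrough
writer.add(input)

writer.startWriting()
writer.startSession(atSourceTime: .zero)    // timestamp of the first sample
// ... append sample buffers ...
writer.endSession(atSourceTime: lastTime)   // samples after lastTime are trimmed
writer.finishWriting {
  // the file at `url` is complete here
}
```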
diff --git a/Sources/Outputs/VideoOutput/VideoOutput.swift b/Sources/Outputs/VideoOutput/VideoOutput.swift
index 131cbc9..fc4a276 100644
--- a/Sources/Outputs/VideoOutput/VideoOutput.swift
+++ b/Sources/Outputs/VideoOutput/VideoOutput.swift
@@ -91,7 +91,9 @@ final class VideoOutput {
     let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings.outputSettings)
     videoInput.expectsMediaDataInRealTime = true
     videoInput.transform = videoSettings.transform ?? .identity
-
+    if let metadata = videoSettings.metadata {
+      videoInput.metadata = metadata
+    }
     guard assetWriter.canAdd(videoInput) else { throw Error.cantAddVideoAssetWriterInput }
     assetWriter.add(videoInput)
     self.videoInput = videoInput
@@ -214,7 +216,9 @@ extension VideoOutput {
   }
 
   func finish(completionHandler handler: @escaping () -> Void) {
-    queue.async { [weak self] in self?.unsafeFinish(completionHandler: handler) }
+    queue.async { [weak self] in
+      self?.unsafeFinish(completionHandler: handler)
+    }
   }
 
   func cancel() {
diff --git a/Sources/Recorder/BaseRecorder/BaseRecorder.AudioInput.swift b/Sources/Recorder/BaseRecorder/BaseRecorder.AudioInput.swift
index 875ebcb..ffa69f1 100644
--- a/Sources/Recorder/BaseRecorder/BaseRecorder.AudioInput.swift
+++ b/Sources/Recorder/BaseRecorder/BaseRecorder.AudioInput.swift
@@ -88,10 +88,8 @@ extension BaseRecorder.AudioInput: ARSessionObserver {
 
 @available(iOS 13.0, *)
 extension BaseRecorder.AudioInput {
-
-  func audioEngine(
-    _ audioEngine: AudioEngine,
-    didOutputAudioSampleBuffer audioSampleBuffer: CMSampleBuffer
+
+  func audioEngine(didOutputAudioSampleBuffer audioSampleBuffer: CMSampleBuffer
   ) {
     guard started, useAudioEngine else { return }
     queue.async { [output] in output?(audioSampleBuffer) }
diff --git a/Sources/Recorder/CleanRecorder/CleanRecorder.swift b/Sources/Recorder/CleanRecorder/CleanRecorder.swift
index 37b2490..fa9b726 100644
--- a/Sources/Recorder/CleanRecorder/CleanRecorder.swift
+++ b/Sources/Recorder/CleanRecorder/CleanRecorder.swift
@@ -33,7 +33,7 @@ public final class CleanRecorder<T: CleanRecordable>: BaseRecorder,
   let videoInput: VideoInput
 
   init(_ cleanRecordable: T, timeScale: CMTimeScale = 600) {
-    let queue = DispatchQueue(label: "SCNRecorder.Processing.DispatchQueue", qos: .userInitiated)
+    let queue = DispatchQueue(label: "SCNRecorder.Processing.DispatchQueue", qos: .userInteractive)
 
     self.videoInput = VideoInput(
       cleanRecordable: cleanRecordable,
diff --git a/Sources/Recorder/SceneRecorder/SceneRecorder.swift b/Sources/Recorder/SceneRecorder/SceneRecorder.swift
index f2dd5b5..2f0788c 100644
--- a/Sources/Recorder/SceneRecorder/SceneRecorder.swift
+++ b/Sources/Recorder/SceneRecorder/SceneRecorder.swift
@@ -45,7 +45,7 @@ public final class SceneRecorder: BaseRecorder, Renderable, SCNSceneRendererDelegate
   ) throws {
     let queue = DispatchQueue(
       label: "SCNRecorder.Processing.DispatchQueue",
-      qos: .userInitiated
+      qos: .userInteractive
     )
     try self.init(
      videoInput: VideoInput(
diff --git a/Sources/VideoSettings.swift b/Sources/VideoSettings.swift
index 2b7af92..8b2e0d0 100644
--- a/Sources/VideoSettings.swift
+++ b/Sources/VideoSettings.swift
@@ -48,6 +48,10 @@ public struct VideoSettings {
 
   /// Be carefull, the value is not always obvious.
   public var transform: CGAffineTransform?
+
+  /// The metadata of the video
+  public var metadata: [AVMetadataItem]?
+
   var videoColorProperties: [String: String]?
 
   public init(
@@ -55,13 +59,15 @@ public struct VideoSettings {
     codec: Codec = .h264(),
     size: CGSize? = nil,
     scalingMode: ScalingMode = .resizeAspectFill,
-    transform: CGAffineTransform? = nil
+    transform: CGAffineTransform? = nil,
+    metadata: [AVMetadataItem]? = nil
   ) {
     self.fileType = fileType
     self.codec = codec
     self.size = size
     self.scalingMode = scalingMode
     self.transform = transform
+    self.metadata = metadata
   }
 }
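A usage sketch for the new `metadata` setting; `AVMutableMetadataItem` and the common identifier are standard AVFoundation API, and the remaining `VideoSettings` parameters are assumed to keep their defaults:

```swift
import AVFoundation

// Build a title item in the common key space.
let title = AVMutableMetadataItem()
title.identifier = .commonIdentifierTitle
title.value = "My Recording" as NSString
title.extendedLanguageTag = "und"

let settings = VideoSettings(metadata: [title])
// The writer's video input will carry this metadata (see VideoOutput.swift above).
```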