iOS 仿微信語音輸入動畫
阿新 • 發佈:2018-12-18
這篇是接著上一篇文章, 完成一個隨著語音輸入大小的變化, 而變化的動畫.
//
//  PBSpeechRecognizer.h
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018年 summerxx27. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

/// Callbacks for speech-recognition results and input-volume changes.
@protocol PBSpeechRecognizerProtocol <NSObject>

@optional

/// Called when a final transcription is available.
/// @param result The recognized text.
- (void)recognitionSuccess:(NSString *)result;

/// Called when recognition fails.
/// @param result A description of the failure.
- (void)recognitionFail:(NSString *)result;

/// Called periodically with the current input level.
/// @param value The metered level, scaled to [0, 120].
- (void)level:(float)value;

@end

/// Live speech-to-text with periodic microphone-level callbacks,
/// used to drive a WeChat-style voice input animation.
@interface PBSpeechRecognizer : NSObject

/// Receiver of recognition and level callbacks.
@property (nonatomic, weak) id<PBSpeechRecognizerProtocol> delegate;

/// Starts speech recognition and level metering.
- (void)startR;

/// Stops speech recognition and level metering.
- (void)stopR;

@end

NS_ASSUME_NONNULL_END
//
//  PBSpeechRecognizer.m
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018年 summerxx27. All rights reserved.
//

#import "PBSpeechRecognizer.h"
#import <Speech/Speech.h>

API_AVAILABLE(ios(10.0))
@interface PBSpeechRecognizer ()

@property (nonatomic, strong) AVAudioEngine *audioEngine;
@property (nonatomic, strong) SFSpeechRecognizer *speechRecognizer;
@property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
// FIX: keep the task so it can be cancelled; the original discarded it.
@property (nonatomic, strong) SFSpeechRecognitionTask *recognitionTask;
@property (nonatomic, strong) AVAudioRecorder *recorder;
@property (nonatomic, strong) NSTimer *levelTimer;

@end

@implementation PBSpeechRecognizer

#pragma mark - Public

/// Starts live recognition (zh-CN) and begins metering input volume.
- (void)startR {
    if (@available(iOS 10.0, *)) {
        [self startRecognition];
    }
    [self startLevelMetering];
}

/// Stops metering and recognition, tearing down the audio pipeline.
- (void)stopR {
    // Stop metering first so no further -level: callbacks fire.
    // FIX: the original invalidated the timer but never stopped the
    // recorder, and left both properties dangling.
    [self.levelTimer invalidate];
    self.levelTimer = nil;
    [self.recorder stop];
    self.recorder = nil;

    [[self.audioEngine inputNode] removeTapOnBus:0];
    [self.audioEngine stop];

    [self.recognitionRequest endAudio];
    self.recognitionRequest = nil;
    [self.recognitionTask cancel];
    self.recognitionTask = nil;
}

#pragma mark - Private

/// Configures the audio session, recognition request/task and engine tap.
- (void)startRecognition API_AVAILABLE(ios(10.0)) {
    if (!self.speechRecognizer) {
        // Recognize Simplified Chinese.
        NSLocale *locale = [NSLocale localeWithLocaleIdentifier:@"zh-CN"];
        self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
    }
    if (!self.audioEngine) {
        self.audioEngine = [[AVAudioEngine alloc] init];
    }

    // FIX: the original configured the session twice with conflicting
    // categories (Record, then PlayAndRecord further down). Configure it
    // once; PlayAndRecord also covers the metering recorder below.
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    [audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
                         mode:AVAudioSessionModeMeasurement
                      options:AVAudioSessionCategoryOptionDuckOthers
                        error:nil];
    [audioSession setActive:YES
                withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                      error:nil];

    // Finish anything left over from a previous -startR.
    [self.recognitionRequest endAudio];
    [self.recognitionTask cancel];
    self.recognitionTask = nil;

    self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    self.recognitionRequest.shouldReportPartialResults = YES; // live transcription

    // FIX: capture self weakly — these blocks outlive this call and, with
    // the repeating timer also retaining self, the object never deallocated.
    __weak typeof(self) weakSelf = self;
    self.recognitionTask =
        [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest
                                            resultHandler:^(SFSpeechRecognitionResult *_Nullable result,
                                                            NSError *_Nullable error) {
        __strong typeof(weakSelf) strongSelf = weakSelf;
        if (!strongSelf) {
            return;
        }
        if (result.isFinal) {
            NSLog(@"is final: %d result: %@", result.isFinal, result.bestTranscription.formattedString);
            if ([strongSelf.delegate respondsToSelector:@selector(recognitionSuccess:)]) {
                [strongSelf.delegate recognitionSuccess:result.bestTranscription.formattedString];
            }
        } else if (error) {
            // FIX: the original delegate call was commented out, so failures
            // were silent; it also fired for every partial result, not only
            // on error.
            if ([strongSelf.delegate respondsToSelector:@selector(recognitionFail:)]) {
                [strongSelf.delegate recognitionFail:error.localizedDescription];
            }
        }
    }];

    // Feed microphone buffers into the recognition request.
    AVAudioFormat *recordingFormat = [[self.audioEngine inputNode] outputFormatForBus:0];
    [[self.audioEngine inputNode] installTapOnBus:0
                                       bufferSize:1024
                                           format:recordingFormat
                                            block:^(AVAudioPCMBuffer *_Nonnull buffer,
                                                    AVAudioTime *_Nonnull when) {
        [weakSelf.recognitionRequest appendAudioPCMBuffer:buffer];
    }];
    [self.audioEngine prepare];
    [self.audioEngine startAndReturnError:nil];
}

/// Starts a throw-away AVAudioRecorder used purely for volume metering.
- (void)startLevelMetering {
    // Audio is discarded; the recorder exists only for -averagePowerForChannel:.
    NSURL *url = [NSURL fileURLWithPath:@"/dev/null"];
    NSDictionary *settings = @{
        AVSampleRateKey : @44100.0f,
        AVFormatIDKey : @(kAudioFormatAppleLossless),
        AVNumberOfChannelsKey : @2,
        AVEncoderAudioQualityKey : @(AVAudioQualityMax),
    };
    NSError *error = nil;
    self.recorder = [[AVAudioRecorder alloc] initWithURL:url settings:settings error:&error];
    if (!self.recorder) {
        NSLog(@"%@", [error description]);
        return;
    }
    [self.recorder prepareToRecord];
    self.recorder.meteringEnabled = YES;
    [self.recorder record];

    // FIX: invalidate any previous timer so repeated -startR calls don't
    // stack repeating timers.
    [self.levelTimer invalidate];
    self.levelTimer = [NSTimer scheduledTimerWithTimeInterval:1
                                                       target:self
                                                     selector:@selector(levelTimerCallback:)
                                                     userInfo:nil
                                                      repeats:YES];
    // NOTE(review): a repeating NSTimer retains its target; callers must
    // balance -startR with -stopR or this object will not deallocate.
}

/// Timer callback: converts the recorder's dB reading to [0, 120] and
/// forwards it to the delegate.
- (void)levelTimerCallback:(NSTimer *)timer {
    [self.recorder updateMeters];

    float level;                 // the linear 0.0 .. 1.0 value we need
    float minDecibels = -80.0f;  // anything quieter counts as silence
    float decibels = [self.recorder averagePowerForChannel:0];
    if (decibels < minDecibels) {
        level = 0.0f;
    } else if (decibels >= 0.0f) {
        level = 1.0f;
    } else {
        // Map dB to a perceptual 0..1 scale: convert to linear amplitude,
        // normalize over the [minDecibels, 0] range, then take the root.
        float root = 2.0f;
        float minAmp = powf(10.0f, 0.05f * minDecibels);
        float inverseAmpRange = 1.0f / (1.0f - minAmp);
        float amp = powf(10.0f, 0.05f * decibels);
        float adjAmp = (amp - minAmp) * inverseAmpRange;
        level = powf(adjAmp, 1.0f / root);
    }

    // Scale [0, 1] to [0, 120] and report via the delegate.
    if ([self.delegate respondsToSelector:@selector(level:)]) {
        [self.delegate level:120 * level];
    }
}

@end
通過Value的值來動態切換圖片就可以了, 或者不使用圖片而自己繪製話筒旁邊的小橫線.
/// Swaps the microphone image according to the metered input level (0–120).
/// FIX: the original used disjoint strict comparisons, so boundary values
/// (exactly 0, 10, 20, 25, 35, 45) matched no branch and the image silently
/// stayed stale. A cascading upper-bound check makes the ranges contiguous.
- (void)level:(float)value {
    NSString *imageName;
    if (value < 10) {
        imageName = @"v_1";
    } else if (value < 20) {
        imageName = @"v_2";
    } else if (value < 25) {
        imageName = @"v_3";
    } else if (value < 35) {
        imageName = @"v_4";
    } else if (value < 45) {
        imageName = @"v_5";
    } else {
        imageName = @"v_6";
    }
    _voiceView.image = [UIImage imageNamed:imageName];
}
這裡是長按方法
/// Handles the press-and-hold record gesture: Began starts recording, Ended
/// stops it, and dragging upward past the cancel threshold marks the
/// recording as cancelled.
- (void)longPress:(UILongPressGestureRecognizer *)gestureRecognizer {
    // NOTE(review): hard-coded for one screen size — should be derived from
    // self.view.bounds; confirm the intended threshold.
    static const CGFloat kCancelThresholdY = 513;

    CGPoint point = [gestureRecognizer locationInView:self.view];
    switch (gestureRecognizer.state) {
        case UIGestureRecognizerStateBegan:
            [self startRecording];
            break;

        case UIGestureRecognizerStateChanged:
            NSLog(@"y ========== %f", point.y);
            // Sliding the finger above the threshold cancels recognition.
            if (point.y < kCancelThresholdY) {
                _cancel = @"yes";
                NSLog(@"voice cancel"); // FIX: typo "cencel"
            }
            break;

        case UIGestureRecognizerStateEnded:
            [self stopRecording];
            break;

        case UIGestureRecognizerStateCancelled:
        case UIGestureRecognizerStateFailed:
            // FIX: the original left these branches empty, so an interrupted
            // gesture (incoming call, system alert) never stopped recording.
            [self stopRecording];
            break;

        default:
            break;
    }
}
當然這裡只是一個簡單的模擬, 更多細節待完善; 看似簡單, 實則不然.