Created
October 24, 2017 18:59
-
-
Save jevgenits/847a1de55bb6873da556c545bd083540 to your computer and use it in GitHub Desktop.
texttospeechsynthesizerobjc
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// HEADER FILE
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

NS_ASSUME_NONNULL_BEGIN

/// Speaks arbitrary text aloud via AVSpeechSynthesizer, choosing an
/// installed voice whose language matches the text, and managing the
/// shared audio session around playback (ducking other audio while speaking).
@interface TSTextToSpeechUtils : NSObject <AVSpeechSynthesizerDelegate>

/// Synthesizes and plays @c text. Does nothing when @c text is nil or empty.
/// Activates the shared AVAudioSession for playback and deactivates it again
/// once the last enqueued utterance finishes.
- (void)synthesizeSpeechForText:(nullable NSString *)text;

@end

NS_ASSUME_NONNULL_END
// IMPLEMENTATION FILE
#import "TSTextToSpeechUtils.h"

// Private state; kept out of the public header.
@interface TSTextToSpeechUtils ()

// Synthesizer owned for the lifetime of this object; its delegate is self.
@property (strong, nonatomic) AVSpeechSynthesizer *synthesizer;

// Most recently enqueued utterance. The delegate callback compares against
// it to decide when the final utterance has finished, so the audio session
// is only deactivated once the whole queue has drained.
@property (strong, nonatomic) AVSpeechUtterance *lastPlayingUtterance;

// Shared system audio session, captured when speech starts and released
// (deactivated) when the last utterance finishes.
@property (strong, nonatomic) AVAudioSession *audioSession;

@end
@implementation TSTextToSpeechUtils

#pragma mark - Lifecycle

- (instancetype)init
{
    if ((self = [super init])) {
        _synthesizer = [[AVSpeechSynthesizer alloc] init];
        _synthesizer.delegate = self;
    }
    return self;
}

#pragma mark - Public

/// Speaks the given text with a voice matching its detected language.
/// No-op for nil/empty text. Activates the shared audio session (ducking
/// other audio) for the duration of playback.
- (void)synthesizeSpeechForText:(NSString *)text
{
    if ([text length] == 0) {
        return;
    }

    self.audioSession = [AVAudioSession sharedInstance];

    // Activate the audio session so the utterance is audible, ducking any
    // other audio currently playing. Check the BOOL result, not the error
    // pointer — the pointer may be non-nil even on success.
    NSError *error = nil;
    if (![self.audioSession setCategory:AVAudioSessionCategoryPlayback
                            withOptions:AVAudioSessionCategoryOptionDuckOthers
                                  error:&error]) {
        NSLog(@"TSTextToSpeechUtils: failed to set audio category: %@", error);
    }
    if (![self.audioSession setActive:YES error:&error]) {
        NSLog(@"TSTextToSpeechUtils: failed to activate audio session: %@", error);
    }

    AVSpeechUtterance *utterance = [[AVSpeechUtterance alloc] initWithString:text];
    utterance.rate = AVSpeechUtteranceDefaultSpeechRate;
    utterance.voice = [AVSpeechSynthesisVoice voiceWithLanguage:[self detectLanguageFromText:text]];
    utterance.volume = 0.7;

    [self.synthesizer speakUtterance:utterance];
    self.lastPlayingUtterance = utterance;
}

#pragma mark - AVSpeechSynthesizerDelegate

- (void)speechSynthesizer:(AVSpeechSynthesizer *)synthesizer didFinishSpeechUtterance:(AVSpeechUtterance *)utterance
{
    // Only tear down once the most recently enqueued utterance finishes;
    // earlier utterances finishing means more speech is still queued.
    if (synthesizer == self.synthesizer && self.lastPlayingUtterance == utterance) {
        self.lastPlayingUtterance = nil;  // drop stale strong reference

        NSError *error = nil;
        // Notify other apps on deactivation so the audio we ducked can
        // resume at full volume.
        if (![self.audioSession setActive:NO
                              withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                                    error:&error]) {
            NSLog(@"TSTextToSpeechUtils: failed to deactivate audio session: %@", error);
        }
    }
}

#pragma mark - Private

/// Returns the BCP-47 identifier (e.g. @"en-GB") of an installed voice whose
/// language component matches the dominant language detected in @c text,
/// falling back to @"en-US" when detection fails or no voice matches.
- (NSString *)detectLanguageFromText:(NSString *)text
{
    NSLinguisticTagger *tagger =
        [[NSLinguisticTagger alloc] initWithTagSchemes:@[NSLinguisticTagSchemeLanguage]
                                               options:0];
    tagger.string = text;

    // The language tag of the first token stands in for the whole string.
    // May be nil or @"und" when the tagger cannot decide; the loop below
    // then simply finds no match.
    NSString *textLanguage = [tagger tagAtIndex:0
                                         scheme:NSLinguisticTagSchemeLanguage
                                     tokenRange:NULL
                                  sentenceRange:NULL];

    // Match against installed voices, comparing only the language part of
    // the voice's locale identifier (the "en" of "en-GB").
    for (AVSpeechSynthesisVoice *voice in [AVSpeechSynthesisVoice speechVoices]) {
        NSArray<NSString *> *parts = [voice.language componentsSeparatedByString:@"-"];
        if (parts.count > 0 && [parts.firstObject isEqualToString:textLanguage]) {
            return voice.language;
        }
    }

    // Default when the language is unknown or has no installed voice.
    return @"en-US";
}

@end
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment