Pick a voice from the available catalog, then stream synthesized audio together with character-level timing information for subtitles, captions, or lip-sync.
This example assumes that `using ElevenLabs;` is in scope and that `apiKey` contains your ElevenLabs API key.
// Select a voice from the account's catalog, then stream synthesized speech
// whose chunks carry character-level timing metadata (for captions/lip-sync).
using var elevenLabs = new ElevenLabsClient(apiKey);

// Choose a voice to synthesize with — here simply the first one returned.
var voiceList = await elevenLabs.Voices.GetAllAsync();
var selectedVoice = voiceList.Voices[0];

const string text = "Hello, this has timestamps.";
Console.WriteLine($"Using voice: {selectedVoice.Name} ({selectedVoice.VoiceId})");
Console.WriteLine($"Input text: {text}");

// Request streamed speech audio with timing metadata.
StreamingAudioChunkWithTimestampsResponseModel? initialChunk = null;
var receivedChunks = 0;

await foreach (var chunk in elevenLabs.TextToSpeech2.StreamWithTimestampsAsync(
    voiceId: selectedVoice.VoiceId,
    text: text,
    modelId: "eleven_multilingual_v2",
    outputFormat: TextToSpeechStreamWithTimestampsOutputFormat.Mp32205032))
{
    initialChunk ??= chunk;
    receivedChunks++;

    // Only the first chunk's alignment is printed; skip the rest early.
    if (receivedChunks != 1 || chunk.Alignment is not { } alignment)
    {
        continue;
    }

    // Print each character with its start/end time in seconds.
    // NOTE: the lifted comparison against a nullable Count means the loop
    // simply runs zero times when Characters is null.
    for (var index = 0; index < alignment.Characters?.Count; index++)
    {
        Console.WriteLine(
            $"'{alignment.Characters[index]}' " +
            $"{alignment.CharacterStartTimesSeconds?[index]:F3}s - " +
            $"{alignment.CharacterEndTimesSeconds?[index]:F3}s");
    }
}