Skip to content

Daspeak client docs

vericlient.daspeak.client.DaspeakClient

Bases: Client

Class to interact with the Daspeak API.

Source code in src/vericlient/daspeak/client.py
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
class DaspeakClient(Client):
    """Client for the Daspeak voice-biometrics API.

    Exposes the credential-generation, similarity and identification
    endpoints, and translates the service's error payloads into the
    package's exception hierarchy.
    """

    def __init__(
            self,
            apikey: str | None = None,
            timeout: int | None = None,
            environment: str | None = None,
            location: str | None = None,
            url: str | None = None,
            headers: dict | None = None,
    ) -> None:
        """Create the DaspeakClient class.

        Args:
            apikey: The API key to use
            timeout: The timeout to use in the requests
            environment: The environment to use
            location: The location to use
            url: The URL to use in case of a custom target
            headers: The headers to be used in the requests

        """
        api = APIs.DASPEAK.value
        super().__init__(
            api=api,
            apikey=apikey,
            timeout=timeout,
            environment=environment,
            location=location,
            url=url,
            headers=headers,
        )
        # Exception identifiers the service is known to emit; any other
        # payload is escalated via _raise_server_error().
        self._exceptions = [
            "AudioInputException",
            "SignalNoiseRatioException",
            "VoiceDurationIsNotEnoughException",
            "InvalidChannelException",
            "InsufficientQuality",
            "CalibrationNotAvailable",
            "ServerError",
            "InvalidCredential",
            "UnsupportedMediaType",
        ]
        # Dispatch table used by compare(): input model type -> implementation.
        self._compare_functions_map = {
            CompareCredential2AudioInput: self._compare_credential2audio,
            CompareAudio2AudioInput: self._compare_audio2audio,
            CompareCredential2CredentialInput: self._compare_credential2credential,
            CompareAudio2CredentialsInput: self._compare_audio2credentials,
            CompareCredential2CredentialsInput: self._compare_credential2credentials,
        }
        # Service exception name -> either an exception class (raised as-is)
        # or a bound handler that parses the payload and raises itself.
        self._exception_map = {
            "SignalNoiseRatioException": SignalNoiseRatioError,
            "VoiceDurationIsNotEnoughException": self._handle_voice_duration_error,
            "InvalidChannelException": InvalidSpecifiedChannelError,
            "InsufficientQuality": InsufficientQualityError,
            "CalibrationNotAvailable": self._handle_calibration_error,
            "InvalidCredential": InvalidCredentialError,
            "UnsupportedMediaType": UnsupportedMediaTypeError,
        }
        # Substrings of AudioInputException messages -> specific error class.
        self._audio_input_errors = {
            "more channels than": TooManyAudioChannelsError,
            "unsupported codec": UnsupportedAudioCodecError,
            "sample rate": UnsupportedSampleRateError,
            "duration is longer": AudioDurationTooLongError,
        }

    def alive(self) -> bool:
        """Check if the service is alive.

        Returns
            bool: True if the service is alive, False otherwise

        """
        response = self._get(endpoint=DaspeakEndpoints.ALIVE.value)
        accepted_status_code = 200
        return response.status_code == accepted_status_code

    def _handle_error_response(self, response: Response) -> None:
        """Translate an API error response into a specific exception.

        This method always raises; it never returns normally.

        Raises:
            VeriClientError: A subclass matching the service's exception name.
            ValueError: If the error payload is not recognized.

        """
        response_json = response.json()

        exception = response_json.get("exception")
        if not exception or exception not in self._exceptions:
            self._raise_server_error(response)

        if exception == "AudioInputException":
            # Audio-input failures share one exception name and are
            # disambiguated by the message content.
            error_message = response_json.get("error", "")
            for error_text, error_class in self._audio_input_errors.items():
                if error_text in error_message:
                    raise error_class
            raise ValueError(error_message)

        handler = self._exception_map.get(exception)
        if handler:
            if isinstance(handler, type) and issubclass(handler, VeriClientError):
                raise handler
            # Bound handlers parse details out of the message and raise.
            handler(response_json)

        raise ValueError(response_json.get("error", "Unknown error"))

    def _handle_voice_duration_error(self, response_json: dict) -> None:
        """Parse the detected net-speech seconds and raise the typed error."""
        error_message = response_json.get("error", "")
        # Assumes the third-from-last whitespace token is the seconds value,
        # e.g. "... only 1.2s of net speech ..." -- TODO confirm message format.
        net_speech_detected = float(error_message.split(" ")[-3].replace("s", ""))
        raise NetSpeechDurationIsNotEnoughError(net_speech_detected)

    def _handle_calibration_error(self, response_json: dict) -> None:
        """Parse the missing calibration name and raise the typed error."""
        error_message = response_json.get("error", "")
        # Assumes the calibration name is the third whitespace token of the
        # message -- TODO confirm message format.
        calibration = str(error_message.split(" ")[2])
        raise CalibrationNotAvailableError(calibration)

    def get_models(self) -> ModelsOutput:
        """Get the available biometric models in the service.

        Returns:
            The response from the service

        """
        response = self._get(endpoint=DaspeakEndpoints.MODELS.value)
        return ModelsOutput(status_code=response.status_code, **response.json())

    def generate_credential(self, data_model: GenerateCredentialInput) -> GenerateCredentialOutput:
        """Generate a credential from a WAV file.

        Args:
            data_model: The data required to generate the credential

        Returns:
            The response from the service

        Raises:
            ValueError: If the service returns an unrecognized error payload
            TooManyAudioChannelsError: If the audio has more channels than the service supports
            UnsupportedAudioCodecError: If the audio has an unsupported codec
            UnsupportedSampleRateError: If the audio has an unsupported sample rate
            AudioDurationTooLongError: If the audio duration is longer than the service supports
            SignalNoiseRatioError: If the signal-to-noise ratio is too low
            NetSpeechDurationIsNotEnoughError: If the net speech duration is not enough
            InvalidSpecifiedChannelError: If the specified channel is invalid
            InsufficientQualityError: If the audio quality is insufficient
            CalibrationNotAvailableError: If the calibration is not available
            UnsupportedMediaTypeError: If the media type is not supported

        """
        endpoint = DaspeakEndpoints.MODELS_HASH_CREDENTIAL_AUDIO.value.replace("<hash>", data_model.hash)
        audio = get_virtual_file(data_model.audio)
        files = {
            "audio": ("audio", audio, "audio/wav"),
        }
        data = {
            "channel": data_model.channel,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data, files=files)
        return GenerateCredentialOutput(status_code=response.status_code, **response.json())

    def compare(    # noqa: D417
            self,
            data_model: CompareInput,
        ) -> CompareCredential2AudioOutput | CompareAudio2AudioOutput | \
             CompareCredential2CredentialOutput | CompareAudio2CredentialsOutput | \
             CompareCredential2CredentialsOutput:
        """Compare two sets of data based on the provided input.

        Args:
            data_model (CompareCredential2AudioInput | CompareAudio2AudioInput | CompareCredential2CredentialInput | \
                        CompareAudio2CredentialsInput | CompareCredential2CredentialsInput):
                The data required to compare the audio files or credentials

        Returns:
            The response from the service, depending on the input type.

        Raises:
            TypeError: If the `data_model` is not an instance of `CompareInput`
            TooManyAudioChannelsError: If the audio has more channels than the service supports
            UnsupportedAudioCodecError: If the audio has an unsupported codec
            UnsupportedSampleRateError: If the audio has an unsupported sample rate
            AudioDurationTooLongError: If the audio duration is longer than the service supports
            SignalNoiseRatioError: If the signal-to-noise ratio is too low
            NetSpeechDurationIsNotEnoughError: If the net speech duration is not enough
            InvalidSpecifiedChannelError: If the specified channel is invalid
            InsufficientQualityError: If the audio quality is insufficient
            CalibrationNotAvailableError: If the calibration is not available
            InvalidCredentialError: If the credential is invalid
            UnsupportedMediaTypeError: If the media type is not supported

        """
        func = self._compare_functions_map.get(type(data_model))
        if func is None:
            # Unknown input type: dict.get returned None.  An explicit check
            # replaces the old `except AttributeError`, which never fired
            # (calling None raises TypeError) and could mask AttributeErrors
            # raised inside the dispatched function.
            error = "data_model must be an instance of CompareInput"
            raise TypeError(error)
        return func(data_model)

    def _compare_credential2audio(
            self,
            data_model: CompareCredential2AudioInput,
        ) -> CompareCredential2AudioOutput:
        """Compare a credential with an audio file.

        Args:
            data_model: The data required to compare the credential with the audio

        Returns:
            CompareCredential2AudioOutput: The response from the service

        """
        endpoint = DaspeakEndpoints.SIMILARITY_CREDENTIAL2AUDIO.value
        audio = get_virtual_file(data_model.audio_to_evaluate)
        files = {
            "audio_to_evaluate": ("audio", audio, "audio/wav"),
        }
        data = {
            "credential_reference": data_model.credential_reference,
            "channel": data_model.channel,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data, files=files)
        return CompareCredential2AudioOutput(status_code=response.status_code, **response.json())

    def _compare_audio2audio(self, data_model: CompareAudio2AudioInput) -> CompareAudio2AudioOutput:
        """Compare two audio files.

        Args:
            data_model: The data required to compare the audio files

        Returns:
            CompareAudio2AudioOutput: The response from the service

        """
        endpoint = DaspeakEndpoints.SIMILARITY_AUDIO2AUDIO.value
        audio_reference = get_virtual_file(data_model.audio_reference)
        audio_to_evaluate = get_virtual_file(data_model.audio_to_evaluate)
        files = {
            "audio_reference": ("audio", audio_reference, "audio/wav"),
            "audio_to_evaluate": ("audio", audio_to_evaluate, "audio/wav"),
        }
        data = {
            "channel_reference": data_model.channel_reference,
            "channel_to_evaluate": data_model.channel_to_evaluate,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data, files=files)
        return CompareAudio2AudioOutput(status_code=response.status_code, **response.json())

    def _compare_credential2credential(
            self,
            data_model: CompareCredential2CredentialInput,
        ) -> CompareCredential2CredentialOutput:
        """Compare two credentials.

        Args:
            data_model: The data required to compare the credentials

        Returns:
            CompareCredential2CredentialOutput: The response from the service

        """
        endpoint = DaspeakEndpoints.SIMILARITY_CREDENTIAL2CREDENTIAL.value
        data = {
            "credential_reference": data_model.credential_reference,
            "credential_to_evaluate": data_model.credential_to_evaluate,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data)
        return CompareCredential2CredentialOutput(status_code=response.status_code, **response.json())

    def _compare_audio2credentials(
        self,
        data_model: CompareAudio2CredentialsInput,
    ) -> CompareAudio2CredentialsOutput:
        """Compare an audio file with a list of credentials.

        Args:
            data_model: The data required to compare the audio file with the credentials

        Returns:
            CompareAudio2CredentialsOutput: The response from the service

        """
        endpoint = DaspeakEndpoints.IDENTIFICATION_AUDIO2CREDENTIALS.value
        audio = get_virtual_file(data_model.audio_reference)
        files = {
            "audio_reference": ("audio_reference", audio, "audio/wav"),
        }
        # The credential list travels as a JSON string in a form field.
        credential_list = json.dumps(data_model.credential_list)
        data = {
            "credential_list": credential_list,
            "channel": data_model.channel,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data, files=files)
        return CompareAudio2CredentialsOutput(status_code=response.status_code, **response.json())

    def _compare_credential2credentials(
        self,
        data_model: CompareCredential2CredentialsInput,
    ) -> CompareCredential2CredentialsOutput:
        """Compare a credential with a list of credentials.

        Args:
            data_model: The data required to compare the credential with the credentials

        Returns:
            CompareCredential2CredentialsOutput: The response from the service

        """
        endpoint = DaspeakEndpoints.IDENTIFICATION_CREDENTIAL2CREDENTIALS.value
        # The credential list travels as a JSON string in a form field.
        credential_list = json.dumps(data_model.credential_list)
        data = {
            "credential_reference": data_model.credential_reference,
            "credential_list": credential_list,
            "calibration": data_model.calibration,
        }
        response = self._post(endpoint=endpoint, data=data)
        return CompareCredential2CredentialsOutput(status_code=response.status_code, **response.json())

__init__(apikey=None, timeout=None, environment=None, location=None, url=None, headers=None)

Create the DaspeakClient class.

Parameters:

Name Type Description Default
apikey str | None

The API key to use

None
timeout int | None

The timeout to use in the requests

None
environment str | None

The environment to use

None
location str | None

The location to use

None
url str | None

The URL to use in case of a custom target

None
headers dict | None

The headers to be used in the requests

None
Source code in src/vericlient/daspeak/client.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
def __init__(
        self,
        apikey: str | None = None,
        timeout: int | None = None,
        environment: str | None = None,
        location: str | None = None,
        url: str | None = None,
        headers: dict | None = None,
) -> None:
    """Initialize a Daspeak API client.

    Args:
        apikey: The API key to use
        timeout: The timeout to use in the requests
        environment: The environment to use
        location: The location to use
        url: The URL to use in case of a custom target
        headers: The headers to be used in the requests

    """
    super().__init__(
        api=APIs.DASPEAK.value,
        apikey=apikey,
        timeout=timeout,
        environment=environment,
        location=location,
        url=url,
        headers=headers,
    )
    # Exception identifiers the service may emit; anything else is treated
    # as a generic server error.
    self._exceptions = [
        "AudioInputException",
        "SignalNoiseRatioException",
        "VoiceDurationIsNotEnoughException",
        "InvalidChannelException",
        "InsufficientQuality",
        "CalibrationNotAvailable",
        "ServerError",
        "InvalidCredential",
        "UnsupportedMediaType",
    ]
    # compare() dispatch table: input model type -> implementation method.
    self._compare_functions_map = {
        CompareCredential2AudioInput: self._compare_credential2audio,
        CompareAudio2AudioInput: self._compare_audio2audio,
        CompareCredential2CredentialInput: self._compare_credential2credential,
        CompareAudio2CredentialsInput: self._compare_audio2credentials,
        CompareCredential2CredentialsInput: self._compare_credential2credentials,
    }
    # Service exception name -> exception class or parsing handler.
    self._exception_map = {
        "SignalNoiseRatioException": SignalNoiseRatioError,
        "VoiceDurationIsNotEnoughException": self._handle_voice_duration_error,
        "InvalidChannelException": InvalidSpecifiedChannelError,
        "InsufficientQuality": InsufficientQualityError,
        "CalibrationNotAvailable": self._handle_calibration_error,
        "InvalidCredential": InvalidCredentialError,
        "UnsupportedMediaType": UnsupportedMediaTypeError,
    }
    # AudioInputException message substrings -> specific error class.
    self._audio_input_errors = {
        "more channels than": TooManyAudioChannelsError,
        "unsupported codec": UnsupportedAudioCodecError,
        "sample rate": UnsupportedSampleRateError,
        "duration is longer": AudioDurationTooLongError,
    }

alive()

Check if the service is alive.

Returns bool: True if the service is alive, False otherwise

Source code in src/vericlient/daspeak/client.py
108
109
110
111
112
113
114
115
116
117
def alive(self) -> bool:
    """Report whether the Daspeak service is reachable.

    Returns
        bool: True if the service is alive, False otherwise

    """
    status_ok = 200
    return self._get(endpoint=DaspeakEndpoints.ALIVE.value).status_code == status_ok

compare(data_model)

Compare two sets of data based on the provided input.

Parameters:

Name Type Description Default
data_model CompareCredential2AudioInput | CompareAudio2AudioInput | CompareCredential2CredentialInput | CompareAudio2CredentialsInput | CompareCredential2CredentialsInput

The data required to compare the audio files or credentials

required

Returns:

Type Description
CompareCredential2AudioOutput | CompareAudio2AudioOutput | CompareCredential2CredentialOutput | CompareAudio2CredentialsOutput | CompareCredential2CredentialsOutput

The response from the service, depending on the input type.

Raises:

Type Description
TypeError

If the data_model is not an instance of CompareInput

TooManyAudioChannelsError

If the audio has more channels than the service supports

UnsupportedAudioCodecError

If the audio has an unsupported codec

UnsupportedSampleRateError

If the audio has an unsupported sample rate

AudioDurationTooLongError

If the audio duration is longer than the service supports

SignalNoiseRatioError

If the signal-to-noise ratio is too low

NetSpeechDurationIsNotEnoughError

If the net speech duration is not enough

InvalidSpecifiedChannelError

If the specified channel is invalid

InsufficientQualityError

If the audio quality is insufficient

CalibrationNotAvailableError

If the calibration is not available

InvalidCredentialError

If the credential is invalid

UnsupportedMediaTypeError

If the media type is not supported

Source code in src/vericlient/daspeak/client.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def compare(    # noqa: D417
        self,
        data_model: CompareInput,
    ) -> CompareCredential2AudioOutput | CompareAudio2AudioOutput | \
         CompareCredential2CredentialOutput | CompareAudio2CredentialsOutput | \
         CompareCredential2CredentialsOutput:
    """Compare two sets of data based on the provided input.

    Args:
        data_model (CompareCredential2AudioInput | CompareAudio2AudioInput | CompareCredential2CredentialInput | \
                    CompareAudio2CredentialsInput | CompareCredential2CredentialsInput):
            The data required to compare the audio files or credentials

    Returns:
        The response from the service, depending on the input type.

    Raises:
        TypeError: If the `data_model` is not an instance of `CompareInput`
        TooManyAudioChannelsError: If the audio has more channels than the service supports
        UnsupportedAudioCodecError: If the audio has an unsupported codec
        UnsupportedSampleRateError: If the audio has an unsupported sample rate
        AudioDurationTooLongError: If the audio duration is longer than the service supports
        SignalNoiseRatioError: If the signal-to-noise ratio is too low
        NetSpeechDurationIsNotEnoughError: If the net speech duration is not enough
        InvalidSpecifiedChannelError: If the specified channel is invalid
        InsufficientQualityError: If the audio quality is insufficient
        CalibrationNotAvailableError: If the calibration is not available
        InvalidCredentialError: If the credential is invalid
        UnsupportedMediaTypeError: If the media type is not supported

    """
    func = self._compare_functions_map.get(type(data_model))
    if func is None:
        # Explicit check: calling None raises TypeError (not AttributeError),
        # so the previous `except AttributeError` never produced the friendly
        # message and could mask AttributeErrors from the dispatched function.
        error = "data_model must be an instance of CompareInput"
        raise TypeError(error)
    return func(data_model)

generate_credential(data_model)

Generate a credential from a WAV file.

Parameters:

Name Type Description Default
data_model GenerateCredentialInput

The data required to generate the credential

required

Returns:

Type Description
GenerateCredentialOutput

The response from the service

Raises:

Type Description
ValueError

If the data_model is not an instance of GenerateCredentialInput

TooManyAudioChannelsError

If the audio has more channels than the service supports

UnsupportedAudioCodecError

If the audio has an unsupported codec

UnsupportedSampleRateError

If the audio has an unsupported sample rate

AudioDurationTooLongError

If the audio duration is longer than the service supports

SignalNoiseRatioError

If the signal-to-noise ratio is too low

NetSpeechDurationIsNotEnoughError

If the net speech duration is not enough

InvalidSpecifiedChannelError

If the specified channel is invalid

InsufficientQualityError

If the audio quality is insufficient

CalibrationNotAvailableError

If the calibration is not available

UnsupportedMediaTypeError

If the media type is not supported

Source code in src/vericlient/daspeak/client.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
def generate_credential(self, data_model: GenerateCredentialInput) -> GenerateCredentialOutput:
    """Create a voice credential from a WAV audio sample.

    Args:
        data_model: The data required to generate the credential

    Returns:
        The response from the service

    Raises:
        TooManyAudioChannelsError: If the audio has more channels than the service supports
        UnsupportedAudioCodecError: If the audio has an unsupported codec
        UnsupportedSampleRateError: If the audio has an unsupported sample rate
        AudioDurationTooLongError: If the audio duration is longer than the service supports
        SignalNoiseRatioError: If the signal-to-noise ratio is too low
        NetSpeechDurationIsNotEnoughError: If the net speech duration is not enough
        InvalidSpecifiedChannelError: If the specified channel is invalid
        InsufficientQualityError: If the audio quality is insufficient
        CalibrationNotAvailableError: If the calibration is not available
        UnsupportedMediaTypeError: If the media type is not supported

    """
    # Substitute the model hash into the endpoint template.
    endpoint = DaspeakEndpoints.MODELS_HASH_CREDENTIAL_AUDIO.value.replace("<hash>", data_model.hash)
    wav = get_virtual_file(data_model.audio)
    response = self._post(
        endpoint=endpoint,
        data={
            "channel": data_model.channel,
            "calibration": data_model.calibration,
        },
        files={
            "audio": ("audio", wav, "audio/wav"),
        },
    )
    return GenerateCredentialOutput(status_code=response.status_code, **response.json())

get_models()

Get the available biometric models in the service.

Returns:

Type Description
ModelsOutput

The response from the service

Source code in src/vericlient/daspeak/client.py
152
153
154
155
156
157
158
159
160
def get_models(self) -> ModelsOutput:
    """Retrieve the biometric models available in the service.

    Returns:
        The response from the service

    """
    resp = self._get(endpoint=DaspeakEndpoints.MODELS.value)
    payload = resp.json()
    return ModelsOutput(status_code=resp.status_code, **payload)

vericlient.daspeak.models

Module to define the models for the Daspeak API.

CompareAudio2AudioInput

Bases: CompareInput

Input class for the similarity audio to audio endpoint.

Attributes:

Name Type Description
audio_reference str | bytes

The reference audio. It can be a path to a file or a bytes object with the audio content

audio_to_evaluate str | bytes

The audio to evaluate. It can be a path to a file or a bytes object with the audio content

channel_reference int

The nchannel of the reference audio if it is stereo

channel_to_evaluate int

The nchannel of the audio to evaluate if it is stereo

Source code in src/vericlient/daspeak/models.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
class CompareAudio2AudioInput(CompareInput):
    """Input class for the similarity audio to audio endpoint.

    Attributes:
        audio_reference: The reference audio.
            It can be a path to a file or a bytes object
            with the audio content
        audio_to_evaluate: The audio to evaluate.
            It can be a path to a file or a bytes object
            with the audio content
        channel_reference: The `nchannel` of the reference audio if it is stereo
        channel_to_evaluate: The `nchannel` of the audio to evaluate if it is stereo

    """

    audio_reference: str | bytes
    audio_to_evaluate: str | bytes
    # Both channels default to the first channel of the file.
    channel_reference: int = 1
    channel_to_evaluate: int = 1

    # Validates both audio fields: only a file path (str) or raw audio
    # content (bytes) is accepted.
    @field_validator("audio_reference", "audio_to_evaluate")
    def audio_ref_must_be_str_or_bytes(cls, value: object):
        if not isinstance(value, (str, bytes)):
            error = "audio must be a string or a bytes object"
            raise TypeError(error)
        return value

    class Config:
        # Needed so pydantic accepts the non-standard field types above.
        arbitrary_types_allowed = True

CompareAudio2AudioOutput

Bases: CompareOutput

Output class for the similarity audio to audio endpoint.

Attributes:

Name Type Description
model ModelMetadata

The model used to generate the credential

calibration str

The calibration used

authenticity_reference float

The authenticity of the reference audio sample

authenticity_to_evaluate float

The authenticity of the audio to evaluate

input_audio_duration_reference float

The duration of the reference audio

input_audio_duration_to_evaluate float

The duration of the audio to evaluate

net_speech_duration_reference float

The duration of the speech in the reference audio

net_speech_duration_to_evaluate float

The duration of the speech in the audio to evaluate

Source code in src/vericlient/daspeak/models.py
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
class CompareAudio2AudioOutput(CompareOutput):
    """Output class for the similarity audio to audio endpoint.

    Attributes:
        model: The model used to generate the credential
        calibration: The calibration used
        authenticity_reference: The authenticity of the reference audio sample
        authenticity_to_evaluate: The authenticity of the audio to evaluate
        input_audio_duration_reference: The duration of the reference audio
        input_audio_duration_to_evaluate: The duration of the audio to evaluate
        net_speech_duration_reference: The duration of the speech in the reference audio
        net_speech_duration_to_evaluate: The duration of the speech in the audio to evaluate

    """

    model: ModelMetadata
    calibration: str
    authenticity_reference: float
    authenticity_to_evaluate: float
    input_audio_duration_reference: float
    input_audio_duration_to_evaluate: float
    net_speech_duration_reference: float
    net_speech_duration_to_evaluate: float

    # Rounds both authenticity scores to three decimal places.
    @field_validator("authenticity_reference", "authenticity_to_evaluate")
    def round_value(cls, value: float) -> float:
        return round(value, 3)

CompareAudio2CredentialsInput

Bases: CompareInput

Input class for the identification audio to credentials endpoint.

Attributes:

Name Type Description
audio_reference str | bytes

The audio to evaluate. It can be a path to a file or a bytes object with the audio content

credential_list list[tuple[str, str]]

The credentials to compare the audio with. The list contains tuples with two strings: the id and the credential

channel int

The nchannel of the audio if it is stereo

Source code in src/vericlient/daspeak/models.py
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
class CompareAudio2CredentialsInput(CompareInput):
    """Input class for the identification audio to credentials endpoint.

    Attributes:
        audio_reference: The audio to evaluate.
            It can be a path to a file or a bytes object
            with the audio content
        credential_list: The credentials to compare the audio with.
            The list contains tuples with two strings: the id and the credential
        channel: The `nchannel` of the audio if it is stereo

    """

    audio_reference: str | bytes
    credential_list: list[tuple[str, str]]
    channel: int = 1

    # Only a file path (str) or raw audio content (bytes) is accepted.
    @field_validator("audio_reference")
    def must_be_str_or_bytes(cls, value: object):
        if not isinstance(value, (str, bytes)):
            error = "audio must be a string or a bytes object"
            raise TypeError(error)
        return value

    # Validates the (id, credential) pairs and converts them to the
    # dict shape the service expects.
    @field_validator("credential_list")
    def validate_and_build_list_format(cls, value: list):
        if not value:
            error = "credential_list must not be empty"
            raise ValueError(error)
        # Fixed typo in the user-facing message: "touples" -> "tuples".
        error = "credential_list must contain tuples with two strings"
        n_items = 2
        for item in value:
            if not isinstance(item, tuple) or len(item) != n_items:
                raise ValueError(error)
            if not all(isinstance(i, str) for i in item):
                raise ValueError(error)
        return [{"id": item[0], "credential": item[1]} for item in value]

    class Config:
        # Needed so pydantic accepts the non-standard field types above.
        arbitrary_types_allowed = True

CompareAudio2CredentialsOutput

Bases: DaspeakResponse

Output class for the identification audio to credentials endpoint.

Attributes:

Name Type Description
result dict

The result of the identification, a dictionary with the "id" and the "score" of the best match

scores list[dict]

The whole list of scores for each credential. The list contains dictionaries with two keys (and values): "id" and "score"

calibration str

The calibration used

model ModelMetadata

The model used to generate the credential

authenticity_reference float

The authenticity of the reference audio sample

input_audio_duration_reference float

The duration of the input audio

net_speech_duration_reference float

The duration of the speech in the audio

Source code in src/vericlient/daspeak/models.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
class CompareAudio2CredentialsOutput(DaspeakResponse):
    """Response model for the audio-to-credentials identification endpoint.

    Attributes:
        result: Best match of the identification: a dictionary holding the
            "id" and the "score" of the winning credential
        scores: Score for every compared credential, as a list of
            dictionaries each carrying an "id" and a "score"
        calibration: The calibration used
        model: The model used to generate the credential
        authenticity_reference: The authenticity of the reference audio sample
        input_audio_duration_reference: The duration of the input audio
        net_speech_duration_reference: The duration of the speech in the audio

    """

    result: dict
    scores: list[dict]
    calibration: str
    model: ModelMetadata
    authenticity_reference: float
    input_audio_duration_reference: float
    net_speech_duration_reference: float

    @field_validator("authenticity_reference")
    def round_value(cls, raw: float) -> float:
        """Report the authenticity with three-decimal precision."""
        return round(raw, 3)

CompareCredential2AudioInput

Bases: CompareInput

Input class for the similarity credential to audio endpoint.

Attributes:

Name Type Description
credential_reference str

The reference credential

audio_to_evaluate str | bytes

The audio to evaluate. It can be a path to a file or a bytes object with the audio content

channel int

The nchannel of the audio if it is stereo

Source code in src/vericlient/daspeak/models.py
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
class CompareCredential2AudioInput(CompareInput):
    """Input model for the credential-to-audio similarity endpoint.

    Attributes:
        credential_reference: The reference credential
        audio_to_evaluate: Audio to evaluate, given either as a path to a
            file or as a bytes object holding the audio content
        channel: The `nchannel` of the audio if it is stereo

    """

    credential_reference: str
    audio_to_evaluate: str | bytes
    channel: int = 1

    @field_validator("audio_to_evaluate")
    def must_be_str_or_bytes(cls, value: object):
        """Reject any value that is neither a string nor a bytes object."""
        if isinstance(value, (str, bytes)):
            return value
        error = "audio must be a string or a bytes object"
        raise TypeError(error)

    class Config:
        # Permit field types pydantic has no built-in validator for.
        arbitrary_types_allowed = True

CompareCredential2AudioOutput

Bases: CompareOutput

Output class for the similarity credential to audio endpoint.

Attributes:

Name Type Description
model ModelMetadata

The model used to generate the credential

calibration str

The calibration used

authenticity_to_evaluate float

The authenticity of the audio sample used

input_audio_duration float

The duration of the input audio

net_speech_duration float

The duration of the speech in the audio

Source code in src/vericlient/daspeak/models.py
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
class CompareCredential2AudioOutput(CompareOutput):
    """Output class for the similarity credential to audio endpoint.

    Attributes:
        model: The model used to generate the credential
        calibration: The calibration used
        authenticity_to_evaluate: The authenticity of the audio sample used
        input_audio_duration_to_evaluate: The duration of the input audio
        net_speech_duration_to_evaluate: The duration of the speech in the audio

    """

    model: ModelMetadata
    calibration: str
    authenticity_to_evaluate: float
    input_audio_duration_to_evaluate: float
    net_speech_duration_to_evaluate: float

    @field_validator("authenticity_to_evaluate")
    def round_value(cls, value: float) -> float:
        # Authenticity is reported with three-decimal precision.
        return round(value, 3)

CompareCredential2CredentialInput

Bases: CompareInput

Input class for the similarity credential to credential endpoint.

Attributes:

Name Type Description
credential_reference str

The reference credential

credential_to_evaluate str

The credential to evaluate

calibration str

The calibration to use

Source code in src/vericlient/daspeak/models.py
122
123
124
125
126
127
128
129
130
131
132
133
class CompareCredential2CredentialInput(CompareInput):
    """Input model for the credential-to-credential similarity endpoint.

    Attributes:
        credential_reference: The reference credential
        credential_to_evaluate: The credential to evaluate
        calibration: The calibration to use

    """

    credential_reference: str
    credential_to_evaluate: str

CompareCredential2CredentialOutput

Bases: CompareOutput

Output class for the similarity credential to credential endpoint.

Attributes:

Name Type Description
calibration str

The calibration used

Source code in src/vericlient/daspeak/models.py
136
137
138
139
140
141
142
143
144
class CompareCredential2CredentialOutput(CompareOutput):
    """Response model for the credential-to-credential similarity endpoint.

    Attributes:
        calibration: The calibration used

    """

    calibration: str

CompareCredential2CredentialsInput

Bases: CompareInput

Input class for the identification credential to credentials endpoint.

Attributes:

Name Type Description
credential_reference str

The reference credential

credential_list list[tuple[str, str]]

The credentials to compare the reference credential with. The list contains tuples with two strings: the id and the credential

Source code in src/vericlient/daspeak/models.py
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
class CompareCredential2CredentialsInput(CompareInput):
    """Input class for the identification credential to credentials endpoint.

    Attributes:
        credential_reference: The reference credential
        credential_list: The credentials to compare the reference credential with.
            The list contains tuples with two strings: the id and the credential

    """

    credential_reference: str
    credential_list: list[tuple[str, str]]

    @field_validator("credential_list")
    def validate_and_build_list_format(cls, value: list):
        """Validate the (id, credential) pairs and reshape them into dicts.

        Raises:
            ValueError: If the list is empty, or any item is not a tuple
                of exactly two strings.
        """
        if not value:
            error = "credential_list must not be empty"
            raise ValueError(error)
        # Fixed typo in the user-facing message: "touples" -> "tuples".
        error = "credential_list must contain tuples with two strings"
        n_items = 2
        for item in value:
            if not isinstance(item, tuple) or len(item) != n_items:
                raise ValueError(error)
            if not all(isinstance(i, str) for i in item):
                raise ValueError(error)
        # Reshape to the wire format expected by the API.
        return [{"id": item[0], "credential": item[1]} for item in value]

CompareCredential2CredentialsOutput

Bases: DaspeakResponse

Output class for the identification credential to credentials endpoint.

Attributes:

Name Type Description
result dict

The result of the identification, a dictionary with the "id" and the "score" of the best match

scores list[dict]

The whole list of scores for each credential. The list contains dictionaries with two keys (and values): "id" and "score"

calibration str

The calibration used

model str

The model used to generate the credential

Source code in src/vericlient/daspeak/models.py
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
class CompareCredential2CredentialsOutput(DaspeakResponse):
    """Output class for the identification credential to credentials endpoint.

    Attributes:
        result: The result of the identification, a dictionary with the "id" and the "score"
            of the best match
        scores: The whole list of scores for each credential.
            The list contains dictionaries with two keys (and values): "id" and "score"
        calibration: The calibration used

    """

    # NOTE(review): documentation elsewhere also describes a "model" attribute,
    # but no such field is declared here — confirm whether it was dropped
    # intentionally or is missing.
    result: dict
    scores: list[dict]
    calibration: str

CompareInput

Bases: BaseModel

Base class for the similarity inputs.

Attributes:

Name Type Description
calibration str

The calibration to use

Source code in src/vericlient/daspeak/models.py
 96
 97
 98
 99
100
101
102
103
104
class CompareInput(BaseModel):
    """Common base for every similarity input model.

    Attributes:
        calibration: The calibration to use

    """

    calibration: str = "telephone-channel"

CompareOutput

Bases: DaspeakResponse

Base class for the similarity outputs.

Attributes:

Name Type Description
score float

The similarity score between the two inputs

Source code in src/vericlient/daspeak/models.py
107
108
109
110
111
112
113
114
115
116
117
118
119
class CompareOutput(DaspeakResponse):
    """Common base for every similarity response model.

    Attributes:
        score: The similarity score between the two inputs

    """

    score: float

    @field_validator("score")
    def round_value(cls, raw: float) -> float:
        """Report the score with three-decimal precision."""
        return round(raw, 3)

DaspeakResponse

Bases: BaseModel

Base class for the Daspeak API responses.

Attributes:

Name Type Description
version str

The version of the API

status_code int

The status code of the response

Source code in src/vericlient/daspeak/models.py
 7
 8
 9
10
11
12
13
14
15
16
17
class DaspeakResponse(BaseModel):
    """Common base carrying the fields every Daspeak API response includes.

    Attributes:
        version: The version of the API
        status_code: The status code of the response

    """

    version: str
    status_code: int

GenerateCredentialInput

Bases: BaseModel

Input class for the generate credential endpoint.

Attributes:

Name Type Description
audio str | bytes

The audio to generate the credential with. It can be a path to a file or a bytes object with the audio content

hash str

The hash of the biometrics model to use

channel int

The nchannel of the audio if it is stereo

calibration str

The calibration to use

Source code in src/vericlient/daspeak/models.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
class GenerateCredentialInput(BaseModel):
    """Input model for the generate-credential endpoint.

    Attributes:
        audio: Audio to build the credential from, given either as a path
            to a file or as a bytes object with the audio content
        hash: The hash of the biometrics model to use
        channel: The `nchannel` of the audio if it is stereo
        calibration: The calibration to use

    """

    audio: str | bytes
    hash: str
    channel: int = 1
    calibration: str = "telephone-channel"

    @field_validator("audio")
    def must_be_str_or_bytes(cls, value: object):
        """Reject any value that is neither a string nor a bytes object."""
        if isinstance(value, (str, bytes)):
            return value
        error = "audio must be a string or a bytes object"
        raise TypeError(error)

    class Config:
        # Permit field types pydantic has no built-in validator for.
        arbitrary_types_allowed = True

GenerateCredentialOutput

Bases: DaspeakResponse

Output class for the generate credential endpoint.

Attributes:

Name Type Description
model ModelMetadata

The model used to generate the credential

credential str

The generated credential

authenticity float

The authenticity of the audio sample used

input_audio_duration float

The duration of the input audio

net_speech_duration float

The duration of the speech in the audio

Source code in src/vericlient/daspeak/models.py
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
class GenerateCredentialOutput(DaspeakResponse):
    """Response model for the generate-credential endpoint.

    Attributes:
        model: The model used to generate the credential
        credential: The generated credential
        authenticity: The authenticity of the audio sample used
        input_audio_duration: The duration of the input audio
        net_speech_duration: The duration of the speech in the audio

    """

    model: ModelMetadata
    credential: str
    authenticity: float
    input_audio_duration: float
    net_speech_duration: float

    @field_validator("authenticity")
    def round_authenticity(cls, raw: float) -> float:
        """Report the authenticity with three-decimal precision."""
        return round(raw, 3)

ModelMetadata

Bases: BaseModel

Metadata of the model used to generate the credential.

Attributes:

Name Type Description
hash str

The hash of the model

mode str

The mode of the model

Source code in src/vericlient/daspeak/models.py
60
61
62
63
64
65
66
67
68
69
70
class ModelMetadata(BaseModel):
    """Describes the biometrics model that produced a credential.

    Attributes:
        hash: The hash of the model
        mode: The mode of the model

    """

    hash: str
    mode: str

ModelsOutput

Bases: DaspeakResponse

Output class for the get models endpoint.

Attributes:

Name Type Description
models list

The available models in the service

Source code in src/vericlient/daspeak/models.py
20
21
22
23
24
25
26
27
28
class ModelsOutput(DaspeakResponse):
    """Response model for the get-models endpoint.

    Attributes:
        models: The available models in the service

    """

    models: list

vericlient.daspeak.exceptions

Module to define the exceptions for the Daspeak API.

AudioDurationTooLongError

Bases: AudioInputError

Exception raised for audio duration too long.

Source code in src/vericlient/daspeak/exceptions.py
35
36
37
38
39
40
class AudioDurationTooLongError(AudioInputError):
    """Raised when the provided audio exceeds the maximum allowed duration."""

    def __init__(self) -> None:
        super().__init__("The audio duration is too long, must be less than 30 seconds")

AudioInputError

Bases: DaspeakError

Exception raised for errors in the audio input.

Source code in src/vericlient/daspeak/exceptions.py
12
13
14
15
16
class AudioInputError(DaspeakError):
    """Base exception for problems with the provided audio input."""

    def __init__(self, message: str) -> None:
        super().__init__(message)

CalibrationNotAvailableError

Bases: DaspeakError

Exception raised for calibration not available.

Source code in src/vericlient/daspeak/exceptions.py
86
87
88
89
90
91
class CalibrationNotAvailableError(DaspeakError):
    """Raised when the requested calibration does not exist in the service."""

    def __init__(self, calibration: str) -> None:
        super().__init__(f"The calibration {calibration} is not available")

DaspeakError

Bases: VeriClientError

Base class for exceptions in the Daspeak API.

Source code in src/vericlient/daspeak/exceptions.py
5
6
7
8
9
class DaspeakError(VeriClientError):
    """Root of the Daspeak API exception hierarchy."""

    def __init__(self, message: str) -> None:
        super().__init__(message)

InsufficientQualityError

Bases: DaspeakError

Exception raised for insufficient quality.

Source code in src/vericlient/daspeak/exceptions.py
78
79
80
81
82
83
class InsufficientQualityError(DaspeakError):
    """Raised when the audio quality is too low for the operation."""

    def __init__(self) -> None:
        super().__init__("The audio quality is insufficient or may contain more than one speaker")

InvalidSpecifiedChannelError

Bases: DaspeakError

Exception raised for invalid specified channel.

Source code in src/vericlient/daspeak/exceptions.py
70
71
72
73
74
75
class InvalidSpecifiedChannelError(DaspeakError):
    """Raised when the requested audio channel is out of range."""

    def __init__(self) -> None:
        super().__init__("The specified channel is invalid, must be 1 or 2")

NetSpeechDurationIsNotEnoughError

Bases: DaspeakError

Exception raised for errors in the net speech duration.

Source code in src/vericlient/daspeak/exceptions.py
59
60
61
62
63
64
65
66
67
class NetSpeechDurationIsNotEnoughError(DaspeakError):
    """Raised when the audio does not contain enough net speech."""

    def __init__(self, net_speech_detected: float) -> None:
        super().__init__(
            f"You need at least 3 seconds of speech to perform the operation, "
            f"but only {net_speech_detected} seconds were detected",
        )

SignalNoiseRatioError

Bases: DaspeakError

Exception raised for errors in the signal noise ratio.

Source code in src/vericlient/daspeak/exceptions.py
51
52
53
54
55
56
class SignalNoiseRatioError(DaspeakError):
    """Raised when the audio's noise level is above the accepted threshold."""

    def __init__(self) -> None:
        super().__init__("Noise level of the audio exceeded")

TooManyAudioChannelsError

Bases: AudioInputError

Exception raised for too many audio channels.

Source code in src/vericlient/daspeak/exceptions.py
19
20
21
22
23
24
class TooManyAudioChannelsError(AudioInputError):
    """Raised when the audio has more channels than the service supports."""

    def __init__(self) -> None:
        super().__init__("The maximum allowed number of audio channels is 2, and the audio provided has more channels")

UnsupportedAudioCodecError

Bases: AudioInputError

Exception raised for unsupported audio codec.

Source code in src/vericlient/daspeak/exceptions.py
43
44
45
46
47
48
class UnsupportedAudioCodecError(AudioInputError):
    """Raised when the audio is encoded with a codec the service cannot read."""

    def __init__(self) -> None:
        super().__init__("The audio codec is not supported. Supported codecs are: 'PCM_16', 'ULAW', 'ALAW'")

UnsupportedSampleRateError

Bases: AudioInputError

Exception raised for unsupported sample rates.

Source code in src/vericlient/daspeak/exceptions.py
27
28
29
30
31
32
class UnsupportedSampleRateError(AudioInputError):
    """Raised when the audio sample rate is not one the service accepts."""

    def __init__(self) -> None:
        super().__init__("The sample rate of the audio is not supported, must be 8 Khz or 16 Khz")