
Searched refs:hUtt (Results 1 – 9 of 9) sorted by relevance

/external/srec/srec/ca/
utt_basi.c
42 CA_Utterance *hUtt = NULL; in CA_AllocateUtterance() local
45 hUtt = (CA_Utterance *) CALLOC_CLR(1, sizeof(CA_Utterance), "ca.hUtt"); in CA_AllocateUtterance() local
46 hUtt->ca_rtti = CA_UTTERANCE_SIGNATURE; in CA_AllocateUtterance()
47 return (hUtt); in CA_AllocateUtterance()
50 END_CATCH_CA_EXCEPT(hUtt) in CA_AllocateUtterance()
54 void CA_FreeUtterance(CA_Utterance *hUtt) in CA_FreeUtterance() argument
57 ASSERT(hUtt); in CA_FreeUtterance()
59 FREE((char *) hUtt); in CA_FreeUtterance()
63 END_CATCH_CA_EXCEPT(hUtt) in CA_FreeUtterance()
67 int CA_InitUtteranceForFrontend(CA_Utterance *hUtt, in CA_InitUtteranceForFrontend() argument
[all …]
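
Taken together, the utt_basi.c hits cover the CA_Utterance lifecycle: allocation, initialization for the front end, and release. Below is a minimal lifecycle sketch, assuming CA_AllocateUtterance() takes no arguments (its parameter list is not shown above) and that simapi.h from the include/ results is the declaring header.

#include "simapi.h"

static void utterance_lifecycle_sketch(void)
{
    /* Allocation: CALLOC_CLR-backed handle stamped with CA_UTTERANCE_SIGNATURE. */
    CA_Utterance *hUtt = CA_AllocateUtterance();
    if (hUtt == NULL)
        return;

    /* CA_InitUtteranceForFrontend(hUtt, ...) would follow here; its remaining
       parameters are elided in the results above, so it is not called. */

    /* Release: CA_FreeUtterance() FREEs the handle. */
    CA_FreeUtterance(hUtt);
}
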
utt_data.c
39 int CA_SeekStartOfUtterance(CA_Utterance *hUtt) in CA_SeekStartOfUtterance() argument
44 ASSERT(hUtt); in CA_SeekStartOfUtterance()
46 if (utterance_started(&hUtt->data)) in CA_SeekStartOfUtterance()
48 if ((gap = getBlockGap(hUtt->data.gen_utt.frame)) > 0) in CA_SeekStartOfUtterance()
49 (void) setRECframePtr(hUtt->data.gen_utt.frame, gap, 1); in CA_SeekStartOfUtterance()
50 if (hUtt->data.gen_utt.frame->holdOffPeriod > 0) in CA_SeekStartOfUtterance()
51 …setRECframePtr(hUtt->data.gen_utt.frame, -MIN(hUtt->data.gen_utt.frame->holdOffPeriod, getFrameGap… in CA_SeekStartOfUtterance()
52 while (!(rec_frame_voicing_status(hUtt->data.gen_utt.frame) & VOICE_BIT)) in CA_SeekStartOfUtterance()
54 incRECframePtr(hUtt->data.gen_utt.frame); in CA_SeekStartOfUtterance()
55 if (getFrameGap(hUtt->data.gen_utt.frame) == 0) in CA_SeekStartOfUtterance()
[all …]
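
utt_data.c rewinds the utterance's frame buffer so decoding starts at (or just before) the first voiced frame. A hedged usage sketch, assuming a non-zero return from CA_SeekStartOfUtterance() means a voiced start frame was found (the return convention is not visible in the listing); CA_UtteranceHasVoicing() is taken from the simapi.h hits further down.

#include "simapi.h"

static int rewind_to_speech_start(CA_Utterance *hUtt)
{
    if (!CA_UtteranceHasVoicing(hUtt))
        return 0;                          /* nothing voiced yet */
    return CA_SeekStartOfUtterance(hUtt);  /* assumed: non-zero on success */
}
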
utt_proc.c
37 int CA_CalculateUtteranceStatistics(CA_Utterance *hUtt, int start, int end) in CA_CalculateUtteranceStatistics() argument
42 frames = get_background_statistics(hUtt->data.gen_utt.frame, in CA_CalculateUtteranceStatistics()
44 hUtt->data.gen_utt.backchan, in CA_CalculateUtteranceStatistics()
45 hUtt->data.gen_utt.num_chan, 1); in CA_CalculateUtteranceStatistics()
48 for (ii = 0; ii < hUtt->data.gen_utt.num_chan; ii++) in CA_CalculateUtteranceStatistics()
50 evaluate_parameters(hUtt->data.gen_utt.backchan[ii]); in CA_CalculateUtteranceStatistics()
57 END_CATCH_CA_EXCEPT(hUtt) in CA_CalculateUtteranceStatistics()
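
utt_proc.c feeds a frame range into get_background_statistics() and then evaluates the background channels. A sketch of the call; the frame offsets are chosen purely for illustration, and the exact meaning of start/end (absolute indices vs. offsets) is an assumption.

#include "simapi.h"

static void update_background_stats(CA_Utterance *hUtt)
{
    /* Hypothetical range: the last 100 frames up to the current position. */
    int start = -100;
    int end   = 0;
    (void) CA_CalculateUtteranceStatistics(hUtt, start, end);
}
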
ann_api.c
310 int CA_SegmentUtterance(CA_Annotation* hAnnotation, CA_Utterance* hUtt, in CA_SegmentUtterance() argument
326 ASSERT(hUtt); in CA_SegmentUtterance()
336 if (hUtt->data.utt_type == FILE_OUTPUT) in CA_SegmentUtterance()
338 if (hUtt->data.utt_type != FILE_INPUT && in CA_SegmentUtterance()
339 hUtt->data.utt_type != LIVE_INPUT) in CA_SegmentUtterance()
343 if (isFrameBufferActive(hUtt->data.gen_utt.frame)) in CA_SegmentUtterance()
359 &hUtt->data, in CA_SegmentUtterance()
390 peakC0[ii] = get_c0_peak_over_range(hUtt->data.gen_utt.frame, in CA_SegmentUtterance()
551 CA_Utterance *hUtt, CA_Annotation *hAnnotation) in CA_AddUttSegmentsToAcousticWhole() argument
558 ASSERT(hUtt); in CA_AddUttSegmentsToAcousticWhole()
[all …]
pat_basi.c
253 int CA_MakePatternFrame(CA_Pattern *hPattern, CA_Utterance *hUtt) in CA_MakePatternFrame() argument
261 ASSERT(hUtt); in CA_MakePatternFrame()
266 status_code = get_data_frame(hPattern->data.prep, &hUtt->data); in CA_MakePatternFrame()
271 swicms = hUtt->data.gen_utt.swicms; in CA_MakePatternFrame()
276 hUtt->data.gen_utt.channorm->dim); in CA_MakePatternFrame()
279 hUtt->data.gen_utt.channorm->dim); in CA_MakePatternFrame()
/external/srec/srec/cfront/
ca_cms.c
156 void CA_AttachCMStoUtterance(CA_Wave *hWave, CA_Utterance *hUtt) in CA_AttachCMStoUtterance() argument
163 ASSERT(hUtt); in CA_AttachCMStoUtterance()
171 hUtt->data.gen_utt.spchchan = hWave->data.channel->spchchan; in CA_AttachCMStoUtterance()
172 hUtt->data.gen_utt.channorm = hWave->data.channel->channorm; in CA_AttachCMStoUtterance()
173 hUtt->data.gen_utt.swicms = hWave->data.channel->swicms; in CA_AttachCMStoUtterance()
174 hUtt->data.gen_utt.do_channorm = True; in CA_AttachCMStoUtterance()
176 hUtt->data.gen_utt.num_chan = 3 * hWave->data.channel->mel_dim; in CA_AttachCMStoUtterance()
178 hUtt->data.gen_utt.num_chan = hWave->data.channel->mel_dim; in CA_AttachCMStoUtterance()
203 void CA_DetachCMSfromUtterance(CA_Wave *hWave, CA_Utterance *hUtt) in CA_DetachCMSfromUtterance() argument
210 if (hUtt && hUtt->data.gen_utt.do_channorm == False) in CA_DetachCMSfromUtterance()
[all …]
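
ca_cms.c wires the wave object's channel-normalization state (spchchan, channorm, swicms) into the utterance and sets num_chan from mel_dim. A bracketed attach/detach sketch; the requirement that attach and detach use the same CA_Wave is an assumption based on the mirrored signatures.

#include "simapi.h"

static void with_channel_normalization(CA_Wave *hWave, CA_Utterance *hUtt)
{
    CA_AttachCMStoUtterance(hWave, hUtt);    /* utterance now shares hWave's channorm/swicms */

    /* ...front-end processing on hUtt goes here... */

    CA_DetachCMSfromUtterance(hWave, hUtt);  /* drop the shared normalization state */
}
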
ca_front.c
124 int CA_MakeFrame(CA_Frontend *hFrontend, CA_Utterance *hUtt, CA_Wave *hWave) in CA_MakeFrame() argument
133 ASSERT(hUtt); in CA_MakeFrame()
136 ASSERT(hUtt->data.gen_utt.frame->uttDim <= MAX_CHAN_DIM); in CA_MakeFrame()
140 if (hUtt->data.utt_type != LIVE_INPUT) in CA_MakeFrame()
148 if (hUtt->data.gen_utt.frame->haveVoiced) in CA_MakeFrame()
168 if (pushSingleFEPframe(hUtt->data.gen_utt.frame, in CA_MakeFrame()
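
ca_front.c is where waveform samples become front-end frames pushed into the utterance's frame buffer (pushSingleFEPframe). A sketch of a driving loop, assuming CA_MakeFrame() returns the number of frames produced for the currently buffered audio (the listing shows only the int return type).

#include "simapi.h"

static int pump_frames(CA_Frontend *hFrontend, CA_Utterance *hUtt, CA_Wave *hWave)
{
    int total = 0;
    int made;

    /* Assumed: a return of 0 means no more frames can be made from the buffered samples. */
    while ((made = CA_MakeFrame(hFrontend, hUtt, hWave)) > 0)
        total += made;

    return total;
}
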
/external/srec/srec/include/
simapi.h
822 CA_Utterance *hUtt);
854 CA_Utterance *hUtt);
1040 void CA_FreeUtterance(CA_Utterance *hUtt);
1055 int CA_InitUtteranceForFrontend(CA_Utterance *hUtt,
1080 void CA_ClearUtterance(CA_Utterance *hUtt);
1096 CA_Utterance *hUtt);
1119 int CA_AdvanceUtteranceFrame(CA_Utterance *hUtt);
1132 int CA_UtteranceHasVoicing(CA_Utterance *hUtt);
1144 ESR_BOOL CA_IsUtteranceLockedForInput(CA_Utterance *hUtt);
1158 void CA_UnlockUtteranceForInput(CA_Utterance *hUtt);
[all …]
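
The simapi.h hits also show an input-locking pair around the utterance. A small sketch of the unlock-if-locked pattern; which component normally holds the lock (recognizer vs. audio callback) is an assumption.

#include "simapi.h"

static void release_input_lock(CA_Utterance *hUtt)
{
    if (CA_IsUtteranceLockedForInput(hUtt))
        CA_UnlockUtteranceForInput(hUtt);   /* allow new audio to be written */
}
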
frontapi.h
175 CA_Utterance* hUtt,
320 CA_Utterance *hUtt);
356 CA_Utterance *hUtt);