1,979c /* ----------------------------------------------------------- */ /* */ /* ___ */ /* |_| | |_/ SPEECH */ /* | | | | \ RECOGNITION */ /* ========= SOFTWARE */ /* */ /* */ /* ----------------------------------------------------------- */ /* developed at: */ /* */ /* Speech Vision and Robotics group */ /* Cambridge University Engineering Department */ /* http://svr-www.eng.cam.ac.uk/ */ /* */ /* Entropic Cambridge Research Laboratory */ /* (now part of Microsoft) */ /* */ /* ----------------------------------------------------------- */ /* Copyright: Microsoft Corporation */ /* 1995-2000 Redmond, Washington USA */ /* http://www.microsoft.com */ /* */ /* 2001 Cambridge University */ /* Engineering Department */ /* */ /* Use of this software is governed by a License Agreement */ /* ** See the file License for the Conditions of Use ** */ /* ** This banner notice must not be removed ** */ /* */ /* ----------------------------------------------------------- */ /* File: HVite.c: recognise or align file or audio */ /* ----------------------------------------------------------- */ char *hvite_version = "!HVER!HVite: 3.1 [CUED 16/01/02]"; char *hvite_vc_id = "$Id: HVite.c,v 1.10 2002/01/16 18:11:29 ge204 Exp $"; #include "HShell.h" #include "HMem.h" #include "HMath.h" #include "HSigP.h" #include "HAudio.h" #include "HWave.h" #include "HVQ.h" #include "HParm.h" #include "HLabel.h" #include "HModel.h" #include "HUtil.h" #include "HTrain.h" #include "HAdapt.h" #include "HFB.h" #include "HDict.h" #include "HNet.h" #include "HRec.h" /* -------------------------- Trace Flags & Vars ------------------------ */ #define T_TOP 00001 /* Basic progress reporting */ #define T_OBS 00002 /* list observations */ #define T_FRS 00004 /* Frame by frame best token */ #define T_MEM 00010 /* Memory usage, start and finish */ #define T_MMU 00020 /* Memory usage after each utterance */ static int trace = 0; Boolean traceHFB = FALSE; /* pass to HFB to retain top-level tracing */ /* 
-------------------------- Global Variables etc ---------------------- */

/* NOTE(review): stray debug counter, not part of the original HTK tool;
   only referenced by a dead debug statement in ProcessFile -- candidate
   for removal once that reference is gone. */
static int obsidx=0;

/* Doing what */
static int nToks = 0;           /* Number of tokens for N best */
static int nTrans = 1;          /* Number of transcriptions for N best */
static Boolean states = FALSE;  /* Keep track of state alignment */
static Boolean models = FALSE;  /* Keep track of model alignment */

/* With what */
static char *datFN;             /* Speech file */
static char *dictFn;            /* Dictionary */
static char *wdNetFn = NULL;    /* Word level lattice */
static char *hmmListFn;         /* HMMs */
static char * hmmDir = NULL;    /* directory to look for hmm def files */
static char * hmmExt = NULL;    /* hmm def file extension */
static Boolean loadLabels = FALSE;   /* Load label files for alignment */
static Boolean loadNetworks = FALSE; /* Load network for each file */
static LabId bndId = NULL;      /* Boundary word for alignment */

/* Results and formats */
static char * labDir = NULL;    /* output label file directory */
static char * labExt = "rec";   /* output label file extension */
static char * labForm = NULL;   /* output label reformat */
static char * latForm = NULL;   /* output lattice format */
static char * labInDir = NULL;  /* input network/label file directory */
static char * labInExt = "lab"; /* input network/label file extension */
static char * latExt = NULL;    /* output lattice file extension */
static FileFormat dfmt=UNDEFF;  /* Data input file format */
static FileFormat ifmt=UNDEFF;  /* Label input file format */
static FileFormat ofmt=UNDEFF;  /* Label output file format */
static Boolean saveAudioOut=FALSE; /* Save rec output from direct audio */
static char * roPrefix=NULL;    /* Prefix for direct audio output name */
static char * roSuffix=NULL;    /* Suffix for direct audio output name */
static int roCounter = 0;       /* Counter for audio output name */
static Boolean replay = FALSE;  /* enable audio replay */

/* Language model */
static double lmScale = 1.0;    /* bigram and log(1/NSucc) scale factor */
static LogDouble wordPen = 0.0; /* inter model propagation log prob */
static double prScale = 1.0;    /* pronunciation scale factor */

/* Pruning */
static LogDouble genBeam = -LZERO;    /* genBeam threshold */
static LogDouble genBeamInc = 0.0;    /* increment */
static LogDouble genBeamLim = -LZERO; /* max value */
static LogDouble nBeam = 0.0;         /* nBeam threshold */
static LogDouble wordBeam = -LZERO;   /* word-end pruning threshold */
static LogFloat tmBeam = 10.0;        /* tied mix prune threshold */
static int maxActive = 0;             /* max active phone instances */

/* Global variables */
static Observation obs;         /* current observation */
static HMMSet hset;             /* the HMM set */
static Vocab vocab;             /* the dictionary */
static Lattice *wdNet;          /* the word level recognition network */
static PSetInfo *psi;           /* Private data used by HRec */
static VRecInfo *vri;           /* Visible HRec Info */
static int maxM = 0;            /* max mixtures in any model */
static int maxMixInS[SMAX];     /* array[1..swidth[0]] of max mixes */

/* Global adaptation variables */
static char *transFile=NULL;    /* MLLR transform file */
static char *outTransFile=NULL; /* output an MLLR transform file */
static RegTransInfo *rt;        /* Regression classes transforms storage */
static int update = 0;          /* Perfom MLLR & update every n utts */
static UttInfo *utt;            /* utterance info for state/frame align */
static FBInfo *fbInfo;          /* forward-backward info for alignment */
static PSetInfo *alignpsi;      /* Private data used by HRec */
static VRecInfo *alignvri;      /* Visible HRec Info */
static Boolean saveBinary=FALSE;/* Save tmf in binary format */
static char *uid=NULL;          /* User id in tmf */
static char *uname=NULL;        /* User name in tmf */
static char *chan=NULL;         /* User channel in tmf */
static char *desc=NULL;         /* User general description in tmf */

/* Heaps */
static MemHeap ansHeap;         /* answer transcriptions/lattices */
static MemHeap modelHeap;       /* HMM set storage */
static MemHeap netHeap;         /* expanded recognition network */
static MemHeap bufHeap;         /* input parameter buffers */
static MemHeap repHeap;         /* audio replay buffers */
static MemHeap regHeap;         /* regression class / adaptation storage */

/* ---------------- Configuration Parameters --------------------- */

static ConfParam
*cParm[MAXGLOBS]; static int nParm = 0; /* total num params */ /* ---------------- Process Command Line ------------------------- */ /* SetConfParms: set conf parms relevant to this tool */ void SetConfParms(void) { int i; Boolean b; char buf[MAXSTRLEN]; nParm = GetConfig("HVITE", TRUE, cParm, MAXGLOBS); if (nParm>0){ if (GetConfInt(cParm,nParm,"TRACE",&i)) trace = i; if (GetConfStr(cParm,nParm,"RECOUTPREFIX",buf)) roPrefix=CopyString(&gstack,buf); if (GetConfStr(cParm,nParm,"RECOUTSUFFIX",buf)) roSuffix=CopyString(&gstack,buf); if (GetConfBool(cParm,nParm,"SAVEBINARY",&b)) saveBinary = b; } } void ReportUsage(void) { printf("\nUSAGE: HVite [options] VocabFile HMMList DataFiles...\n\n"); printf(" Option Default\n\n"); printf(" -a align from label files off\n"); printf(" -b s def s as utterance boundary word none\n"); printf(" -c f tied mixture pruning threshold 10.0\n"); printf(" -d s dir to find hmm definitions current\n"); printf(" -e save direct audio rec output off\n"); printf(" -f output full state alignment off\n"); printf(" -g enable audio replay off\n"); printf(" -i s Output transcriptions to MLF s off\n"); printf(" -j i Online MLLR adaptation off\n"); printf(" Perform update every i utterances \n"); printf(" -k s1 s2 Save s2 for field s1 in tmf defaults saved\n"); printf(" -l s dir to store label/lattice files current\n"); printf(" -m output model alignment off\n"); printf(" -n i [N] N-best recognition (using i tokens) off\n"); printf(" -o s output label formating NCSTWMX none\n"); printf(" -p f inter model trans penalty (log) 0.0\n"); printf(" -q s output lattice formating ABtvaldmn tvaldmn\n"); printf(" -r f pronunciation prob scale factor 1.0\n"); printf(" -s f grammar scale factor 1.0\n"); printf(" -t f [f f] set pruning threshold 0.0\n"); printf(" -u i set pruning max active 0\n"); printf(" -v f set word end pruning threshold 0.0\n"); printf(" -w [s] recognise from network off\n"); printf(" -x s extension for hmm files none\n"); printf(" -y s output 
label file extension rec\n"); printf(" -z s generate lattices with extension s off\n"); PrintStdOpts("BFGHIJKLPSX"); printf("\n\n"); } int main(int argc, char *argv[]) { char *s, *c; char fmt[MAXSTRLEN]; void Initialise(void); void DoRecognition(void); void DoAlignment(void); if(InitShell(argc,argv,hvite_version,hvite_vc_id)1) HError(3230,"HVite: Alignment using multiple tokens is not supported"); if (NumArgs()==0 && wdNetFn==NULL) HError(3230,"HVite: Network must be specified for recognition from audio"); if (loadNetworks && loadLabels) HError(3230,"HVite: Must choose either alignment from network or labels"); if (nToks>1 && latExt==NULL && nTrans==1) HError(-3230,"HVite: Performing nbest recognition with no nbest output"); Initialise(); /* Process the data */ if (wdNetFn==NULL) DoAlignment(); else DoRecognition(); /* Free up and we are done */ if (trace & T_MEM) { printf("Memory State on Completion\n"); PrintAllHeapStats(); } DeleteVRecInfo(vri); /*HRec*/ ResetHeap(&netHeap); FreePSetInfo(psi); /*HRec*/ if (outTransFile != NULL) SaveTransformSet(&hset, rt, outTransFile, NULL, uid, uname, /*HADAPT*/ chan, desc, FALSE, FALSE, saveBinary); ResetHeap(®Heap); ResetHeap(&modelHeap); Exit(0); return (0); /* never reached -- make compiler happy */ } /* --------------------------- Initialisation ----------------------- */ /* Initialise: set up global data structures */ void Initialise(void) { Boolean loadTransStats=FALSE; Boolean eSep; int s; /* Load hmms, convert to inverse DiagC */ if(MakeHMMSet(&hset,hmmListFn) 0) { CreateHeap(®Heap, "regClassStore", MSTAK, 1, 0.5, 1000, 8000 ); rt = (RegTransInfo *) New(®Heap, sizeof(RegTransInfo)); rt->nBlocks = 0; rt->classKind = DEF_REGCLASS; rt->adptSil = TRI_UNDEF; rt->nodeOccThresh = 0.0; /* if online adaptation then initailise some extra structures */ if (update > 0) { InitialiseTransform(&hset, ®Heap, rt, TRUE); /* initialise structures for the f-b frame-state alignment pass */ utt = (UttInfo *) New(®Heap, sizeof(UttInfo)); 
fbInfo = (FBInfo *) New(®Heap, sizeof(FBInfo)); /* initialise a recogniser for frame/state alignment purposes */ alignpsi=InitPSetInfo(&hset); alignvri=InitVRecInfo(alignpsi,1,TRUE,FALSE); SetPruningLevels(alignvri,0,genBeam,-LZERO,0.0,tmBeam); /* initialise core structures and memory for the adaptation process */ InitialiseAdapt(&hset, ®Heap, rt); InitUttInfo(utt, FALSE); InitialiseForBack(fbInfo, ®Heap, &hset, rt, (UPDSet) (UPADAPT|UPMIXES), genBeam*2.0, genBeam*2.0, genBeam*4.0+1.0, 10.0); utt->twoDataFiles = FALSE; utt->S = hset.swidth[0]; } else InitialiseTransform(&hset, ®Heap, rt, FALSE); /*//HAdapt*/ } if (transFile != NULL) { if (rt->rtree == NULL) HError(3232, "Main: Error loading the MLLR transforms!\n"); LoadTransformSet(&hset, transFile, uid, rt, &loadTransStats); /*//HAdapt*/ if (rt->transId->name != NULL && rt->transId->uid != NULL) printf("Loaded speaker transforms for %s (%s)\n", rt->transId->name, rt->transId->uid); ApplyTransforms(rt); /*//HAdapt*/ } /* Create observation and storage for input buffer */ SetStreamWidths(hset.pkind,hset.vecSize,hset.swidth,&eSep); /*//HParm*/ obs=MakeObservation(&gstack,hset.swidth,hset.pkind, /*//HParm*/ hset.hsKind==DISCRETEHS,eSep); CreateHeap(&bufHeap,"Input Buffer heap",MSTAK,1,0.0,50000,50000); CreateHeap(&repHeap,"Replay Buffer heap",MSTAK,1,0.0,50000,50000); maxM = MaxMixInSet(&hset); for (s=1; s<=hset.swidth[0]; s++) maxMixInS[s] = MaxMixInSetS(&hset, s); if (trace&T_TOP) { printf("Read %d physical / %d logical HMMs\n", hset.numPhyHMM,hset.numLogHMM); fflush(stdout); } /* Initialise recogniser */ if (nToks>1) nBeam=genBeam; psi=InitPSetInfo(&hset); /*//HRec*/ vri=InitVRecInfo(psi,nToks,models,states); /*//HRec*/ /* Read dictionary and create storage for lattice */ InitVocab(&vocab); /*//HDict*/ if(ReadDict(dictFn,&vocab) 0 ); CloseAudioOutput(ao); } } /* DoOnlineAdaptation: Perform unsupervised online adaptation using the recognition hypothesis as the transcription */ int DoOnlineAdaptation(Lattice *lat, 
ParmBuf pbuf, int nFrames) { Transcription *modelTrans, *trans; BufferInfo pbinfo; Lattice *alignLat, *wordNet; Network *alignNet; int i; GetBufferInfo(pbuf,&pbinfo); trans=TranscriptionFromLattice(&netHeap,lat,1); wordNet=LatticeFromLabels(GetLabelList(trans,1),bndId, &vocab,&netHeap); alignNet=ExpandWordNet(&netHeap,wordNet,&vocab,&hset); StartRecognition(alignvri,alignNet,0.0,0.0,0.0); /* do forced alignment */ for (i = 0; i < nFrames; i++) { ReadAsTable(pbuf, i, &obs); ProcessObservation(alignvri,&obs,-1); } alignLat=CompleteRecognition(alignvri, pbinfo.tgtSampRate/10000000.0, &netHeap); if (alignvri->noTokenSurvived) { Dispose(&netHeap, trans); /* Return value 0 to indicate zero frames process failed */ return 0; } modelTrans=TranscriptionFromLattice(&netHeap,alignLat,1); /* format the transcription so that it contains just the models */ FormatTranscription(modelTrans,pbinfo.tgtSampRate,FALSE,TRUE, FALSE,FALSE,TRUE,FALSE,TRUE,TRUE, FALSE); /* Now do the frame/state alignment accumulating MLLR statistics */ /* set the various values in the utterance storage */ utt->tr = modelTrans; utt->pbuf = pbuf; utt->Q = CountLabs(utt->tr->head); utt->T = nFrames; utt->ot = obs; /* do frame state alignment and accumulate statistics */ if (!FBFile(fbInfo, utt, NULL)) nFrames = 0; Dispose(&netHeap, trans); if (trace&T_TOP) { printf("Accumulated statistics...\n"); fflush(stdout); } return nFrames; } /* ProcessFile: process given file. 
If fn=NULL then direct audio */ Boolean ProcessFile(char *fn, Network *net, int utterNum, LogDouble currGenBeam, Boolean restartable) { FILE *file; ParmBuf pbuf; BufferInfo pbinfo; NetNode *d; Lattice *lat; LArc *arc,*cur; LNode *node; Transcription *trans; MLink m; LogFloat lmlk,aclk; int s,j,tact,nFrames; LatFormat form; char *p,lfn[255],buf1[80],buf2[80],thisFN[MAXSTRLEN]; Boolean enableOutput = TRUE, isPipe; if (fn!=NULL) strcpy(thisFN,fn); else if (fn==NULL && saveAudioOut) CounterFN(roPrefix,roSuffix,++roCounter,4,thisFN); /*//HShell*/ else enableOutput = FALSE; if((pbuf = OpenBuffer(&bufHeap,fn,50,dfmt,TRI_UNDEF,TRI_UNDEF))==NULL) HError(3250,"ProcessFile: Config parameters invalid"); /* Check pbuf same as hset */ GetBufferInfo(pbuf,&pbinfo); /*//HParm*/ if (pbinfo.tgtPK!=hset.pkind) HError(3231,"ProcessFile: Incompatible sample kind %s vs %s", ParmKind2Str(pbinfo.tgtPK,buf1), ParmKind2Str(hset.pkind,buf2)); if (pbinfo.a != NULL && replay) AttachReplayBuf(pbinfo.a, (int) (3*(1.0E+07/pbinfo.srcSampRate))); StartRecognition(vri,net,lmScale,wordPen,prScale); /*//HRec*/ SetPruningLevels(vri,maxActive,currGenBeam,wordBeam,nBeam,tmBeam); /*//HRec*/ tact=0;nFrames=0; StartBuffer(pbuf); /*//HParm, for live audio*/ while(BufferStatus(pbuf)!=PB_CLEARED) { ReadAsBuffer(pbuf,&obs); if (trace&T_OBS) PrintObservation(nFrames,&obs,13); if (hset.hsKind==DISCRETEHS){ for (s=1; s<=hset.swidth[0]; s++){ if( (obs.vq[s] < 1) || (obs.vq[s] > maxMixInS[s])) HError(3250,"ProcessFile: Discrete data value [ %d ] out of range in stream [ %d ] in file %s",obs.vq[s],s,fn); } } obsidx++; if(obsidx==42) printf(""); ProcessObservation(vri,&obs,-1); /*//HRec*/ if (trace & T_FRS) { for (d=vri->genMaxNode,j=0;j<30;d=d->links[0].node,j++) if (d->type==n_word) break; if (d->type==n_word){ if (d->info.pron==NULL) p=":bound:"; else p=d->info.pron->word->wordName->name; } else p=":external:"; m=FindMacroStruct(&hset,'h',vri->genMaxNode->info.hmm); /*//HModel*/ printf("Optimum @%-4d HMM: %s (%s) %d 
%5.3f\n", vri->frame,m->id->name,p, vri->nact,vri->genMaxTok.like/vri->frame); fflush(stdout); } nFrames++; tact+=vri->nact; } lat=CompleteRecognition(vri,pbinfo.tgtSampRate/10000000.0,&ansHeap); /*//HRec*/ if (lat==NULL) { if ((trace & T_TOP) && fn != NULL){ printf("No tokens survived to final node of network\n"); fflush(stdout); } else if (fn==NULL){ printf("Sorry [%d frames]?\n",nFrames);fflush(stdout); } if (pbinfo.a != NULL && replay) ReplayAudio(pbinfo); CloseBuffer(pbuf); return FALSE; } if (vri->noTokenSurvived && restartable) return FALSE; if (vri->noTokenSurvived && trace & T_TOP) { printf("No tokens survived to final node of network\n"); printf(" Output most likely partial hypothesis within network\n"); fflush(stdout); } lat->utterance=thisFN; lat->net=wdNetFn; lat->vocab=dictFn; if (trace & T_TOP || fn==NULL) { node=NULL; for (j=0;jnn;j++) { node=lat->lnodes+j; if (node->pred==NULL) break; node=NULL; } aclk=lmlk=0.0; while(node!=NULL) { for (arc=NULL,cur=node->foll;cur!=NULL;cur=cur->farc) arc=cur; if (arc==NULL) break; if (arc->end->word!=NULL) printf("%s ",arc->end->word->wordName->name); aclk+=arc->aclike+arc->prlike*lat->prscale; lmlk+=arc->lmlike*lat->lmscale+lat->wdpenalty; node=arc->end; } printf(" == [%d frames] %.4f [Ac=%.1f LM=%.1f] (Act=%.1f)\n",nFrames, (aclk+lmlk)/nFrames, aclk,lmlk,(float)tact/nFrames); fflush(stdout); } if (pbinfo.a != NULL && replay) ReplayAudio(pbinfo); /* accumulate stats for online unsupervised adaptation only if a token survived */ if ((lat != NULL) && (!vri->noTokenSurvived) && (update > 0)) DoOnlineAdaptation(lat, pbuf, nFrames); if (enableOutput){ if (nToks>1 && latExt!=NULL) { MakeFN(thisFN,labDir,latExt,lfn); if ((file=FOpen(lfn,NetOFilter,&isPipe))==NULL) HError(3211,"ProcessFile: Could not open file %s for lattice output",lfn); if (latForm==NULL) form=HLAT_DEFAULT; else { for (p=latForm,form=0;*p!=0;p++) { switch (*p) { case 'A': form|=HLAT_ALABS; break; case 'B': form|=HLAT_LBIN; break; case 't': 
form|=HLAT_TIMES; break; case 'v': form|=HLAT_PRON; break; case 'a': form|=HLAT_ACLIKE; break; case 'l': form|=HLAT_LMLIKE; break; case 'd': form|=HLAT_ALIGN; break; case 'm': form|=HLAT_ALDUR; break; case 'n': form|=HLAT_ALLIKE; break; case 'r': form|=HLAT_PRLIKE; break; } } } if(WriteLattice(lat,file,form)noTokenSurvived; } /* --------------------- Top Level Processing --------------------- */ /* DoAlignment: by creating network from transcriptions or lattices */ void DoAlignment(void) { FILE *nf; char lfn[255]; Transcription *trans; Network *net; Boolean isPipe; int n=0; LogDouble currGenBeam; if (trace&T_TOP) { if (loadNetworks) printf("New network will be used for each file\n"); else printf("Label file will be used to align each file\n"); fflush(stdout); } CreateHeap(&netHeap,"Net heap",MSTAK,1,0,8000,80000); while (NumArgs()>0) { if (NextArg() != STRINGARG) HError(3219,"DoAlignment: Data file name expected"); datFN = GetStrArg(); if (trace&T_TOP) { printf("Aligning File: %s\n",datFN); fflush(stdout); } MakeFN(datFN,labInDir,labInExt,lfn); if (loadNetworks) { if ( (nf = FOpen(lfn,NetFilter,&isPipe)) == NULL) HError(3210,"DoAlignment: Cannot open Word Net file %s",lfn); if((wdNet = ReadLattice(nf,&netHeap,&vocab,TRUE,FALSE))==NULL) HError(3210,"DoAlignment: ReadLattice failed"); FClose(nf,isPipe); if (trace&T_TOP) { printf("Read lattice with %d nodes / %d arcs\n", wdNet->nn,wdNet->na); fflush(stdout); } } else { trans=LOpen(&netHeap,lfn,ifmt); wdNet=LatticeFromLabels(GetLabelList(trans,1),bndId, &vocab,&netHeap); if (trace&T_TOP) { printf("Created lattice with %d nodes / %d arcs from label file\n", wdNet->nn,wdNet->na); fflush(stdout); } } net=ExpandWordNet(&netHeap,wdNet,&vocab,&hset); ++n; currGenBeam = genBeam; if (genBeamInc == 0.0) ProcessFile (datFN, net, n, currGenBeam, FALSE); else { Boolean completed; completed = ProcessFile (datFN, net, n, currGenBeam, TRUE); currGenBeam += genBeamInc; while (!completed && (currGenBeam <= genBeamLim - genBeamInc)) { 
completed = ProcessFile (datFN, net, n, currGenBeam, TRUE); currGenBeam += genBeamInc; } if (!completed) ProcessFile (datFN, net, n, currGenBeam, FALSE); } if (update > 0 && n%update == 0) { if (trace&T_TOP) { printf("Transforming model set\n"); fflush(stdout); } DoAdaptation(rt, FALSE); ClearRegCompStats(&hset, rt); } ResetHeap(&netHeap); } } /* DoRecognition: use single network to recognise each input utterance */ void DoRecognition(void) { FILE *nf; Network *net; Boolean isPipe; int n=0; if ( (nf = FOpen(wdNetFn,NetFilter,&isPipe)) == NULL) HError(3210,"DoRecognition: Cannot open Word Net file %s",wdNetFn); if((wdNet = ReadLattice(nf,&ansHeap,&vocab,TRUE,FALSE))==NULL) /*//HNet*/ HError(3210,"DoAlignment: ReadLattice failed"); FClose(nf,isPipe); if (trace&T_TOP) { printf("Read lattice with %d nodes / %d arcs\n",wdNet->nn,wdNet->na); fflush(stdout); } CreateHeap(&netHeap,"Net heap",MSTAK,1,0, wdNet->na*sizeof(NetLink),wdNet->na*sizeof(NetLink)); net = ExpandWordNet(&netHeap,wdNet,&vocab,&hset); /*//HNet*/ ResetHeap(&ansHeap); if (trace&T_TOP) { printf("Created network with %d nodes / %d links\n", net->numNode,net->numLink); fflush(stdout); } if (trace & T_MEM){ printf("Memory State Before Recognition\n"); PrintAllHeapStats(); } if (NumArgs()==0) { /* Process audio */ while(TRUE){ printf("\nREADY[%d]>\n",++n); fflush(stdout); ProcessFile(NULL,net,n,genBeam, FALSE); if (update > 0 && n%update == 0) { if (trace&T_TOP) { printf("Transforming model set\n"); fflush(stdout); } DoAdaptation(rt, FALSE); ClearRegCompStats(&hset, rt); } } } else { /* Process files */ while (NumArgs()>0) { if (NextArg()!=STRINGARG) HError(3219,"DoRecognition: Data file name expected"); datFN = GetStrArg(); if (trace&T_TOP) { printf("File: %s\n",datFN); fflush(stdout); } ProcessFile(datFN,net,n++,genBeam,FALSE); if (update > 0 && n%update == 0) { if (trace&T_TOP) { printf("Transforming model set\n"); fflush(stdout); } DoAdaptation(rt, FALSE); ClearRegCompStats(&hset, rt); } } } } /* 
----------------------------------------------------------- */
/*                      END:  HVite.c                          */
/* ----------------------------------------------------------- */