/*
 * asm/tbx.h
 *
 * Copyright (C) 2000-2012 Imagination Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * Thread binary interface header
 */

#ifndef _ASM_METAG_TBX_H_
#define _ASM_METAG_TBX_H_

/* for CACHEW_* values */
#include <asm/metag_isa.h>
/* for LINSYSEVENT_* addresses */
#include <asm/metag_mem.h>

#ifdef  TBI_1_4
#ifndef TBI_MUTEXES_1_4
#define TBI_MUTEXES_1_4
#endif
#ifndef TBI_SEMAPHORES_1_4
#define TBI_SEMAPHORES_1_4
#endif
#ifndef TBI_ASYNC_SWITCH_1_4
#define TBI_ASYNC_SWITCH_1_4
#endif
#ifndef TBI_FASTINT_1_4
#define TBI_FASTINT_1_4
#endif
#endif


/* Id values in the TBI system describe a segment using an arbitrary
   integer value and flags in the bottom 8 bits; the SIGPOLL value is
   used in cases where control over blocking or polling behaviour is
   needed. */
#define TBID_SIGPOLL_BIT    0x02 /* Set bit in an Id value to poll vs block */
/* Extended segment identifiers use strings in the string table */
#define TBID_IS_SEGSTR( Id )  (((Id) & (TBID_SEGTYPE_BITS>>1)) == 0)

/* Segment identifiers contain the following related bit-fields */
#define TBID_SEGTYPE_BITS   0x0F /* One of the predefined segment types */
#define TBID_SEGTYPE_S      0
#define TBID_SEGSCOPE_BITS  0x30 /* Indicates the scope of the segment */
#define TBID_SEGSCOPE_S     4
#define TBID_SEGGADDR_BITS  0xC0 /* Indicates access possible via pGAddr */
#define TBID_SEGGADDR_S     6

/* Segments of memory can only really contain a few types of data */
#define TBID_SEGTYPE_TEXT   0x02 /* Code segment */
#define TBID_SEGTYPE_DATA   0x04 /* Data segment */
#define TBID_SEGTYPE_STACK  0x06 /* Stack segment */
#define TBID_SEGTYPE_HEAP   0x0A /* Heap segment */
#define TBID_SEGTYPE_ROOT   0x0C /* Root block segments */
#define TBID_SEGTYPE_STRING 0x0E /* String table segment */

/* Segments have one of four possible scopes */
#define TBID_SEGSCOPE_INIT   0 /* Temporary area for initialisation phase */
#define TBID_SEGSCOPE_LOCAL  1 /* Private to this thread */
#define TBID_SEGSCOPE_GLOBAL 2 /* Shared globally throughout the system */
#define TBID_SEGSCOPE_SHARED 3 /* Limited sharing between local/global */

/* For a segment specifier a further field in two of the remaining bits
   indicates the usefulness of the pGAddr field in the segment
   descriptor. */
#define TBID_SEGGADDR_NULL  0 /* pGAddr is NULL -> SEGSCOPE_(LOCAL|INIT) */
#define TBID_SEGGADDR_READ  1 /* Only read    via pGAddr */
#define TBID_SEGGADDR_WRITE 2 /* Full access  via pGAddr */
#define TBID_SEGGADDR_EXEC  3 /* Only execute via pGAddr */

/* The following values are common to both segment and signal Id values and
   live in the top 8 bits of the Id values. */

/* The ISTAT bit indicates if segments are related to interrupt vs
   background level interfaces; a thread can still handle all triggers at
   either level, but can also split these up if it wants to. */
#define TBID_ISTAT_BIT      0x01000000
#define TBID_ISTAT_S        24
/* Privilege needed to access a segment is indicated by the next bit.

   This bit is set to mirror the current privilege level when starting a
   search for a segment - setting it yourself toggles the automatically
   generated state, which is only useful to emulate unprivileged behaviour
   or access unprivileged areas of memory while at privileged level. */
#define TBID_PSTAT_BIT      0x02000000
#define TBID_PSTAT_S        25

/* The top six bits of a signal/segment specifier identify a thread within
   the system. This represents a segment's owner. */
#define TBID_THREAD_BITS    0xFC000000
#define TBID_THREAD_S       26

/* Special thread id values */
#define TBID_THREAD_NULL   (-32) /* Never matches any thread/segment id used */
#define TBID_THREAD_GLOBAL (-31) /* Things global to all threads */
#define TBID_THREAD_HOST   ( -1) /* Host interface */
#define TBID_THREAD_EXTIO  (TBID_THREAD_HOST) /* Host based ExtIO i/f */

/* Virtual Id's are used for external thread interface structures or the
   above special Id's */
#define TBID_IS_VIRTTHREAD( Id ) ((Id) < 0)

/* Real Id's are used for actual hardware threads that are local */
#define TBID_IS_REALTHREAD( Id ) ((Id) >= 0)

/* Generate a segment Id given Thread, Scope, and Type */
#define TBID_SEG( Thread, Scope, Type ) (\
    ((Thread)<<TBID_THREAD_S) + ((Scope)<<TBID_SEGSCOPE_S) + (Type))

/* Generate a signal Id given Thread and SigNum */
#define TBID_SIG( Thread, SigNum ) (\
    ((Thread)<<TBID_THREAD_S) + ((SigNum)<<TBID_SIGNUM_S) + TBID_SIGNAL_BIT)

/* Generate an Id that solely represents a thread - useful for cache ops */
#define TBID_THD( Thread )  ((Thread)<<TBID_THREAD_S)
#define TBID_THD_NULL       ((TBID_THREAD_NULL)  <<TBID_THREAD_S)
#define TBID_THD_GLOBAL     ((TBID_THREAD_GLOBAL)<<TBID_THREAD_S)
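
/* For illustration only - a minimal sketch of how the Id building macros
   above combine and decompose; the inputs (thread 2, global scope, data
   segment) are arbitrary example values:

        int Id = TBID_SEG( 2, TBID_SEGSCOPE_GLOBAL, TBID_SEGTYPE_DATA );

        int Type   = (Id & TBID_SEGTYPE_BITS)  >> TBID_SEGTYPE_S;
        int Scope  = (Id & TBID_SEGSCOPE_BITS) >> TBID_SEGSCOPE_S;
        int Thread = Id >> TBID_THREAD_S;  / * Sign extends special Ids * /

        if ( TBID_IS_VIRTTHREAD( Id ) )
        {
            / * Special/host owner rather than a local hardware thread * /
        }
 */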
/* Common exception handler (see TBID_SIGNUM_XXF below) receives hardware
   generated fault codes TBIXXF_SIGNUM_xxF in its SigNum parameter */
#define TBIXXF_SIGNUM_IIF   0x01 /* General instruction fault */
#define TBIXXF_SIGNUM_PGF   0x02 /* Privilege general fault */
#define TBIXXF_SIGNUM_DHF   0x03 /* Data access watchpoint HIT */
#define TBIXXF_SIGNUM_IGF   0x05 /* Code fetch general read failure */
#define TBIXXF_SIGNUM_DGF   0x07 /* Data access general read/write fault */
#define TBIXXF_SIGNUM_IPF   0x09 /* Code fetch page fault */
#define TBIXXF_SIGNUM_DPF   0x0B /* Data access page fault */
#define TBIXXF_SIGNUM_IHF   0x0D /* Instruction breakpoint HIT */
#define TBIXXF_SIGNUM_DWF   0x0F /* Data access read-only fault */

/* Hardware signals communicate events between processing levels within a
   single thread; all the _xxF cases are exceptions and are routed via a
   common exception handler, _SWx are software trap events and kicks including
   __TBISignal generated kicks, and finally _TRx are hardware triggers */
#define TBID_SIGNUM_SW0     0x00 /* SWITCH GROUP 0 - Per thread user */
#define TBID_SIGNUM_SW1     0x01 /* SWITCH GROUP 1 - Per thread system */
#define TBID_SIGNUM_SW2     0x02 /* SWITCH GROUP 2 - Internal global request */
#define TBID_SIGNUM_SW3     0x03 /* SWITCH GROUP 3 - External global request */
#ifdef TBI_1_4
#define TBID_SIGNUM_FPE     0x04 /* Deferred exception - Any IEEE 754 exception */
#define TBID_SIGNUM_FPD     0x05 /* Deferred exception - Denormal exception */
/* Reserved 0x6 for a reserved deferred exception */
#define TBID_SIGNUM_BUS     0x07 /* Deferred exception - Bus Error */
/* Reserved 0x08-0x09 */
#else
/* Reserved 0x04-0x09 */
#endif
#define TBID_SIGNUM_SWS     0x0A /* KICK received with SigMask != 0 */
#define TBID_SIGNUM_SWK     0x0B /* KICK received with SigMask == 0 */
/* Reserved 0x0C-0x0F */
#define TBID_SIGNUM_TRT     0x10 /* Timer trigger */
#define TBID_SIGNUM_LWK     0x11 /* Low level kick (handler provided by TBI) */
#define TBID_SIGNUM_XXF     0x12 /* Fault handler - receives ALL _xxF sigs */
#ifdef TBI_1_4
#define TBID_SIGNUM_DFR     0x13 /* Deferred Exception handler */
#else
#define TBID_SIGNUM_FPE     0x13 /* FPE Exception handler */
#endif
/* External trigger one group 0x14 to 0x17 - per thread */
#define TBID_SIGNUM_TR1(Thread) (0x14+(Thread))
#define TBID_SIGNUM_T10     0x14
#define TBID_SIGNUM_T11     0x15
#define TBID_SIGNUM_T12     0x16
#define TBID_SIGNUM_T13     0x17
/* External trigger two group 0x18 to 0x1b - per thread */
#define TBID_SIGNUM_TR2(Thread) (0x18+(Thread))
#define TBID_SIGNUM_T20     0x18
#define TBID_SIGNUM_T21     0x19
#define TBID_SIGNUM_T22     0x1A
#define TBID_SIGNUM_T23     0x1B
#define TBID_SIGNUM_TR3     0x1C /* External trigger N-4 (global) */
#define TBID_SIGNUM_TR4     0x1D /* External trigger N-3 (global) */
#define TBID_SIGNUM_TR5     0x1E /* External trigger N-2 (global) */
#define TBID_SIGNUM_TR6     0x1F /* External trigger N-1 (global) */
#define TBID_SIGNUM_MAX     0x1F

/* Return the trigger register (TXMASK[I]/TXSTAT[I]) bits related to
   each hardware signal; sometimes this is a many-to-one relationship. */
#define TBI_TRIG_BIT(SigNum) (\
    ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\
      ( ((SigNum) == TBID_SIGNUM_SWS) || \
        ((SigNum) == TBID_SIGNUM_SWK) )   ? \
          TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT )

/* Return the hardware trigger vector number for entries in the
   HWVEC0EXT table that will generate the required internal trigger. */
#define TBI_TRIG_VEC(SigNum) (\
    ((SigNum) >= TBID_SIGNUM_T10) ? ((SigNum)-TBID_SIGNUM_TRT) : -1)
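
/* For illustration only - the many-to-one mapping performed by TBI_TRIG_BIT
   follows directly from the definitions above, for example:

        TBI_TRIG_BIT( TBID_SIGNUM_TRT ) == 1<<0  / * 0x10 - 0x10 * /
        TBI_TRIG_BIT( TBID_SIGNUM_T10 ) == 1<<4  / * 0x14 - 0x10 * /
        TBI_TRIG_BIT( TBID_SIGNUM_SWS ) == TXSTAT_KICK_BIT
        TBI_TRIG_BIT( TBID_SIGNUM_SWK ) == TXSTAT_KICK_BIT

   while TBI_TRIG_VEC( TBID_SIGNUM_T10 ) == 4 selects the matching HWVEC0EXT
   entry, and signal numbers below TBID_SIGNUM_T10 yield -1 (no vector). */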
/* Default trigger masks for each thread at background/interrupt level */
#define TBI_TRIGS_INIT( Thread ) (\
    TXSTAT_KICK_BIT + TBI_TRIG_BIT(TBID_SIGNUM_TR1(Thread)) )
#define TBI_INTS_INIT( Thread )  (\
    TXSTAT_KICK_BIT + TXSTATI_BGNDHALT_BIT \
                    + TBI_TRIG_BIT(TBID_SIGNUM_TR2(Thread)) )

#ifndef __ASSEMBLY__
/* A spin-lock location is a zero-initialised location in memory */
typedef volatile int TBISPIN, *PTBISPIN;

/* A kick location is a hardware location you can write to
 * in order to cause a kick
 */
typedef volatile int *PTBIKICK;

#if defined(METAC_1_0) || defined(METAC_1_1)
/* Macro to perform a kick */
#define TBI_KICK( pKick ) do { pKick[0] = 1; } while (0)
#else
/* #define METAG_LIN_VALUES before including machine.h if required */
#ifdef LINSYSEVENT_WR_COMBINE_FLUSH
/* Macro to perform a kick - write combiners must be flushed */
#define TBI_KICK( pKick ) do {\
    volatile int *pFlush = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH; \
    pFlush[0] = 0; \
    pKick[0]  = 1; } while (0)
#endif
#endif /* if defined(METAC_1_0) || defined(METAC_1_1) */
#endif /* ifndef __ASSEMBLY__ */

#ifndef __ASSEMBLY__
/* 64-bit dual unit state value */
typedef struct _tbidual_tag_ {
    /* 32-bit value from a pair of registers in data or address units */
    int U0, U1;
} TBIDUAL, *PTBIDUAL;
#endif /* ifndef __ASSEMBLY__ */

/* Byte offsets of fields within TBIDUAL */
#define TBIDUAL_U0      (0)
#define TBIDUAL_U1      (4)

#define TBIDUAL_BYTES   (8)

#define TBICTX_CRIT_BIT 0x0001 /* ASync state saved in TBICTX */
#define TBICTX_SOFT_BIT 0x0002 /* Sync state saved in TBICTX (other bits 0) */
#ifdef TBI_FASTINT_1_4
#define TBICTX_FINT_BIT 0x0004 /* Using Fast Interrupts */
#endif
#define TBICTX_FPAC_BIT 0x0010 /* FPU state in TBICTX, FPU active on entry */
#define TBICTX_XMCC_BIT 0x0020 /* Bit to identify a MECC task */
#define TBICTX_CBUF_BIT 0x0040 /* Hardware catch buffer flag from TXSTATUS */
#define TBICTX_CBRP_BIT 0x0080 /* Read pipeline dirty from TXDIVTIME */
#define TBICTX_XDX8_BIT 0x0100 /* Saved DX.8 to DX.15 too */
#define TBICTX_XAXX_BIT 0x0200 /* Save remaining AX registers to AX.7 */
#define TBICTX_XHL2_BIT 0x0400 /* Saved hardware loop registers too */
#define TBICTX_XTDP_BIT 0x0800 /* Saved DSP registers too */
#define TBICTX_XEXT_BIT 0x1000 /* Set if TBICTX.Ext.Ctx contains extended
                                  state save area, otherwise TBICTX.Ext.AX2
                                  just holds normal A0.2 and A1.2 states */
#define TBICTX_WAIT_BIT 0x2000 /* Causes wait for trigger - sticky toggle */
#define TBICTX_XCBF_BIT 0x4000 /* Catch buffer or RD extracted into TBICTX */
#define TBICTX_PRIV_BIT 0x8000 /* Set if system uses 'privileged' model */

#ifdef METAC_1_0
#define TBICTX_XAX3_BIT 0x0200 /* Saved AX.5 to AX.7 for XAXX */
#define TBICTX_AX_REGS  5      /* Ax.0 to Ax.4 are core GP regs on CHORUS */
#else
#define TBICTX_XAX4_BIT 0x0200 /* Saved AX.4 to AX.7 for XAXX */
#define TBICTX_AX_REGS  4      /* Default is Ax.0 to Ax.3 */
#endif
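
/* For illustration only - a sketch of testing the flag bits above within a
   signal handler, assuming State is the TBIRES parameter passed to a
   PTBIAPIFN handler (see below):

        if ( State.Sig.SaveMask & TBICTX_CRIT_BIT )
        {
            / * Interrupt level call - State.Sig.pCtx holds the critical
                state of the interrupted background thread * /
        }
        else
        {
            / * Background level call - State.Sig.pCtx is NULL * /
        }
 */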
#ifdef TBI_1_4
#define TBICTX_CFGFPU_FX16_BIT 0x00010000 /* Save FX.8 to FX.15 too */

/* The METAC_CORE_ID_CONFIG field indicates omitted DSP resources */
#define METAC_COREID_CFGXCTX_MASK( Value ) (\
    ( (((Value & METAC_COREID_CFGDSP_BITS)>> \
                 METAC_COREID_CFGDSP_S ) == METAC_COREID_CFGDSP_MIN) ? \
        ~(TBICTX_XHL2_BIT+TBICTX_XTDP_BIT+ \
          TBICTX_XAXX_BIT+TBICTX_XDX8_BIT ) : ~0U ) )
#endif

/* Extended context state provides a standardised method for registering the
   arguments required by __TBICtxSave to save the additional register states
   currently in use by non general purpose code. The state of the __TBIExtCtx
   variable in the static space of the thread forms an extension of the base
   context of the thread.

   If ( __TBIExtCtx.Ctx.SaveMask == 0 ) then pExt is assumed to be NULL and
   the empty state of __TBIExtCtx is represented by the fact that
   TBICTX.SaveMask does not have the bit TBICTX_XEXT_BIT set.

   If ( __TBIExtCtx.Ctx.SaveMask != 0 ) then pExt should point at a suitably
   sized extended context save area (usually at the end of the stack space
   allocated by the current routine). This space should allow for the
   displaced state of A0.2 and A1.2 to be saved along with the other extended
   states indicated via __TBIExtCtx.Ctx.SaveMask. */
#ifndef __ASSEMBLY__
typedef union _tbiextctx_tag_ {
    long long Val;
    TBIDUAL AX2;
    struct _tbiextctxext_tag {
#ifdef TBI_1_4
        short DspramSizes; /* DSPRAM sizes. Encoding varies between
                              TBICtxAlloc and the ECH scheme. */
#else
        short Reserved0;
#endif
        short SaveMask;    /* Flag bits for state saved */
        PTBIDUAL pExt;     /* AX[2] state saved first plus Xxxx state */

    } Ctx;

} TBIEXTCTX, *PTBIEXTCTX;

/* Automatic registration of extended context save for __TBINestInts */
extern TBIEXTCTX __TBIExtCtx;
#endif /* ifndef __ASSEMBLY__ */

/* Byte offsets of fields within TBIEXTCTX */
#define TBIEXTCTX_AX2           (0)
#define TBIEXTCTX_Ctx           (0)
#define TBIEXTCTX_Ctx_SaveMask  (TBIEXTCTX_Ctx + 2)
#define TBIEXTCTX_Ctx_pExt      (TBIEXTCTX_Ctx + 2 + 2)

/* Extended context data size calculation constants */
#define TBICTXEXT_BYTES     (8)
#define TBICTXEXTBB8_BYTES  (8*8)
#define TBICTXEXTAX3_BYTES  (3*8)
#define TBICTXEXTAX4_BYTES  (4*8)
#ifdef METAC_1_0
#define TBICTXEXTAXX_BYTES  TBICTXEXTAX3_BYTES
#else
#define TBICTXEXTAXX_BYTES  TBICTXEXTAX4_BYTES
#endif
#define TBICTXEXTHL2_BYTES  (3*8)
#define TBICTXEXTTDR_BYTES  (27*8)
#define TBICTXEXTTDP_BYTES  TBICTXEXTTDR_BYTES

#ifdef TBI_1_4
#define TBICTXEXTFX8_BYTES  (4*8)
#define TBICTXEXTFPAC_BYTES (1*4 + 2*2 + 4*8)
#define TBICTXEXTFACF_BYTES (3*8)
#endif

/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
#define TBICTXEXT_MAXBITS  (TBICTX_XEXT_BIT|             \
                            TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
                            TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )

/* Maximum size of the extended context save area for current variant */
#define TBICTXEXT_MAXBYTES (TBICTXEXT_BYTES+TBICTXEXTBB8_BYTES+\
                            TBICTXEXTAXX_BYTES+TBICTXEXTHL2_BYTES+\
                            TBICTXEXTTDP_BYTES )

#ifdef TBI_FASTINT_1_4
/* Maximum flag bits to be set via the TBICTX_EXTSET macro */
#define TBICTX2EXT_MAXBITS (TBICTX_XDX8_BIT|TBICTX_XAXX_BIT|\
                            TBICTX_XHL2_BIT|TBICTX_XTDP_BIT )

/* Maximum size of the extended context save area for current variant */
#define TBICTX2EXT_MAXBYTES (TBICTXEXTBB8_BYTES+TBICTXEXTAXX_BYTES\
                             +TBICTXEXTHL2_BYTES+TBICTXEXTTDP_BYTES )
#endif
/* Specify extended resources being used by current routine, code must be
   assembler generated to utilise extended resources-

        MOV     D0xxx,A0StP             ; Perform alloca - routine should
        ADD     A0StP,A0StP,#SaveSize   ;   setup/use A0FrP to access locals
        MOVT    D1xxx,#SaveMask         ; TBICTX_XEXT_BIT MUST be set
        SETL    [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx

   NB: OG(___TBIExtCtx) is a special case supported for SETL/GETL operations
       on 64-bit sized structures only, other accesses must be based on use
       of OGA(___TBIExtCtx).

   At exit of routine-

        MOV     D0xxx,#0                ; Clear extended context save state
        MOV     D1xxx,#0
        SETL    [A1GbP+#OG(___TBIExtCtx)],D0xxx,D1xxx
        SUB     A0StP,A0StP,#SaveSize   ; If original A0StP required

   NB: Both the setting and clearing of the whole __TBIExtCtx MUST be done
       atomically in one 64-bit write operation.

   For simple interrupt handling only via __TBINestInts there should be no
   impact of the __TBIExtCtx system. If pre-emptive scheduling is being
   performed however (assuming __TBINestInts has already been called earlier
   on) then the following logic will correctly call __TBICtxSave if required
   and clear out the currently selected background task-

        if ( __TBIExtCtx.Ctx.SaveMask & TBICTX_XEXT_BIT )
        {
            / * Store extended states in pCtx * /
            State.Sig.SaveMask |= __TBIExtCtx.Ctx.SaveMask;

            (void) __TBICtxSave( State, (void *) __TBIExtCtx.Ctx.pExt );
            __TBIExtCtx.Val = 0;
        }

   and when restoring task states call __TBICtxRestore-

        / * Restore state from pCtx * /
        State.Sig.pCtx = pCtx;
        State.Sig.SaveMask = pCtx->SaveMask;

        if ( State.Sig.SaveMask & TBICTX_XEXT_BIT )
        {
            / * Restore extended states from pCtx * /
            __TBIExtCtx.Val = pCtx->Ext.Val;

            (void) __TBICtxRestore( State, (void *) __TBIExtCtx.Ctx.pExt );
        }
 */

/* Critical thread state save area */
#ifndef __ASSEMBLY__
typedef struct _tbictx_tag_ {
    /* TXSTATUS_FLAG_BITS and TXSTATUS_LSM_STEP_BITS from TXSTATUS */
    short Flags;
    /* Mask indicates any extended context state saved; 0 -> Never run */
    short SaveMask;
    /* Saved PC value */
    int CurrPC;
    /* Saved critical register states */
    TBIDUAL DX[8];
    /* Background control register states - for cores without catch buffer
       base in DIVTIME the TXSTATUS bits RPVALID and RPMASK are stored with
       the real state TXDIVTIME in CurrDIVTIME */
    int CurrRPT, CurrBPOBITS, CurrMODE, CurrDIVTIME;
    /* Saved AX register states */
    TBIDUAL AX[2];
    TBIEXTCTX Ext;
    TBIDUAL AX3[TBICTX_AX_REGS-3];

    /* Any CBUF state to be restored by a handler return must be stored here.
       Other extended state can be stored anywhere - see __TBICtxSave and
       __TBICtxRestore. */

} TBICTX, *PTBICTX;

#ifdef TBI_FASTINT_1_4
typedef struct _tbictx2_tag_ {
    TBIDUAL AX[2];      /* AU.0, AU.1 */
    TBIDUAL DX[2];      /* DU.0, DU.4 */
    int     CurrMODE;
    int     CurrRPT;
    int     CurrSTATUS;
    void   *CurrPC;     /* PC in PC address space */
} TBICTX2, *PTBICTX2;
/* TBICTX2 is followed by:
 *   TBICTXEXTCB0                 if TXSTATUS.CBMarker
 *   TBIDUAL * TXSTATUS.IRPCount  if TXSTATUS.IRPCount > 0
 *   TBICTXGP                     if using __TBIStdRootIntHandler or
 *                                __TBIStdCtxSwitchRootIntHandler
 */

typedef struct _tbictxgp_tag_ {
    short   DspramSizes;
    short   SaveMask;
    void   *pExt;
    TBIDUAL DX[6];      /* DU.1-DU.3, DU.5-DU.7 */
    TBIDUAL AX[2];      /* AU.2-AU.3 */
} TBICTXGP, *PTBICTXGP;

#define TBICTXGP_DspramSizes (0)
#define TBICTXGP_SaveMask    (TBICTXGP_DspramSizes + 2)
#define TBICTXGP_MAX_BYTES   (2 + 2 + 4 + 8*(6+2))

#endif
#endif /* ifndef __ASSEMBLY__ */

/* Byte offsets of fields within TBICTX */
#define TBICTX_Flags            (0)
#define TBICTX_SaveMask         (2)
#define TBICTX_CurrPC           (4)
#define TBICTX_DX               (2 + 2 + 4)
#define TBICTX_CurrRPT          (2 + 2 + 4 + 8 * 8)
#define TBICTX_CurrMODE         (2 + 2 + 4 + 8 * 8 + 4 + 4)
#define TBICTX_AX               (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4)
#define TBICTX_Ext              (2 + 2 + 4 + 8 * 8 + 4 + 4 + 4 + 4 + 2 * 8)
#define TBICTX_Ext_AX2          (TBICTX_Ext + TBIEXTCTX_AX2)
#define TBICTX_Ext_AX2_U0       (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U0)
#define TBICTX_Ext_AX2_U1       (TBICTX_Ext + TBIEXTCTX_AX2 + TBIDUAL_U1)
#define TBICTX_Ext_Ctx_pExt     (TBICTX_Ext + TBIEXTCTX_Ctx_pExt)
#define TBICTX_Ext_Ctx_SaveMask (TBICTX_Ext + TBIEXTCTX_Ctx_SaveMask)
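
/* For illustration only - the byte offsets above mirror the C layout of
   TBICTX, so from C they could be sanity checked against offsetof()
   (assuming <stddef.h> and no unusual structure packing), e.g.:

        offsetof( TBICTX, CurrPC ) == TBICTX_CurrPC    / * both 4 * /
        offsetof( TBICTX, DX )     == TBICTX_DX        / * both 8 * /
 */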
#ifdef TBI_FASTINT_1_4
#define TBICTX2_BYTES      (8 * 2 + 8 * 2 + 4 + 4 + 4 + 4)
#define TBICTXEXTCB0_BYTES (4 + 4 + 8)

#define TBICTX2_CRIT_MAX_BYTES (TBICTX2_BYTES + TBICTXEXTCB0_BYTES \
                                + 6 * TBIDUAL_BYTES)
#define TBI_SWITCH_NEXT_PC(PC, EXTRA) ((PC) + (((EXTRA) & 1) ? 8 : 4))
#endif

#ifndef __ASSEMBLY__
/* Extended thread state save areas - catch buffer state element */
typedef struct _tbictxextcb0_tag_ {
    /* Flags data and address value - see METAC_CATCH_VALUES in machine.h */
    unsigned long CBFlags, CBAddr;
    /* 64-bit data */
    TBIDUAL CBData;

} TBICTXEXTCB0, *PTBICTXEXTCB0;

/* Read pipeline state saved on later cores after single catch buffer slot */
typedef struct _tbictxextrp6_tag_ {
    /* RPMask is TXSTATUS_RPMASK_BITS only, reserved is undefined */
    unsigned long RPMask, Reserved0;
    TBIDUAL CBData[6];

} TBICTXEXTRP6, *PTBICTXEXTRP6;

/* Extended thread state save areas - 8 DU register pairs */
typedef struct _tbictxextbb8_tag_ {
    /* Remaining Data unit registers in 64-bit pairs */
    TBIDUAL UX[8];

} TBICTXEXTBB8, *PTBICTXEXTBB8;

/* Extended thread state save areas - 3 AU register pairs */
typedef struct _tbictxextbb3_tag_ {
    /* Remaining Address unit registers in 64-bit pairs */
    TBIDUAL UX[3];

} TBICTXEXTBB3, *PTBICTXEXTBB3;

/* Extended thread state save areas - 4 AU register pairs or 4 FX pairs */
typedef struct _tbictxextbb4_tag_ {
    /* Remaining Address unit or FPU registers in 64-bit pairs */
    TBIDUAL UX[4];

} TBICTXEXTBB4, *PTBICTXEXTBB4;

/* Extended thread state save areas - Hardware loop states (max 2) */
typedef struct _tbictxexthl2_tag_ {
    /* Hardware looping register states */
    TBIDUAL Start, End, Count;

} TBICTXEXTHL2, *PTBICTXEXTHL2;

/* Extended thread state save areas - DSP register states */
typedef struct _tbictxexttdp_tag_ {
    /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
    TBIDUAL Acc32[1];
    /* DSP > 32-bit accumulator bits 63:32 of ACX.0 (zero-extended) */
    TBIDUAL Acc64[1];
    /* Twiddle register state, and three phase increment states */
    TBIDUAL PReg[4];
    /* Modulo region size, padded to 64-bits */
    int CurrMRSIZE, Reserved0;

} TBICTXEXTTDP, *PTBICTXEXTTDP;

/* Extended thread state save areas - DSP register states including DSP RAM */
typedef struct _tbictxexttdpr_tag_ {
    /* DSP 32-bit accumulator register state (Bits 31:0 of ACX.0) */
    TBIDUAL Acc32[1];
    /* DSP 40-bit accumulator register state (Bits 39:8 of ACX.0) */
    TBIDUAL Acc40[1];
    /* DSP RAM Pointers */
    TBIDUAL RP0[2],  WP0[2],  RP1[2],  WP1[2];
    /* DSP RAM Increments */
    TBIDUAL RPI0[2], WPI0[2], RPI1[2], WPI1[2];
    /* Template registers */
    unsigned long Tmplt[16];
    /* Modulo address region size and DSP RAM module region sizes */
    int CurrMRSIZE, CurrDRSIZE;

} TBICTXEXTTDPR, *PTBICTXEXTTDPR;
#ifdef TBI_1_4
/* The METAC_ID_CORE register state is a marker for the FPU
   state that is then stored after this core header structure. */
#define TBICTXEXTFPU_CONFIG_MASK ( (METAC_COREID_NOFPACC_BIT+ \
                                    METAC_COREID_CFGFPU_BITS ) << \
                                    METAC_COREID_CONFIG_BITS )

/* Recorded FPU exception state from TXDEFR in DefrFpu */
#define TBICTXEXTFPU_DEFRFPU_MASK (TXDEFR_FPU_FE_BITS)

/* Extended thread state save areas - FPU register states */
typedef struct _tbictxextfpu_tag_ {
    /* Stored METAC_CORE_ID CONFIG */
    int CfgFpu;
    /* Stored deferred TXDEFR bits related to FPU
     *
     * This is encoded as follows in order to fit into 16-bits:
     * DefrFPU:15 - 14 <= 0
     *        :13 -  8 <= TXDEFR:21-16
     *        : 7 -  6 <= 0
     *        : 5 -  0 <= TXDEFR:5-0
     */
    short DefrFpu;

    /* TXMODE bits related to FPU */
    short ModeFpu;

    /* FPU Even/Odd register states */
    TBIDUAL FX[4];

    /* if CfgFpu & TBICTX_CFGFPU_FX16_BIT -> 1 then TBICTXEXTBB4 holds FX.8-15 */
    /* if CfgFpu & TBICTX_CFGFPU_NOACF_BIT -> 0 then TBICTXEXTFPACC holds state */
} TBICTXEXTFPU, *PTBICTXEXTFPU;

/* Extended thread state save areas - FPU accumulator state */
typedef struct _tbictxextfpacc_tag_ {
    /* FPU accumulator register state - three 64-bit parts */
    TBIDUAL FAcc32[3];

} TBICTXEXTFPACC, *PTBICTXEXTFPACC;
#endif

/* Prototype TBI structure */
struct _tbi_tag_ ;

/* A 64-bit return value used commonly in the TBI APIs */
typedef union _tbires_tag_ {
    /* Save and load this value to get/set the whole result quickly */
    long long Val;

    /* Parameter of a fnSigs or __TBICtx* call */
    struct _tbires_sig_tag_ {
        /* TXMASK[I] bits zeroed up to and including current trigger level */
        unsigned short TrigMask;
        /* Control bits for handlers - see PTBIAPIFN documentation below */
        unsigned short SaveMask;
        /* Pointer to the base register context save area of the thread */
        PTBICTX pCtx;
    } Sig;

    /* Result of TBIThrdPrivId call */
    struct _tbires_thrdprivid_tag_ {
        /* Basic thread identifier; just TBID_THREAD_BITS */
        int Id;
        /* Non thread number bits; TBID_ISTAT_BIT+TBID_PSTAT_BIT */
        int Priv;
    } Thrd;

    /* Parameter and Result of a __TBISwitch call */
    struct _tbires_switch_tag_ {
        /* Parameter passed across context switch */
        void *pPara;
        /* Thread context of other Thread including restore flags */
        PTBICTX pCtx;
    } Switch;

    /* For extended S/W events only */
    struct _tbires_ccb_tag_ {
        void *pCCB;
        int COff;
    } CCB;

    struct _tbires_tlb_tag_ {
        int Leaf;  /* TLB Leaf data */
        int Flags; /* TLB Flags */
    } Tlb;

#ifdef TBI_FASTINT_1_4
    struct _tbires_intr_tag_ {
        short    TrigMask;
        short    SaveMask;
        PTBICTX2 pCtx;
    } Intr;
#endif

} TBIRES, *PTBIRES;
#endif /* ifndef __ASSEMBLY__ */
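
/* For illustration only - a sketch of the 'whole value' access pattern that
   the Val member exists for; a whole TBIRES can be cleared or copied in one
   64-bit operation before individual fields are set (Mask here is a
   hypothetical trigger mask value):

        TBIRES Res;

        Res.Val          = 0;    / * Clear all fields at once * /
        Res.Sig.TrigMask = Mask; / * Then fill in what is needed * /

        return Res;
 */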
#ifndef __ASSEMBLY__
/* Prototype for all signal handler functions, called via ___TBISyncTrigger or
   ___TBIASyncTrigger.

   State.Sig.TrigMask will indicate the bits set within TXMASKI at
       the time of the handler call that have all been cleared to prevent
       nested interrupts occurring immediately.

   State.Sig.SaveMask is a bit-mask which will be set to Zero when a trigger
       occurs at background level, and to TBICTX_CRIT_BIT and optionally
       TBICTX_CBUF_BIT when a trigger occurs at interrupt level.

       TBICTX_CBUF_BIT reflects the state of TXSTATUS_CBMARKER_BIT for
       the interrupted background thread.

   State.Sig.pCtx will point at a TBICTX structure generated to hold the
       critical state of the interrupted thread at interrupt level and
       should be set to NULL when called at background level.

   Triggers will indicate the status of TXSTAT or TXSTATI sampled by the
       code that called the handler.

   InstOrSWSId is defined firstly as 'Inst' if SigNum is TBID_SIGNUM_SWx,
       and holds the actual SWITCH instruction detected; secondly, if SigNum
       is TBID_SIGNUM_SWS, 'SWSId' is defined to hold the Id of the
       software signal detected; in other cases the value of this
       parameter is undefined.

   pTBI points at the PTBI structure related to the thread and processing
       level involved.

   The TBIRES return value at both processing levels is similar in terms of
       any changes that the handler makes. By default the State argument
       value passed in should be returned.

       Sig.TrigMask value is the bits to OR back into TXMASKI when the
       handler completes to enable currently disabled interrupts.

       Sig.SaveMask value is ignored.

       Sig.pCtx is ignored.
 */
typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum,
                             int Triggers, int InstOrSWSId,
                             volatile struct _tbi_tag_ *pTBI );
#endif /* ifndef __ASSEMBLY__ */

#ifndef __ASSEMBLY__
/* The global memory map is described by a list of segment descriptors */
typedef volatile struct _tbiseg_tag_ {
    volatile struct _tbiseg_tag_ *pLink;
    int Id;             /* Id of the segment */
    TBISPIN Lock;       /* Spin-lock for struct (normally 0) */
    unsigned int Bytes; /* Size of region in bytes */
    void *pGAddr;       /* Base addr of region in global space */
    void *pLAddr;       /* Base addr of region in local space */
    int Data[2];        /* Segment specific data (may be extended) */

} TBISEG, *PTBISEG;
#endif /* ifndef __ASSEMBLY__ */

/* Offsets of fields in TBISEG structure */
#define TBISEG_pLink  ( 0)
#define TBISEG_Id     ( 4)
#define TBISEG_Lock   ( 8)
#define TBISEG_Bytes  (12)
#define TBISEG_pGAddr (16)
#define TBISEG_pLAddr (20)
#define TBISEG_Data   (24)

#ifndef __ASSEMBLY__
typedef volatile struct _tbi_tag_ {
    int SigMask;      /* Bits set to represent S/W events */
    PTBIKICK pKick;   /* Kick addr for S/W events */
    void *pCCB;       /* Extended S/W events */
    PTBISEG pSeg;     /* Related segment structure */
    PTBIAPIFN fnSigs[TBID_SIGNUM_MAX+1]; /* Signal handler API table */
} *PTBI, TBI;
#endif /* ifndef __ASSEMBLY__ */

/* Byte offsets of fields within TBI */
#define TBI_SigMask (0)
#define TBI_pKick   (4)
#define TBI_pCCB    (8)
#define TBI_pSeg    (12)
#define TBI_fnSigs  (16)
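
/* For illustration only - a sketch of installing a signal handler into the
   fnSigs table of the current thread's root block; MyHandler is a
   hypothetical PTBIAPIFN compatible routine, and __TBI/__TBIThreadId are
   declared later in this header:

        PTBI pTBI = __TBI( __TBIThreadId() );

        pTBI->fnSigs[TBID_SIGNUM_TRT] = &MyHandler;
 */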
#ifdef TBI_1_4
#ifndef __ASSEMBLY__
/* This handler should be used for TBID_SIGNUM_DFR */
extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum,
                               int Triggers, int InstOrSWSId,
                               volatile struct _tbi_tag_ *pTBI );
#endif
#endif

/* String table entry - special values */
#define METAG_TBI_STRS (0x5300) /* Tag      : If entry is valid */
#define METAG_TBI_STRE (0x4500) /* Tag      : If entry is end of table */
#define METAG_TBI_STRG (0x4700) /* Tag      : If entry is a gap */
#define METAG_TBI_STRX (0x5A00) /* TransLen : If no translation present */

#ifndef __ASSEMBLY__
typedef volatile struct _tbistr_tag_ {
    short Bytes;    /* Length of entry in Bytes */
    short Tag;      /* Normally METAG_TBI_STRS(0x5300) */
    short Len;      /* Length of the string entry (incl null) */
    short TransLen; /* Normally METAG_TBI_STRX(0x5A00) */
    char String[8]; /* Zero terminated (may-be bigger) */

} TBISTR, *PTBISTR;
#endif /* ifndef __ASSEMBLY__ */

/* Cache size information - available as fields of Data[1] of global heap
   segment */
#define METAG_TBI_ICACHE_SIZE_S    0 /* see comments below */
#define METAG_TBI_ICACHE_SIZE_BITS 0x0000000F
#define METAG_TBI_ICACHE_FILL_S    4
#define METAG_TBI_ICACHE_FILL_BITS 0x000000F0
#define METAG_TBI_DCACHE_SIZE_S    8
#define METAG_TBI_DCACHE_SIZE_BITS 0x00000F00
#define METAG_TBI_DCACHE_FILL_S    12
#define METAG_TBI_DCACHE_FILL_BITS 0x0000F000

/* METAG_TBI_xCACHE_SIZE
   Describes the physical cache size rounded up to the next power of 2
   relative to a 16K (2^14) cache. These sizes are encoded as a signed addend
   to this base power of 2, for example
      4K  -> 2^12 -> -2 (i.e. 12-14)
      8K  -> 2^13 -> -1
      16K -> 2^14 ->  0
      32K -> 2^15 -> +1
      64K -> 2^16 -> +2
     128K -> 2^17 -> +3

   METAG_TBI_xCACHE_FILL
   Describes the physical cache size within the power of 2 area given by
   the value above. For example a 10K cache may be represented as having
   nearest size 16K with a fill of 10 sixteenths. This is encoded as the
   number of unused 1/16ths, for example
      0000 ->  0 -> 16/16
      0001 ->  1 -> 15/16
      0010 ->  2 -> 14/16
      ...
      1111 -> 15 ->  1/16
 */

#define METAG_TBI_CACHE_SIZE_BASE_LOG2 14

/* Each declaration made by this macro generates a TBISTR entry */
#ifndef __ASSEMBLY__
#define TBISTR_DECL( Name, Str ) \
    __attribute__ ((__section__ (".tbistr") )) const char Name[] = #Str
#endif

/* META timer values - see below for Timer support routines */
#define TBI_TIMERWAIT_MIN (-16)         /* Minimum 'recommended' period */
#define TBI_TIMERWAIT_MAX (-0x7FFFFFFF) /* Maximum 'recommended' period */

#ifndef __ASSEMBLY__
/* These macros allow direct access from C to any register known to the
   assembler or defined in machine.h. Example candidates are TXTACTCYC,
   TXIDLECYC, and TXPRIVEXT. Note that where higher level macros and routines
   like the timer and trigger handling features below exist, they should be
   used in preference to this direct low-level access mechanism. */
#define TBI_GETREG( Reg ) __extension__ ({\
    int __GRValue; \
    __asm__ volatile ("MOV\t%0," #Reg "\t/* (*TBI_GETREG OK) */" : \
                      "=r" (__GRValue) ); \
    __GRValue; })

#define TBI_SETREG( Reg, Value ) do {\
    int __SRValue = Value; \
    __asm__ volatile ("MOV\t" #Reg ",%0\t/* (*TBI_SETREG OK) */" : \
                      : "r" (__SRValue) ); } while (0)

#define TBI_SWAPREG( Reg, Value ) do {\
    int __XRValue = (Value); \
    __asm__ volatile ("SWAP\t" #Reg ",%0\t/* (*TBI_SWAPREG OK) */" : \
                      "=r" (__XRValue) : "0" (__XRValue) ); \
    Value = __XRValue; } while (0)
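
/* For illustration only - a sketch of timing a code sequence with the
   direct register access macros above; TXTACTCYC (named in the comment
   above) counts active thread cycles:

        int Start, Delta;

        Start = TBI_GETREG( TXTACTCYC );
        / * ... code being measured ... * /
        Delta = TBI_GETREG( TXTACTCYC ) - Start;
 */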
/* Obtain and/or release global critical section lock given that interrupts
   are already disabled and/or should remain disabled. */
#define TBI_NOINTSCRITON do {\
    __asm__ volatile ("LOCK1\t\t/* (*TBI_NOINTSCRITON OK) */");} while (0)
#define TBI_NOINTSCRITOFF do {\
    __asm__ volatile ("LOCK0\t\t/* (*TBI_NOINTSCRITOFF OK) */");} while (0)
/* Optimised in-lining versions of the above macros */

#define TBI_LOCK( TrigState ) do {\
    int __TRValue;                                             \
    int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000;   \
    __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_LOCK ... */\n\t"  \
                      "SWAP\t%0,TXMASKI\t/* ... */\n\t"        \
                      "LOCK2\t\t/* ... */\n\t"                 \
                      "SETD\t[%1+#0x40],D1RtP /* ... OK) */" : \
                      "=r&" (__TRValue) : "u" (__ALOCKHI) );   \
    TrigState = __TRValue; } while (0)

#define TBI_CRITON( TrigState ) do {\
    int __TRValue;                                              \
    __asm__ volatile ("MOV %0,#0\t\t/* (*TBI_CRITON ... */\n\t" \
                      "SWAP\t%0,TXMASKI\t/* ... */\n\t"         \
                      "LOCK1\t\t/* ... OK) */" :                \
                      "=r" (__TRValue) );                       \
    TrigState = __TRValue; } while (0)

#define TBI_INTSX( TrigState ) do {\
    int __TRValue = TrigState;                                    \
    __asm__ volatile ("SWAP\t%0,TXMASKI\t/* (*TBI_INTSX OK) */" : \
                      "=r" (__TRValue) : "0" (__TRValue) );       \
    TrigState = __TRValue; } while (0)

#define TBI_UNLOCK( TrigState ) do {\
    int __TRValue = TrigState;                                             \
    int __ALOCKHI = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000;               \
    __asm__ volatile ("SETD\t[%1+#0x00],D1RtP\t/* (*TBI_UNLOCK ... */\n\t" \
                      "LOCK0\t\t/* ... */\n\t"                             \
                      "MOV\tTXMASKI,%0\t/* ... OK) */" :                   \
                      : "r" (__TRValue), "u" (__ALOCKHI) ); } while (0)

#define TBI_CRITOFF( TrigState ) do {\
    int __TRValue = TrigState;                               \
    __asm__ volatile ("LOCK0\t\t/* (*TBI_CRITOFF ... */\n\t" \
                      "MOV\tTXMASKI,%0\t/* ... OK) */" :     \
                      : "r" (__TRValue) ); } while (0)

#define TBI_TRIGSX( SrcDst ) do { TBI_SWAPREG( TXMASK, SrcDst );} while (0)

/* Composite macros to perform logic ops on INTS or TRIGS masks */
#define TBI_INTSOR( Bits ) do {\
    int __TT = 0; TBI_INTSX(__TT); \
    __TT |= (Bits); TBI_INTSX(__TT); } while (0)

#define TBI_INTSAND( Bits ) do {\
    int __TT = 0; TBI_INTSX(__TT); \
    __TT &= (Bits); TBI_INTSX(__TT); } while (0)

#ifdef TBI_1_4
#define TBI_DEFRICTRLSOR( Bits ) do {\
    int __TT = TBI_GETREG( CT.20 ); \
    __TT |= (Bits); TBI_SETREG( CT.20, __TT); } while (0)

#define TBI_DEFRICTRLSAND( Bits ) do {\
    int __TT = TBI_GETREG( TXDEFR ); \
    __TT &= (Bits); TBI_SETREG( CT.20, __TT); } while (0)
#endif

#define TBI_TRIGSOR( Bits ) do {\
    int __TT = TBI_GETREG( TXMASK ); \
    __TT |= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)

#define TBI_TRIGSAND( Bits ) do {\
    int __TT = TBI_GETREG( TXMASK ); \
    __TT &= (Bits); TBI_SETREG( TXMASK, __TT); } while (0)
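
/* For illustration only - a sketch of a short critical section built from
   the macros above; TrigState preserves the TXMASKI state captured on entry
   so it can be restored on exit:

        int TrigState;

        TBI_CRITON( TrigState );
        / * ... update state shared with other local threads ... * /
        TBI_CRITOFF( TrigState );
 */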
/* Macros to disable and re-enable interrupts using TBI_INTSX; deliberate
   traps and exceptions can still be handled within the critical section. */
#define TBI_STOPINTS( Value ) do {\
    int __TT = TBI_GETREG( TXMASKI ); \
    __TT &= TXSTATI_BGNDHALT_BIT; TBI_INTSX( __TT ); \
    Value = __TT; } while (0)
#define TBI_RESTINTS( Value ) do {\
    int __TT = Value; TBI_INTSX( __TT ); } while (0)

/* Return pointer to segment list at current privilege level */
PTBISEG __TBISegList( void );

/* Search the segment list for a match given Id, pStart can be NULL */
PTBISEG __TBIFindSeg( PTBISEG pStart, int Id );

/* Prepare a new segment structure using space from within another */
PTBISEG __TBINewSeg( PTBISEG pFromSeg, int Id, unsigned int Bytes );

/* Prepare a new segment using any global or local heap segments available */
PTBISEG __TBIMakeNewSeg( int Id, unsigned int Bytes );

/* Insert a new segment into the segment list so __TBIFindSeg can locate it */
void __TBIAddSeg( PTBISEG pSeg );
#define __TBIADDSEG_DEF /* Some versions failed to define this */

/* Return Id of current thread; TBID_ISTAT_BIT+TBID_THREAD_BITS */
int __TBIThreadId( void );

/* Return TBIRES.Thrd data for current thread */
TBIRES __TBIThrdPrivId( void );

/* Return pointer to current thread's TBI root block.
   Id implies whether Int or Background root block is required */
PTBI __TBI( int Id );

/* Try to set Mask bit using the spin-lock protocol; return 0 if it fails
   and the new state if it succeeds */
int __TBIPoll( PTBISPIN pLock, int Mask );

/* Set Mask bits via the spin-lock protocol in *pLock, return new state */
int __TBISpin( PTBISPIN pLock, int Mask );

/* Default handler set up for all TBI.fnSigs entries during initialisation */
TBIRES __TBIUnExpXXX( TBIRES State, int SigNum,
                      int Triggers, int Inst, PTBI pTBI );

/* Call this routine to service triggers at background processing level. The
   TBID_POLL_BIT of the Id parameter value will be used to indicate that the
   routine should return if no triggers need to be serviced initially. If this
   bit is not set the routine will block until one trigger handler is serviced
   and then behave like the poll case, servicing any remaining triggers
   actually outstanding before returning. Normally the State parameter should
   simply be initialised to zero and the result should be ignored; other
   values/options are for internal use only. */
TBIRES __TBISyncTrigger( TBIRES State, int Id );

/* Call this routine to enable processing of triggers by signal handlers at
   interrupt level. The State parameter value passed is returned by this
   routine. The State.Sig.TrigMask field also specifies the initial
   state of the interrupt mask register TXMASKI to be set up by the call.
   The other parts of the State parameter are ignored unless the PRIV bit is
   set in the SaveMask field. In this case the State.Sig.pCtx field specifies
   the base of the stack to which the interrupt system should switch
   as it saves the state of the previously executing code. In this case the
   thread will be unprivileged as it continues execution at the return
   point of this routine and its future state will effectively never be
   trusted to be valid. */
TBIRES __TBIASyncTrigger( TBIRES State );
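
/* For illustration only - a sketch of a background processing loop built on
   __TBISyncTrigger as documented above, with State zero-initialised and the
   result ignored; the exact Id convention is system specific, the current
   thread's Id without the poll bit is assumed here:

        TBIRES State;

        State.Val = 0;

        for (;;)
        {
            / * Blocks until at least one trigger has been serviced * /
            (void) __TBISyncTrigger( State, __TBIThreadId() );
        }
 */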
/* Call this to swap soft threads executing at the background processing level.
   The TBIRES returned to the new thread will be the same as the NextThread
   value specified to the call. The NextThread.Switch.pCtx value specifies
   which thread context to restore and the NextThread.Switch.pPara value can
   hold an arbitrary expression to be passed between the threads. The saved
   state of the previous thread will be stored in a TBICTX descriptor created
   on its stack and the address of this will be stored into the *rpSaveCtx
   location specified. */
TBIRES __TBISwitch( TBIRES NextThread, PTBICTX *rpSaveCtx );

/* Call this to initialise a stack frame ready for further use; up to four
   32-bit arguments may be specified after the fixed args to be passed via
   the new stack pStack to the routine specified via fnMain. If the
   main-line routine ever returns the thread will operate as if main itself
   had returned and terminate with the return code given. */
typedef int (*PTBIMAINFN)( TBIRES Arg /*, <= 4 additional 32-bit args */ );
PTBICTX __TBISwitchInit( void *pStack, PTBIMAINFN fnMain, ... );

/* Call this to resume a thread from a saved synchronous TBICTX state.
   The TBIRES returned to the new thread will be the same as the NextThread
   value specified to the call. The NextThread.Switch.pCtx value specifies
   which thread context to restore and the NextThread.Switch.pPara value can
   hold an arbitrary expression to be passed between the threads. The context
   of the calling thread is lost and this routine never returns to the
   caller. The TrigsMask value supplied is ORed into TXMASKI to enable
   interrupts after the context of the new thread is established. */
void __TBISyncResume( TBIRES NextThread, int TrigsMask );

/* Call these routines to save and restore the extended states of
   scheduled tasks. */
void *__TBICtxSave( TBIRES State, void *pExt );
void *__TBICtxRestore( TBIRES State, void *pExt );

#ifdef TBI_1_4
#ifdef TBI_FASTINT_1_4
/* Call these routines to copy the GP state to a separate buffer.
 * Only necessary for context switching.
 */
PTBICTXGP __TBICtx2SaveCrit( PTBICTX2 pCurrentCtx, PTBICTX2 pSaveCtx );
void *__TBICtx2SaveGP( PTBICTXGP pCurrentCtxGP, PTBICTXGP pSaveCtxGP );

/* Call these routines to save and restore the extended states of
   scheduled tasks. */
void *__TBICtx2Save( PTBICTXGP pCtxGP, short SaveMask, void *pExt );
void *__TBICtx2Restore( PTBICTX2 pCtx, short SaveMask, void *pExt );
#endif

/* If the FPAC flag is set then significant FPU context exists. Call these
   routines to save and restore it */
void *__TBICtxFPUSave( TBIRES State, void *pExt );
void *__TBICtxFPURestore( TBIRES State, void *pExt );

#ifdef TBI_FASTINT_1_4
extern void *__TBICtx2FPUSave (PTBICTXGP, short, void*);
extern void *__TBICtx2FPURestore (PTBICTXGP, short, void*);
#endif
#endif

#ifdef TBI_1_4
/* Call these routines to save and restore DSPRAM. */
void *__TBIDspramSaveA (short DspramSizes, void *pExt);
void *__TBIDspramSaveB (short DspramSizes, void *pExt);
void *__TBIDspramRestoreA (short DspramSizes, void *pExt);
void *__TBIDspramRestoreB (short DspramSizes, void *pExt);
#endif
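
/* For illustration only - a sketch of creating and entering a soft thread
   with __TBISwitchInit and __TBISwitch above; MyStack and MyMain are
   hypothetical, and the target system's stack sizing/alignment rules are
   assumed to be met:

        TBIRES Next;
        PTBICTX pSaveCtx;

        Next.Switch.pCtx  = __TBISwitchInit( MyStack, &MyMain );
        Next.Switch.pPara = NULL;

        (void) __TBISwitch( Next, &pSaveCtx );
 */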
/* This routine should be used at the entrypoint of interrupt handlers to
   re-enable higher priority interrupts and/or save state from the previously
   executing background code. State is a TBIRES.Sig parameter, with NoNestMask
   indicating the triggers (if any) that should remain disabled and the
   SaveMask CBUF bit indicating if the hardware catch buffer is dirty.
   Optionally any number of extended state bits X??? including XCBF can be
   specified to force a nested state save call to __TBICtxSave before the
   current routine continues. (In the latter case __TBICtxRestore should be
   called to restore any extended states before the background thread of
   execution is resumed.)

   By default (no X??? bits specified in SaveMask) this routine performs a
   sub-call to __TBICtxSave with the pExt and State parameters specified IF
   some triggers could be serviced while the current interrupt handler
   executes and the hardware catch buffer is actually dirty. In this case
   this routine provides the XCBF bit in State.Sig.SaveMask to force the
   __TBICtxSave to extract the current catch state.

   The NoNestMask parameter should normally indicate that the same or lower
   priority triggers than those provoking the current handler call should not
   be serviced in nested calls; zero may be specified if all possible
   interrupts are to be allowed.

   The TBIRES.Sig value returned will be similar to the State parameter
   specified, with the XCBF bit ORed into its SaveMask if a context save was
   required and fewer bits set in its TrigMask corresponding to the same/lower
   priority interrupt triggers still not enabled. */
TBIRES __TBINestInts( TBIRES State, void *pExt, int NoNestMask );

/* This routine causes the TBICTX structure specified in State.Sig.pCtx to
   be restored. This implies that execution will not return to the caller.
   The State.Sig.TrigMask field will be restored during the context switch
   such that any immediately occurring interrupts occur in the context of the
   newly specified task. The State.Sig.SaveMask parameter is ignored. */
void __TBIASyncResume( TBIRES State );

/* Call this routine to enable fastest possible processing of one or more
   interrupt triggers via a unified signal handler. The handler concerned
   must simply return after servicing the related hardware.
   The State.Sig.TrigMask parameter indicates the interrupt triggers to be
   enabled, and Thin.Thin.fnHandler specifies the routine to call; the
   whole Thin parameter value will be passed to this routine unaltered as
   its first parameter. */
void __TBIASyncThin( TBIRES State, TBIRES Thin );

/* Do this before performing your own direct spin-lock access - use TBI_LOCK */
int __TBILock( void );

/* Do this after performing your own direct spin-lock access - use TBI_UNLOCK */
void __TBIUnlock( int TrigState );

/* Obtain and release global critical section lock - only stops execution
   of interrupts on this thread and similar critical section code on other
   local threads - use TBI_CRITON or TBI_CRITOFF */
int __TBICritOn( void );
void __TBICritOff( int TrigState );

/* Change INTS (TXMASKI) - return old state - use TBI_INTSX */
int __TBIIntsX( int NewMask );

/* Change TRIGS (TXMASK) - return old state - use TBI_TRIGSX */
int __TBITrigsX( int NewMask );
/* This function initialises a timer for first use; only the TBID_ISTAT_BIT
   of the Id parameter is used to indicate which timer is to be modified. The
   Wait value should either be zero to disable the timer concerned or be in
   the recommended TBI_TIMERWAIT_* range to specify the delay required before
   the first timer trigger occurs.

   The TBID_ISTAT_BIT of the Id parameter similarly affects all other timer
   support functions (see below). */
void __TBITimerCtrl( int Id, int Wait );

/* This routine returns a 64-bit time stamp value that is initialised to zero
   via a __TBITimerCtrl timer enabling call. */
long long __TBITimeStamp( int Id );

/* To manage a periodic timer each period elapsed should be subtracted from
   the current timer value to attempt to set up the next timer trigger. The
   Wait parameter should be a value in the recommended TBI_TIMERWAIT_* range.
   The return value is the new aggregate value that the timer was updated to;
   if this is less than zero then a timer trigger is guaranteed to be
   generated after the number of ticks implied, while if a positive result is
   returned either iterative or step-wise corrective action must be taken to
   resynchronise the timer and hence provoke a future timer trigger. */
int __TBITimerAdd( int Id, int Wait );

/* String table search function; pStart is the first entry to check or NULL,
   pStr is the string data to search for, and MatchLen is either the length
   of the string to compare for an exact match or a negative length to
   compare for a partial match. */
const TBISTR *__TBIFindStr( const TBISTR *pStart,
                            const char *pStr, int MatchLen );

/* String table translate function; pStr is the text to translate and Len is
   its length. The value returned may not be a string pointer if the
   translation value is really some other type; 64-bit alignment of the return
   pointer is guaranteed, so almost any type including a structure could be
   located with this routine. */
const void *__TBITransStr( const char *pStr, int Len );
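
/* For illustration only - a sketch of the periodic timer protocol described
   above; Period is a hypothetical negative tick count in the recommended
   TBI_TIMERWAIT_* range and Id selects the timer via TBID_ISTAT_BIT:

        __TBITimerCtrl( Id, Period );  / * First trigger after Period * /

        / * ... then, on each timer trigger ... * /
        if ( __TBITimerAdd( Id, Period ) > 0 )
        {
            / * Timer has fallen behind - corrective action is needed to
                provoke a future trigger * /
        }
 */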
/* Arbitrary physical memory access windows; use different Channels to avoid
   conflict/thrashing within a single piece of code. */
void *__TBIPhysAccess( int Channel, int PhysAddr, int Bytes );
void __TBIPhysRelease( int Channel, void *pLinAddr );

#ifdef METAC_1_0
/* Data cache functions nullified because data cache is off */
#define TBIDCACHE_FLUSH( pAddr )
#define TBIDCACHE_PRELOAD( Type, pAddr ) ((Type) (pAddr))
#define TBIDCACHE_REFRESH( Type, pAddr ) ((Type) (pAddr))
#endif
#ifdef METAC_1_1
/* To flush a single cache line from the data cache using a linear address */
#define TBIDCACHE_FLUSH( pAddr ) ((volatile char *) \
    (((unsigned int) (pAddr))>>LINSYSLFLUSH_S))[0] = 0

extern void * __builtin_dcache_preload (void *);

/* Try to ensure that the data at the address concerned is in the cache */
#define TBIDCACHE_PRELOAD( Type, Addr ) \
    ((Type) __builtin_dcache_preload ((void *)(Addr)))

extern void * __builtin_dcache_refresh (void *);

/* Flush any old version of data from address and re-load a new copy */
#define TBIDCACHE_REFRESH( Type, Addr ) __extension__ ({ \
    Type __addr = (Type)(Addr); \
    (void)__builtin_dcache_refresh ((void *)(((unsigned int)(__addr))>>6)); \
    __addr; })

#endif
#ifndef METAC_1_0
#ifndef METAC_1_1
/* Support for DCACHE builtin */
extern void __builtin_dcache_flush (void *);

/* To flush a single cache line from the data cache using a linear address */
#define TBIDCACHE_FLUSH( Addr ) \
    __builtin_dcache_flush ((void *)(Addr))

extern void * __builtin_dcache_preload (void *);

/* Try to ensure that the data at the address concerned is in the cache */
#define TBIDCACHE_PRELOAD( Type, Addr ) \
    ((Type) __builtin_dcache_preload ((void *)(Addr)))

extern void * __builtin_dcache_refresh (void *);

/* Flush any old version of data from address and re-load a new copy */
#define TBIDCACHE_REFRESH( Type, Addr ) \
    ((Type) __builtin_dcache_refresh ((void *)(Addr)))

#endif
#endif

/* Flush the MMCU cache */
#define TBIMCACHE_FLUSH() { ((volatile int *) LINSYSCFLUSH_MMCU)[0] = 0; }

#ifdef METAC_2_1
/* Obtain the MMU table entry for the specified address */
#define TBIMTABLE_LEAFDATA(ADDR) TBIXCACHE_RD((int)(ADDR) & (-1<<6))

#ifndef __ASSEMBLY__
/* Obtain the full MMU table entry for the specified address */
#define TBIMTABLE_DATA(ADDR) __extension__ ({ TBIRES __p; \
    __p.Val = TBIXCACHE_RL((int)(ADDR) & (-1<<6)); \
    __p; })
#endif
#endif

/* Combine a physical base address, and a linear address
 * Internal use only
 */
#define _TBIMTABLE_LIN2PHYS(PHYS, LIN, LMASK) (void*)(((int)(PHYS)&0xFFFFF000)\
                                                      +((int)(LIN)&(LMASK)))

/* Convert a linear to a physical address */
#define TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR) \
    (((LEAFDATA) & CRLINPHY0_VAL_BIT) \
        ? _TBIMTABLE_LIN2PHYS(LEAFDATA, ADDR, 0x00000FFF) \
        : 0)
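
/* For illustration only - a sketch of a linear to physical conversion on
   METAC_2_1 cores using the two macros above; pLin is a hypothetical linear
   address within a 4K page mapping (the 0x00000FFF mask case):

        int Leaf = TBIMTABLE_LEAFDATA( pLin );
        void *pPhys = TBIMTABLE_LIN2PHYS( Leaf, pLin );

        if ( pPhys == 0 )
        {
            / * No valid translation - CRLINPHY0_VAL_BIT was clear * /
        }
 */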
/* Debug support - using external debugger or host */
void __TBIDumpSegListEntries( void );
void __TBILogF( const char *pFmt, ... );
void __TBIAssert( const char *pFile, int LineNum, const char *pExp );
void __TBICont( const char *pMsg, ... ); /* TBIAssert -> 'wait for continue' */

/* Array of signal name data for debug messages */
extern const char __TBISigNames[];
#endif /* ifndef __ASSEMBLY__ */



/* Scale of sub-strings in the __TBISigNames string list */
#define TBI_SIGNAME_SCALE   4
#define TBI_SIGNAME_SCALE_S 2

#define TBI_1_3

#ifdef TBI_1_3

#ifndef __ASSEMBLY__
#define TBIXCACHE_RD(ADDR) __extension__ ({\
    void * __Addr = (void *)(ADDR); \
    int __Data; \
    __asm__ volatile ( "CACHERD\t%0,[%1+#0]" : \
                       "=r" (__Data) : "r" (__Addr) ); \
    __Data; })

#define TBIXCACHE_RL(ADDR) __extension__ ({\
    void * __Addr = (void *)(ADDR); \
    long long __Data; \
    __asm__ volatile ( "CACHERL\t%0,%t0,[%1+#0]" : \
                       "=d" (__Data) : "r" (__Addr) ); \
    __Data; })

#define TBIXCACHE_WD(ADDR, DATA) do {\
    void * __Addr = (void *)(ADDR); \
    int __Data = DATA; \
    __asm__ volatile ( "CACHEWD\t[%0+#0],%1" : \
                       : "r" (__Addr), "r" (__Data) ); } while(0)

#define TBIXCACHE_WL(ADDR, DATA) do {\
    void * __Addr = (void *)(ADDR); \
    long long __Data = DATA; \
    __asm__ volatile ( "CACHEWL\t[%0+#0],%1,%t1" : \
                       : "r" (__Addr), "r" (__Data) ); } while(0)

#ifdef TBI_4_0

#define TBICACHE_FLUSH_L1D_L2(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_FLUSH_L1D_L2)
#define TBICACHE_WRITEBACK_L1D_L2(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_WRITEBACK_L1D_L2)
#define TBICACHE_INVALIDATE_L1D(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D)
#define TBICACHE_INVALIDATE_L1D_L2(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1D_L2)
#define TBICACHE_INVALIDATE_L1DTLB(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1DTLB)
#define TBICACHE_INVALIDATE_L1I(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1I)
#define TBICACHE_INVALIDATE_L1ITLB(ADDR) \
    TBIXCACHE_WD(ADDR, CACHEW_INVALIDATE_L1ITLB)

#endif /* TBI_4_0 */
#endif /* ifndef __ASSEMBLY__ */

/*
 * Calculate the linear PC value from the real PC and Minim mode control; the
 * LSB of the result returned indicates if address compression has occurred.
 */
#ifndef __ASSEMBLY__
#define METAG_LINPC( PCVal ) (\
    ( (TBI_GETREG(TXPRIVEXT) & TXPRIVEXT_MINIMON_BIT) != 0 ) ? (         \
        ( ((PCVal) & 0x00900000) == 0x00900000 ) ?                       \
            (((PCVal) & 0xFFE00000) + (((PCVal) & 0x001FFFFC)>>1) + 1) : \
        ( ((PCVal) & 0x00800000) == 0x00000000 ) ?                       \
            (((PCVal) & 0xFF800000) + (((PCVal) & 0x007FFFFC)>>1) + 1) : \
                                                             (PCVal) )   \
                                                           : (PCVal) )
#define METAG_LINPC_X2BIT 0x00000001 /* Make (Size>>1) if compressed */

/* Convert an arbitrary Linear address into a valid Minim PC or return 0 */
#define METAG_PCMINIM( LinVal ) (\
    (((LinVal) & 0x00980000) == 0x00880000) ?                      \
        (((LinVal) & 0xFFE00000) + (((LinVal) & 0x000FFFFE)<<1)) : \
    (((LinVal) & 0x00C00000) == 0x00000000) ?                      \
        (((LinVal) & 0xFF800000) + (((LinVal) & 0x003FFFFE)<<1)) : 0 )

/* Reverse a METAG_LINPC conversion step to return the original PCVal */
#define METAG_PCLIN( LinVal ) ( 0xFFFFFFFC & (\
    ( (LinVal & METAG_LINPC_X2BIT) != 0 ) ? METAG_PCMINIM( LinVal ) : \
                                                          (LinVal) ))
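
/* For illustration only - a sketch of the round trip implied by the macros
   above; PCVal is a hypothetical program counter sample:

        int LinPC = METAG_LINPC( PCVal ); / * LSB set if compressed * /

        / * ... work with the linear form ... * /

        PCVal = METAG_PCLIN( LinPC );     / * Recover the original PC * /
 */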
/*
 * Flush the MMCU Table cache privately for each thread. On cores that do not
 * support per-thread flushing it will flush all threads' mapping data.
 */
#define TBIMCACHE_TFLUSH(Thread) do {\
    ((volatile int *)( LINSYSCFLUSH_TxMMCU_BASE + \
                       (LINSYSCFLUSH_TxMMCU_STRIDE*(Thread)) ))[0] = 0; \
    } while(0)

/*
 * To flush a single linear-matched cache line from the code cache. In
 * cases where Minim is possible the METAG_LINPC operation must be used
 * to pre-process the address being flushed.
 */
#define TBIICACHE_FLUSH( pAddr ) TBIXCACHE_WD (pAddr, CACHEW_ICACHE_BIT)

/* To flush a single linear-matched mapping from code/data MMU table cache */
#define TBIMCACHE_AFLUSH( pAddr, SegType ) \
    TBIXCACHE_WD(pAddr, CACHEW_TLBFLUSH_BIT + ( \
        ((SegType) == TBID_SEGTYPE_TEXT) ? CACHEW_ICACHE_BIT : 0 ))

/*
 * To flush translation data corresponding to a range of addresses without
 * using TBIMCACHE_TFLUSH to flush all of this thread's translation data, it
 * is necessary to know what stride (>= 4K) must be used to flush a specific
 * region.
 *
 * For example, direct mapped regions use the maximum page size (512K), which
 * may mean that only one flush is needed to cover the sub-set of the direct
 * mapped area used since it was set up.
 *
 * The function returns the stride on which flushes should be performed.
 *
 * If 0 is returned then the region is not subject to MMU caching; if -1 is
 * returned then this indicates that only TBIMCACHE_TFLUSH can be used to
 * flush the region concerned rather than TBIMCACHE_AFLUSH, which this
 * function is designed to support.
 */
int __TBIMMUCacheStride( const void *pStart, int Bytes );

/*
 * This function will use the above lower level functions to achieve a MMU
 * table data flush in as optimal a fashion as possible. On a system that
 * supports linear address based caching this function will also call the
 * code or data cache flush functions to maintain address/data coherency.
 *
 * SegType should be TBID_SEGTYPE_TEXT if the address range is for code, or
 * any other value such as TBID_SEGTYPE_DATA for data. If an area is
 * used in both ways then call this function twice; once for each.
 */
void __TBIMMUCacheFlush( const void *pStart, int Bytes, int SegType );
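
/* For illustration only - a sketch of the stride protocol described above,
   flushing the translation data for a data region by hand (the
   __TBIMMUCacheFlush routine above encapsulates this logic); pStart/Bytes
   describe a hypothetical region and ThisThread is a hypothetical thread
   number:

        int Stride = __TBIMMUCacheStride( pStart, Bytes );

        if ( Stride > 0 )
        {
            const char *p = (const char *) pStart;

            for ( ; p < ((const char *) pStart)+Bytes; p += Stride )
                TBIMCACHE_AFLUSH( p, TBID_SEGTYPE_DATA );
        }
        else if ( Stride == -1 )
        {
            TBIMCACHE_TFLUSH( ThisThread ); / * Whole thread flush needed * /
        }
        / * Stride == 0 - region is not subject to MMU caching * /
 */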
/*
 * Cached Core mode setup and flush functions allow one code and one data
 * region of the corresponding global or local cache partition size to be
 * locked into the corresponding cache memory. This prevents normal LRU
 * logic discarding the code or data and avoids write-thru bandwidth in
 * data areas. Code mappings are selected by specifying TBID_SEGTYPE_TEXT
 * for SegType, otherwise data mappings are created.
 *
 * The Mode supplied should always contain the VALID bit and WINx selection
 * data. Data areas will be mapped read-only if the WRITE bit is not added.
 *
 * The address returned by the Opt function will either be the same as that
 * passed in (if optimisation cannot be supported) or the base of the new core
 * cached region in linear address space. The returned address must be passed
 * into the End function to remove the mapping when required. If a non-core
 * cached memory address is passed into it, the End function has no effect.
 * Note that the region accessed MUST be flushed from the appropriate cache
 * before the End function is called to deliver correct operation.
 */
void *__TBICoreCacheOpt( const void *pStart, int Bytes, int SegType, int Mode );
void __TBICoreCacheEnd( const void *pOpt, int Bytes, int SegType );

/*
 * Optimise physical access channel and flush side effects before releasing
 * the channel. If pStart is NULL the whole region must be flushed, and this is
 * done automatically by the channel release function if optimisation is
 * enabled. Flushing the specific region that may have been accessed before
 * release should optimise this process. On physically cached systems we do
 * not flush the code/data caches; only the MMU table data needs flushing.
 */
void __TBIPhysOptim( int Channel, int IMode, int DMode );
void __TBIPhysFlush( int Channel, const void *pStart, int Bytes );
#endif
#endif /* ifdef TBI_1_3 */

#endif /* _ASM_METAG_TBX_H_ */