/* -*- mode: C; c-basic-offset: 3; -*- */

#include <stdio.h>    // fprintf
#include <assert.h>   // assert
#if defined(__APPLE__)
#include <machine/endian.h>
#define __BYTE_ORDER    BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#else
#include <endian.h>
#endif
#include <inttypes.h>
#include "vbits.h"
#include "vtest.h"


/* Return the bits of V if they fit into 64 bits. If V has fewer than
   64 bits, the bit pattern is zero-extended to the left. */
static uint64_t
get_bits64(vbits_t v)
{
   switch (v.num_bits) {
   case 1:  return v.bits.u32;
   case 8:  return v.bits.u8;
   case 16: return v.bits.u16;
   case 32: return v.bits.u32;
   case 64: return v.bits.u64;
   case 128:
   case 256:
      /* fall through */
   default:
      panic(__func__);
   }
}
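
/* Illustrative example (added, not from the original source): for a
   16-bit shadow value with v.bits.u16 == 0xABCD, get_bits64() returns
   0x000000000000ABCD -- the upper 48 bits of the result are zero. */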


void
print_vbits(FILE *fp, vbits_t v)
{
   switch (v.num_bits) {
   case 1:  fprintf(fp, "%08x", v.bits.u32); break;
   case 8:  fprintf(fp, "%02x", v.bits.u8);  break;
   case 16: fprintf(fp, "%04x", v.bits.u16); break;
   case 32: fprintf(fp, "%08x", v.bits.u32); break;
   case 64: fprintf(fp, "%016"PRIx64, v.bits.u64); break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
      }
      break;
   default:
      panic(__func__);
   }
}
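
/* Usage sketch (illustrative, not part of the original file): the most
   significant 64-bit chunk is printed first, so a 128-bit shadow value
   whose high half is all ones and whose low half is zero prints as
   "ffffffffffffffff0000000000000000" regardless of host endianness. */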


/* Return a value where all bits are set to undefined. */
vbits_t
undefined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case 1:   new.bits.u32 = 0x01;   break;
   case 8:   new.bits.u8  = 0xff;   break;
   case 16:  new.bits.u16 = 0xffff; break;
   case 32:  new.bits.u32 = ~0;     break;
   case 64:  new.bits.u64 = ~0ull;  break;
   case 128: new.bits.u128[0] = ~0ull;
             new.bits.u128[1] = ~0ull;
             break;
   case 256: new.bits.u256[0] = ~0ull;
             new.bits.u256[1] = ~0ull;
             new.bits.u256[2] = ~0ull;
             new.bits.u256[3] = ~0ull;
             break;
   default:
      panic(__func__);
   }
   return new;
}
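
/* Example (illustrative): undefined_vbits(8) yields a vbits_t with
   bits.u8 == 0xff, i.e. all eight shadow bits flagged as undefined.
   For the 1-bit case only bit 0 is meaningful, hence 0x01 rather
   than ~0. */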


/* Return a value where all bits are set to defined. */
vbits_t
defined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case 1:   new.bits.u32 = 0x0; break;
   case 8:   new.bits.u8  = 0x0; break;
   case 16:  new.bits.u16 = 0x0; break;
   case 32:  new.bits.u32 = 0x0; break;
   case 64:  new.bits.u64 = 0x0; break;
   case 128: new.bits.u128[0] = 0x0;
             new.bits.u128[1] = 0x0;
             break;
   case 256: new.bits.u256[0] = 0x0;
             new.bits.u256[1] = 0x0;
             new.bits.u256[2] = 0x0;
             new.bits.u256[3] = 0x0;
             break;
   default:
      panic(__func__);
   }
   return new;
}


/* Return 1 if the two values are equal. */
int
equal_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   switch (v1.num_bits) {
   case 1:   return v1.bits.u32 == v2.bits.u32;
   case 8:   return v1.bits.u8  == v2.bits.u8;
   case 16:  return v1.bits.u16 == v2.bits.u16;
   case 32:  return v1.bits.u32 == v2.bits.u32;
   case 64:  return v1.bits.u64 == v2.bits.u64;
   case 128: return v1.bits.u128[0] == v2.bits.u128[0] &&
                    v1.bits.u128[1] == v2.bits.u128[1];
   case 256: return v1.bits.u256[0] == v2.bits.u256[0] &&
                    v1.bits.u256[1] == v2.bits.u256[1] &&
                    v1.bits.u256[2] == v2.bits.u256[2] &&
                    v1.bits.u256[3] == v2.bits.u256[3];
   default:
      panic(__func__);
   }
}


/* Truncate the bit pattern in V to NUM_BITS bits. */
vbits_t
truncate_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits <= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (num_bits <= 64) {
      uint64_t bits;

      if (v.num_bits <= 64)
         bits = get_bits64(v);
      else if (v.num_bits == 128)
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u128[0];
         else
            bits = v.bits.u128[1];
      else if (v.num_bits == 256)
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u256[0];
         else
            bits = v.bits.u256[3];
      else
         panic(__func__);

      switch (num_bits) {
      case 1:  new.bits.u32 = bits & 0x01;   break;
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (num_bits == 128) {
      assert(v.num_bits == 256);
      /* From 256 bits to 128 */
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      } else {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      }
      return new;
   }

   /* Cannot truncate to 256 bits from something larger */
   panic(__func__);
}
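
/* Example (illustrative): truncation keeps the least significant bits.
   On a little-endian host, truncating a 128-bit value to 32 bits
   returns bits.u128[0] & 0xffffffff; on a big-endian host the low
   64 bits live in bits.u128[1] instead. */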


/* Helper function to compute left_vbits */
static uint64_t
left64(uint64_t x)
{
   // left(x) = x | -x
   return x | (~x + 1);
}
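
/* Worked example (illustrative): left64(0x0008) == 0x0008 | (-0x0008)
   == 0xfffffffffffffff8.  The lowest set bit and every bit above it
   become 1; left64(0) stays 0. */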


vbits_t
left_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = left64(get_bits64(v));

      switch (num_bits) {
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         } else {
            new.bits.u128[1] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
               new.bits.u256[3] = ~0ull;
            } else {
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
               new.bits.u256[3] = 0;
            }
         } else {
            new.bits.u256[3] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[0] = ~0ull;
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
            } else {
               new.bits.u256[0] = 0;
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
            }
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (v.bits.u128[1] != 0) {
            new.bits.u128[0] = v.bits.u128[0];
            new.bits.u128[1] = left64(v.bits.u128[1]);
         } else {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            if (new.bits.u128[0] & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         }
      } else {
         if (v.bits.u128[0] != 0) {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            new.bits.u128[1] = v.bits.u128[1];
         } else {
            new.bits.u128[1] = left64(v.bits.u128[1]);
            if (new.bits.u128[1] & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
      }
      if (num_bits == 128) return new;

      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         uint64_t b1 = new.bits.u128[1];
         uint64_t b0 = new.bits.u128[0];

         new.bits.u256[0] = b0;
         new.bits.u256[1] = b1;

         if (new.bits.u256[1] & (1ull << 63)) {  // MSB is set
            new.bits.u256[2] = ~0ull;
            new.bits.u256[3] = ~0ull;
         } else {
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      } else {
         uint64_t b1 = new.bits.u128[0];
         uint64_t b0 = new.bits.u128[1];

         new.bits.u256[2] = b0;
         new.bits.u256[3] = b1;

         if (new.bits.u256[2] & (1ull << 63)) {  // MSB is set
            new.bits.u256[0] = ~0ull;
            new.bits.u256[1] = ~0ull;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
         }
      }
      return new;
   }

   panic(__func__);
}
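
/* Example (illustrative): widening the 8-bit shadow value 0x10 to 32
   bits gives left64(0x10) & 0xffffffff == 0xfffffff0, i.e. the
   undefinedness of bit 4 is smeared into every higher bit of the wider
   result. */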


vbits_t
or_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:   new.bits.u8  = v1.bits.u8  | v2.bits.u8;  break;
   case 16:  new.bits.u16 = v1.bits.u16 | v2.bits.u16; break;
   case 32:  new.bits.u32 = v1.bits.u32 | v2.bits.u32; break;
   case 64:  new.bits.u64 = v1.bits.u64 | v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] | v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] | v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] | v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] | v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] | v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] | v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
and_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:   new.bits.u8  = v1.bits.u8  & v2.bits.u8;  break;
   case 16:  new.bits.u16 = v1.bits.u16 & v2.bits.u16; break;
   case 32:  new.bits.u32 = v1.bits.u32 & v2.bits.u32; break;
   case 64:  new.bits.u64 = v1.bits.u64 & v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] & v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] & v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] & v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] & v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] & v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] & v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
concat_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits * 2 };

   switch (v1.num_bits) {
   case 8:  new.bits.u16 = (v1.bits.u8  << 8)  | v2.bits.u8;  break;
   case 16: new.bits.u32 = (v1.bits.u16 << 16) | v2.bits.u16; break;
   case 32: new.bits.u64 = v1.bits.u32;
            new.bits.u64 = (new.bits.u64 << 32) | v2.bits.u32; break;
   case 64:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v2.bits.u64;
         new.bits.u128[1] = v1.bits.u64;
      } else {
         new.bits.u128[0] = v1.bits.u64;
         new.bits.u128[1] = v2.bits.u64;
      }
      break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v2.bits.u128[0];
         new.bits.u256[1] = v2.bits.u128[1];
         new.bits.u256[2] = v1.bits.u128[0];
         new.bits.u256[3] = v1.bits.u128[1];
      } else {
         new.bits.u256[0] = v1.bits.u128[0];
         new.bits.u256[1] = v1.bits.u128[1];
         new.bits.u256[2] = v2.bits.u128[0];
         new.bits.u256[3] = v2.bits.u128[1];
      }
      break;
   case 256: /* Fall through */
   default:
      panic(__func__);
   }

   return new;
}
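
/* Example (illustrative): concatenating two 64-bit shadow values
   places v1 in the most significant half of the 128-bit result, so on
   a little-endian host u128[1] == v1.bits.u64 and u128[0] ==
   v2.bits.u64. */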


vbits_t
upper_vbits(vbits_t v)
{
   vbits_t new = { .num_bits = v.num_bits / 2 };

   switch (v.num_bits) {
   case 16: new.bits.u8  = v.bits.u16 >> 8;  break;
   case 32: new.bits.u16 = v.bits.u32 >> 16; break;
   case 64: new.bits.u32 = v.bits.u64 >> 32; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN)
         new.bits.u64 = v.bits.u128[1];
      else
         new.bits.u64 = v.bits.u128[0];
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      } else {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      }
      break;
   case 8:
   default:
      panic(__func__);
   }

   return new;
}
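
/* Example (illustrative): upper_vbits() of a 64-bit value returns its
   high 32 bits, e.g. bits.u64 == 0xAAAABBBBCCCCDDDD yields bits.u32 ==
   0xAAAABBBB.  The 8-bit case panics because a 4-bit shadow width is
   not representable in vbits_t. */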


vbits_t
zextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = get_bits64(v);

      switch (num_bits) {
      case 8:  new.bits.u8  = bits; break;
      case 16: new.bits.u16 = bits; break;
      case 32: new.bits.u32 = bits; break;
      case 64: new.bits.u64 = bits; break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = bits;
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = bits;
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v.bits.u128[0];
         new.bits.u256[1] = v.bits.u128[1];
         new.bits.u256[2] = 0;
         new.bits.u256[3] = 0;
      } else {
         new.bits.u256[0] = 0;
         new.bits.u256[1] = 0;
         new.bits.u256[2] = v.bits.u128[1];
         new.bits.u256[3] = v.bits.u128[0];
      }
      return new;
   }

   /* Cannot zero-extend a 256-bit value to something larger */
   panic(__func__);
}
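
/* Example (illustrative): zero-extending the 16-bit shadow value
   0x8000 to 64 bits gives bits.u64 == 0x0000000000008000; the newly
   created high bits are marked as defined (0). */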


vbits_t
sextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   int sextend = 0;

   switch (v.num_bits) {
   case 8:   if (v.bits.u8  == 0x80)              sextend = 1; break;
   case 16:  if (v.bits.u16 == 0x8000)            sextend = 1; break;
   case 32:  if (v.bits.u32 == 0x80000000)        sextend = 1; break;
   case 64:  if (v.bits.u64 == (1ull << 63))      sextend = 1; break;
   case 128: if (v.bits.u128[1] == (1ull << 63))  sextend = 1; break;
   case 256: if (v.bits.u256[3] == (1ull << 63))  sextend = 1; break;

   default:
      panic(__func__);
   }

   return sextend ? left_vbits(v, num_bits) : zextend_vbits(v, num_bits);
}
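
/* Note (added): sextend is set only when the shadow value consists of
   exactly the sign bit (e.g. 0x80 for 8 bits).  In that case the
   undefinedness is propagated into all higher bits via left_vbits();
   otherwise the value is zero-extended. */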


vbits_t
onehot_vbits(unsigned bitno, unsigned num_bits)
{
   assert(bitno < num_bits);

   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case 1:  new.bits.u32 = 1    << bitno; break;
   case 8:  new.bits.u8  = 1    << bitno; break;
   case 16: new.bits.u16 = 1    << bitno; break;
   case 32: new.bits.u32 = 1u   << bitno; break;
   case 64: new.bits.u64 = 1ull << bitno; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u128[0] = 1ull << bitno;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << (bitno - 64);
         }
      } else {
         if (bitno < 64) {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << bitno;
         } else {
            new.bits.u128[0] = 1ull << (bitno - 64);
            new.bits.u128[1] = 0;
         }
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u256[0] = 1ull << bitno;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 64);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 128);
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << (bitno - 192);
         }
      } else {
         if (bitno < 64) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << bitno;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 64);
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 128);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 1ull << (bitno - 192);
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      }
      break;
   default:
      panic(__func__);
   }
   return new;
}
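
/* Example (illustrative): onehot_vbits(65, 128) marks only bit 65 as
   undefined; on a little-endian host that is u128[0] == 0 and
   u128[1] == 0x2. */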


int
completely_defined_vbits(vbits_t v)
{
   return equal_vbits(v, defined_vbits(v.num_bits));
}


vbits_t
shl_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  <<= shift_amount; break;
   case 16: new.bits.u16 <<= shift_amount; break;
   case 32: new.bits.u32 <<= shift_amount; break;
   case 64: new.bits.u64 <<= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
shr_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  >>= shift_amount; break;
   case 16: new.bits.u16 >>= shift_amount; break;
   case 32: new.bits.u32 >>= shift_amount; break;
   case 64: new.bits.u64 >>= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
sar_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;
   int msb;

   switch (v.num_bits) {
   case 8:
      new.bits.u8 >>= shift_amount;
      msb = (v.bits.u8 & 0x80) != 0;
      break;
   case 16:
      new.bits.u16 >>= shift_amount;
      msb = (v.bits.u16 & 0x8000) != 0;
      break;
   case 32:
      new.bits.u32 >>= shift_amount;
      msb = (v.bits.u32 & (1u << 31)) != 0;
      break;
   case 64:
      new.bits.u64 >>= shift_amount;
      msb = (v.bits.u64 & (1ull << 63)) != 0;
      break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   if (msb)
      new = left_vbits(new, new.num_bits);
   return new;
}
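
/* Worked example (illustrative): an arithmetic right shift of the
   8-bit shadow value 0x80 by 1 first gives 0x40; since the original
   MSB was set, left_vbits() then smears it, yielding 0xC0 -- the
   undefined sign bit is replicated into the vacated position. */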

/* Return a value for the POWER Iop_CmpORD class iops */
vbits_t
cmpord_vbits(unsigned v1_num_bits, unsigned v2_num_bits)
{
   vbits_t new = { .num_bits = v1_num_bits };

   /* The sizes of the values being compared must be the same. */
   assert(v1_num_bits == v2_num_bits);

   /* The comparison only produces a 32-bit or 64-bit value, whose
    * low-order bits indicate less than, equal, or greater than.
    */
   switch (v1_num_bits) {
   case 32:
      new.bits.u32 = 0xE;
      break;

   case 64:
      new.bits.u64 = 0xE;
      break;

   default:
      panic(__func__);
   }

   return new;
}
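
/* Note (added): 0xE == 0b1110 marks three result bits as undefined
   while leaving the least significant bit defined.  This presumably
   mirrors the 8/4/2 encoding the Iop_CmpORD operations use for
   "less than", "greater than" and "equal". */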