/* -*- mode: C; c-basic-offset: 3; -*- */

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2012-2015  Florian Krohm

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include <stdio.h>    // fprintf
#include <assert.h>   // assert
#if defined(__APPLE__)
#include <machine/endian.h>
#define __BYTE_ORDER    BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#elif defined(__sun)
#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN    4321
#  if defined(_LITTLE_ENDIAN)
#  define __BYTE_ORDER __LITTLE_ENDIAN
#  else
#  define __BYTE_ORDER __BIG_ENDIAN
#  endif
#else
#include <endian.h>
#endif
#include <inttypes.h>
#include "vbits.h"
#include "vtest.h"

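/* Note on representation (an inference from the code below, not part of the
   original sources): a vbits_t carries its width in NUM_BITS and its payload
   in the union member of matching size; 1-bit values live in the u32 member.
   For 128- and 256-bit values the payload is split into 64-bit limbs whose
   array order follows the host byte order: on little-endian hosts element 0
   is the least significant limb, on big-endian hosts it is the most
   significant one. */
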
/* Return the bits of V if they fit into 64-bit. If V has fewer than
   64 bits, the bit pattern is zero-extended to the left. */
static uint64_t
get_bits64(vbits_t v)
{
   switch (v.num_bits) {
   case 1:  return v.bits.u32;
   case 8:  return v.bits.u8;
   case 16: return v.bits.u16;
   case 32: return v.bits.u32;
   case 64: return v.bits.u64;
   case 128:
   case 256:
      /* fall through */
   default:
      panic(__func__);
   }
}

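/* Print the bit pattern of V to FP in hex, most significant bits first. */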
void
print_vbits(FILE *fp, vbits_t v)
{
   switch (v.num_bits) {
   case 1:  fprintf(fp, "%08x", v.bits.u32); break;
   case 8:  fprintf(fp, "%02x", v.bits.u8);  break;
   case 16: fprintf(fp, "%04x", v.bits.u16); break;
   case 32: fprintf(fp, "%08x", v.bits.u32); break;
   case 64: fprintf(fp, "%016"PRIx64, v.bits.u64); break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
      }
      break;
   default:
      panic(__func__);
   }
}


/* Return a value where all bits are set to undefined. */
vbits_t
undefined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case   1: new.bits.u32 = 0x01;   break;
   case   8: new.bits.u8  = 0xff;   break;
   case  16: new.bits.u16 = 0xffff; break;
   case  32: new.bits.u32 = ~0;     break;
   case  64: new.bits.u64 = ~0ull;  break;
   case 128: new.bits.u128[0] = ~0ull;
             new.bits.u128[1] = ~0ull;
             break;
   case 256: new.bits.u256[0] = ~0ull;
             new.bits.u256[1] = ~0ull;
             new.bits.u256[2] = ~0ull;
             new.bits.u256[3] = ~0ull;
             break;
   default:
      panic(__func__);
   }
   return new;
}


/* Return a value where all bits are set to defined. */
vbits_t
defined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case   1: new.bits.u32 = 0x0; break;
   case   8: new.bits.u8  = 0x0; break;
   case  16: new.bits.u16 = 0x0; break;
   case  32: new.bits.u32 = 0x0; break;
   case  64: new.bits.u64 = 0x0; break;
   case 128: new.bits.u128[0] = 0x0;
             new.bits.u128[1] = 0x0;
             break;
   case 256: new.bits.u256[0] = 0x0;
             new.bits.u256[1] = 0x0;
             new.bits.u256[2] = 0x0;
             new.bits.u256[3] = 0x0;
             break;
   default:
      panic(__func__);
   }
   return new;
}


/* Return 1 if equal. */
int
equal_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   switch (v1.num_bits) {
   case 1:   return v1.bits.u32 == v2.bits.u32;
   case 8:   return v1.bits.u8  == v2.bits.u8;
   case 16:  return v1.bits.u16 == v2.bits.u16;
   case 32:  return v1.bits.u32 == v2.bits.u32;
   case 64:  return v1.bits.u64 == v2.bits.u64;
   case 128: return v1.bits.u128[0] == v2.bits.u128[0] &&
                    v1.bits.u128[1] == v2.bits.u128[1];
   case 256: return v1.bits.u256[0] == v2.bits.u256[0] &&
                    v1.bits.u256[1] == v2.bits.u256[1] &&
                    v1.bits.u256[2] == v2.bits.u256[2] &&
                    v1.bits.u256[3] == v2.bits.u256[3];
   default:
      panic(__func__);
   }
}


/* Truncate the bit pattern in V to NUM_BITS bits. */
vbits_t
truncate_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits <= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (num_bits <= 64) {
      uint64_t bits;

      if (v.num_bits <= 64)
         bits = get_bits64(v);
      else if (v.num_bits == 128)
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u128[0];
         else
            bits = v.bits.u128[1];
      else if (v.num_bits == 256)
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u256[0];
         else
            bits = v.bits.u256[3];
      else
         panic(__func__);

      switch (num_bits) {
      case 1:  new.bits.u32 = bits & 0x01;   break;
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (num_bits == 128) {
      assert(v.num_bits == 256);
      /* From 256 bits to 128 */
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      } else {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      }
      return new;
   }

   /* Cannot truncate to 256 bits from something larger */
   panic(__func__);
}


/* Helper function to compute left_vbits */
static uint64_t
left64(uint64_t x)
{
   // left(x) = x | -x
   return x | (~x + 1);
}


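/* Widen V to NUM_BITS bits while propagating set bits towards the most
   significant end, using left(x) = x | -x on 64-bit limbs; the extension
   limbs become all ones when the propagated most significant bit is set,
   otherwise all zeroes.  (Descriptive comment inferred from the code.) */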
vbits_t
left_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = left64(get_bits64(v));

      switch (num_bits) {
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         } else {
            new.bits.u128[1] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
               new.bits.u256[3] = ~0ull;
            } else {
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
               new.bits.u256[3] = 0;
            }
         } else {
            new.bits.u256[3] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[0] = ~0ull;
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
            } else {
               new.bits.u256[0] = 0;
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
            }
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (v.bits.u128[1] != 0) {
            new.bits.u128[0] = v.bits.u128[0];
            new.bits.u128[1] = left64(v.bits.u128[1]);
         } else {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            if (new.bits.u128[0] & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         }
      } else {
         if (v.bits.u128[0] != 0) {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            new.bits.u128[1] = v.bits.u128[1];
         } else {
            new.bits.u128[1] = left64(v.bits.u128[1]);
            if (new.bits.u128[1] & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
      }
      if (num_bits == 128) return new;

      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         uint64_t b1 = new.bits.u128[1];
         uint64_t b0 = new.bits.u128[0];

         new.bits.u256[0] = b0;
         new.bits.u256[1] = b1;

         if (new.bits.u256[1] & (1ull << 63)) {  // MSB is set
            new.bits.u256[2] = ~0ull;
            new.bits.u256[3] = ~0ull;
         } else {
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      } else {
         uint64_t b1 = new.bits.u128[0];
         uint64_t b0 = new.bits.u128[1];

         new.bits.u256[2] = b0;
         new.bits.u256[3] = b1;

         if (new.bits.u256[2] & (1ull << 63)) {  // MSB is set
            new.bits.u256[0] = ~0ull;
            new.bits.u256[1] = ~0ull;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
         }
      }
      return new;
   }

   panic(__func__);
}


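/* Bitwise OR of two vbits values of equal size. */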
vbits_t
or_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:   new.bits.u8  = v1.bits.u8  | v2.bits.u8;  break;
   case 16:  new.bits.u16 = v1.bits.u16 | v2.bits.u16; break;
   case 32:  new.bits.u32 = v1.bits.u32 | v2.bits.u32; break;
   case 64:  new.bits.u64 = v1.bits.u64 | v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] | v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] | v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] | v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] | v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] | v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] | v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}


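/* Bitwise AND of two vbits values of equal size. */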
vbits_t
and_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:   new.bits.u8  = v1.bits.u8  & v2.bits.u8;  break;
   case 16:  new.bits.u16 = v1.bits.u16 & v2.bits.u16; break;
   case 32:  new.bits.u32 = v1.bits.u32 & v2.bits.u32; break;
   case 64:  new.bits.u64 = v1.bits.u64 & v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] & v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] & v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] & v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] & v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] & v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] & v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}


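/* Concatenate V1 and V2 into a value twice as wide; V1 supplies the more
   significant half. */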
vbits_t
concat_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits * 2 };

   switch (v1.num_bits) {
   case 8:  new.bits.u16 = v1.bits.u8;
            new.bits.u16 = (new.bits.u16 << 8)  | v2.bits.u8;  break;
   case 16: new.bits.u32 = v1.bits.u16;
            new.bits.u32 = (new.bits.u32 << 16) | v2.bits.u16; break;
   case 32: new.bits.u64 = v1.bits.u32;
            new.bits.u64 = (new.bits.u64 << 32) | v2.bits.u32; break;
   case 64:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v2.bits.u64;
         new.bits.u128[1] = v1.bits.u64;
      } else {
         new.bits.u128[0] = v1.bits.u64;
         new.bits.u128[1] = v2.bits.u64;
      }
      break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v2.bits.u128[0];
         new.bits.u256[1] = v2.bits.u128[1];
         new.bits.u256[2] = v1.bits.u128[0];
         new.bits.u256[3] = v1.bits.u128[1];
      } else {
         new.bits.u256[0] = v1.bits.u128[0];
         new.bits.u256[1] = v1.bits.u128[1];
         new.bits.u256[2] = v2.bits.u128[0];
         new.bits.u256[3] = v2.bits.u128[1];
      }
      break;
   case 256: /* Fall through */
   default:
      panic(__func__);
   }

   return new;
}


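/* Return the more significant half of V. */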
vbits_t
upper_vbits(vbits_t v)
{
   vbits_t new = { .num_bits = v.num_bits / 2 };

   switch (v.num_bits) {
   case 16: new.bits.u8  = v.bits.u16 >> 8;  break;
   case 32: new.bits.u16 = v.bits.u32 >> 16; break;
   case 64: new.bits.u32 = v.bits.u64 >> 32; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN)
         new.bits.u64 = v.bits.u128[1];
      else
         new.bits.u64 = v.bits.u128[0];
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      } else {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      }
      break;
   case 8:
   default:
      panic(__func__);
   }

   return new;
}


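/* Zero-extend V to NUM_BITS bits. */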
vbits_t
zextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = get_bits64(v);

      switch (num_bits) {
      case 8:  new.bits.u8  = bits; break;
      case 16: new.bits.u16 = bits; break;
      case 32: new.bits.u32 = bits; break;
      case 64: new.bits.u64 = bits; break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = bits;
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = bits;
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v.bits.u128[0];
         new.bits.u256[1] = v.bits.u128[1];
         new.bits.u256[2] = 0;
         new.bits.u256[3] = 0;
      } else {
         new.bits.u256[0] = 0;
         new.bits.u256[1] = 0;
         new.bits.u256[2] = v.bits.u128[1];
         new.bits.u256[3] = v.bits.u128[0];
      }
      return new;
   }

   /* Cannot zero-extend a 256-bit value to something larger */
   panic(__func__);
}


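/* Sign-extend V to NUM_BITS bits.  A value is treated as negative only when
   its checked bit pattern is exactly the sign bit (see the cases below);
   anything else is zero-extended. */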
vbits_t
sextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   int sextend = 0;

   switch (v.num_bits) {
   case 8:   if (v.bits.u8  == 0x80)              sextend = 1; break;
   case 16:  if (v.bits.u16 == 0x8000)            sextend = 1; break;
   case 32:  if (v.bits.u32 == 0x80000000)        sextend = 1; break;
   case 64:  if (v.bits.u64 == (1ull << 63))      sextend = 1; break;
   case 128: if (v.bits.u128[1] == (1ull << 63))  sextend = 1; break;
   case 256: if (v.bits.u256[3] == (1ull << 63))  sextend = 1; break;

   default:
      panic(__func__);
   }

   return sextend ? left_vbits(v, num_bits) : zextend_vbits(v, num_bits);
}


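/* Return a NUM_BITS wide value with only bit BITNO set. */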
vbits_t
onehot_vbits(unsigned bitno, unsigned num_bits)
{
   assert(bitno < num_bits);

   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case 1:  new.bits.u32 = 1    << bitno; break;
   case 8:  new.bits.u8  = 1    << bitno; break;
   case 16: new.bits.u16 = 1    << bitno; break;
   case 32: new.bits.u32 = 1u   << bitno; break;
   case 64: new.bits.u64 = 1ull << bitno; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u128[0] = 1ull << bitno;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << (bitno - 64);
         }
      } else {
         if (bitno < 64) {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << bitno;
         } else {
            new.bits.u128[0] = 1ull << (bitno - 64);
            new.bits.u128[1] = 0;
         }
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u256[0] = 1ull << bitno;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 64);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 128);
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << (bitno - 192);
         }
      } else {
         if (bitno < 64) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << bitno;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 64);
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 128);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 1ull << (bitno - 192);
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      }
      break;
   default:
      panic(__func__);
   }
   return new;
}


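/* Return 1 if all bits of V are defined, i.e. the V-bit pattern is all
   zeroes. */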
int
completely_defined_vbits(vbits_t v)
{
   return equal_vbits(v, defined_vbits(v.num_bits));
}


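/* Shift V left by SHIFT_AMOUNT bits.  Only widths up to 64 bits are
   supported. */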
vbits_t
shl_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  <<= shift_amount; break;
   case 16: new.bits.u16 <<= shift_amount; break;
   case 32: new.bits.u32 <<= shift_amount; break;
   case 64: new.bits.u64 <<= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}


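/* Logical (zero-filling) right shift of V by SHIFT_AMOUNT bits.  Only widths
   up to 64 bits are supported. */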
vbits_t
shr_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  >>= shift_amount; break;
   case 16: new.bits.u16 >>= shift_amount; break;
   case 32: new.bits.u32 >>= shift_amount; break;
   case 64: new.bits.u64 >>= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}


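/* Arithmetic-style right shift of V by SHIFT_AMOUNT bits: V is shifted right
   and, if its most significant bit was set, the result is additionally
   left-propagated via left_vbits.  Only widths up to 64 bits are supported. */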
vbits_t
sar_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;
   int msb;

   switch (v.num_bits) {
   case 8:
      new.bits.u8  >>= shift_amount;
      msb = (v.bits.u8 & 0x80) != 0;
      break;
   case 16:
      new.bits.u16 >>= shift_amount;
      msb = (v.bits.u16 & 0x8000) != 0;
      break;
   case 32:
      new.bits.u32 >>= shift_amount;
      msb = (v.bits.u32 & (1u << 31)) != 0;
      break;
   case 64:
      new.bits.u64 >>= shift_amount;
      msb = (v.bits.u64 & (1ull << 63)) != 0;
      break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   if (msb)
      new = left_vbits(new, new.num_bits);
   return new;
}

/* Return a value for the POWER Iop_CmpORD class iops */
vbits_t
cmpord_vbits(unsigned v1_num_bits, unsigned v2_num_bits)
{
   vbits_t new = { .num_bits = v1_num_bits };

   /* Size of values being compared must be the same */
   assert( v1_num_bits == v2_num_bits);

   /* The comparison only produces a 32-bit or 64-bit value, whose
    * lower three bits indicate less than, equal, or greater than.
    */
   switch (v1_num_bits) {
   case 32:
      new.bits.u32 = 0xE;
      break;

   case 64:
      new.bits.u64 = 0xE;
      break;

   default:
      panic(__func__);
   }

   return new;
}