/external/chromium-trace/catapult/third_party/pyserial/serial/ |
D | sermsdos.py |
      84  bytesize = EIGHTBITS,   argument
     101  self.bytesize = str(bytesize)
     119  self.bytesize, self.stop, self.retry, self.filename)
     137  self.parity, self.bytesize, self.stop, self.retry,
     195  , self.baud, self.parity, self.bytesize, self.stop,
|
D | serialutil.py |
     241  bytesize=EIGHTBITS,     # number of data bits   argument
     271  self.bytesize = bytesize
     354  def setByteSize(self, bytesize):   argument
     356  … if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
     357  self._bytesize = bytesize
     364  bytesize = property(getByteSize, setByteSize, doc="Byte size setting")   variable in SerialBase
     514  self.bytesize,
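The serialutil.py matches above are pyserial's guarded bytesize setting: the setter rejects anything outside BYTESIZES before storing it, and a property ties getter and setter together. A minimal standalone sketch of that pattern follows; the constant values mirror pyserial's FIVEBITS..EIGHTBITS, but the class itself is illustrative, not the library's code.

```python
# Minimal sketch of the guarded bytesize property seen in serialutil.py.
# The constants mirror pyserial's FIVEBITS..EIGHTBITS; the class is
# illustrative, not the library's real implementation.
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = 5, 6, 7, 8
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)


class PortSettings(object):
    def __init__(self, bytesize=EIGHTBITS):
        self.bytesize = bytesize          # goes through the property setter

    def getByteSize(self):
        return self._bytesize

    def setByteSize(self, bytesize):
        # Reject anything that is not a supported number of data bits.
        if bytesize not in BYTESIZES:
            raise ValueError("Not a valid byte size: %r" % (bytesize,))
        self._bytesize = bytesize

    bytesize = property(getByteSize, setByteSize, doc="Byte size setting")


settings = PortSettings()
settings.bytesize = SEVENBITS             # ok
# settings.bytesize = 9                   # would raise ValueError
```

Because the property is a data descriptor on the class, the assignment in __init__ already goes through the same validation as later reassignments.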
|
D | rfc2217.py |
    1132  backup = self.serial.bytesize
    1136  self.serial.bytesize = datasize
    1140  self.serial.bytesize = backup
    1143  … self.logger.info("%s data size: %s" % (datasize and 'set' or 'get', self.serial.bytesize))
    1144  … self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize))
|
D | serialjava.py | 246 bytesize=EIGHTBITS, # number of databits
|
/external/llvm-project/lldb/utils/lui/ |
D | lldbutil.py |
      66  def int_to_bytearray(val, bytesize):   argument
      74  if bytesize == 1:
      79  if bytesize == 2:
      81  elif bytesize == 4:
      83  elif bytesize == 4:
      92  def bytearray_to_int(bytes, bytesize):   argument
     100  if bytesize == 1:
     105  if bytesize == 2:
     107  elif bytesize == 4:
     109  elif bytesize == 4:
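Both lldbutil.py copies in this listing convert between integers and little-endian bytearrays by choosing a struct format character from bytesize. Note that the listing shows `elif bytesize == 4:` twice in each helper (file lines 81/83 and 107/109 here), so the branch that presumably should handle bytesize == 8 looks unreachable. A hedged sketch of int_to_bytearray with that branch written as the 8-byte case it appears intended to be:

```python
# Sketch of int_to_bytearray as listed above, with the second 4-byte branch
# written as the 8-byte case it presumably was meant to be. Little-endian
# packing, as in the original helper; reverse the result for big endian.
import struct


def int_to_bytearray(val, bytesize):
    if bytesize == 1:
        return bytearray([val])
    template = "<%c"                      # little endian
    if bytesize == 2:
        fmt = template % 'h'
    elif bytesize == 4:
        fmt = template % 'i'
    elif bytesize == 8:                   # the listing shows '== 4' here again
        fmt = template % 'q'
    else:
        return None
    return bytearray(struct.pack(fmt, val))


assert int_to_bytearray(0x1234, 2) == bytearray(b'\x34\x12')
```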
|
/external/llvm-project/lldb/packages/Python/lldbsuite/test/ |
D | lldbutil.py |
     113  def int_to_bytearray(val, bytesize):   argument
     121  if bytesize == 1:
     126  if bytesize == 2:
     128  elif bytesize == 4:
     130  elif bytesize == 4:
     139  def bytearray_to_int(bytes, bytesize):   argument
     147  if bytesize == 1:
     152  if bytesize == 2:
     154  elif bytesize == 4:
     156  elif bytesize == 4:
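The lldbsuite copy carries the same pair of helpers with the same duplicated 4-byte branch (file lines 128/130 and 154/156). For completeness, a companion sketch of the inverse helper, bytearray_to_int, again treating the second branch as the 8-byte case:

```python
# Companion sketch for bytearray_to_int from the lldbsuite copy above:
# unpack a little-endian bytearray back into an integer, mirroring the
# format-character selection of int_to_bytearray.
import struct


def bytearray_to_int(ba, bytesize):
    if bytesize == 1:
        return ba[0]
    template = "<%c"                      # little endian
    if bytesize == 2:
        fmt = template % 'h'
    elif bytesize == 4:
        fmt = template % 'i'
    elif bytesize == 8:                   # again shown as '== 4' in the listing
        fmt = template % 'q'
    else:
        return None
    return struct.unpack(fmt, bytes(ba))[0]


assert bytearray_to_int(bytearray(b'\x34\x12'), 2) == 0x1234
```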
|
/external/chromium-trace/catapult/third_party/polymer/components/promise-polyfill/ |
D | Gruntfile.js | 37 bytesize: { property
|
/external/python/cpython2/Lib/test/ |
D | test_struct.py |
     199  self.bytesize = struct.calcsize(format)
     200  self.bitsize = self.bytesize * 8
     227  expected = ("\x00" * (self.bytesize - len(expected)) +
     232  self.assertEqual(len(expected), self.bytesize)
     261  for j in range(self.bytesize):
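The test's bytesize/bitsize bookkeeping comes straight from struct.calcsize: the packed byte size of a format string, times eight for the bit width. A small standalone illustration (standard struct format codes, not the CPython test itself):

```python
# Quick illustration of the bytesize/bitsize relationship used by the test:
# struct.calcsize() gives the packed byte size of a format code.
import struct

for fmt in ('b', 'h', 'i', 'q'):
    bytesize = struct.calcsize(fmt)
    bitsize = bytesize * 8
    print("%s: %d bytes, %d bits" % (fmt, bytesize, bitsize))
# 'q' is 8 bytes / 64 bits; native sizes for the other codes are in principle
# platform-dependent, though 1, 2 and 4 bytes are what you see in practice.
```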
|
/external/tensorflow/tensorflow/python/framework/ |
D | meta_graph.py |
     992  bytesize = 0
    1011  bytesize += value.node_def.ByteSize()
    1012  if bytesize >= (1 << 31) or bytesize < 0:
    1015  graph._copy_functions_to_graph_def(graph_def, bytesize)  # pylint: disable=protected-access
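meta_graph.py accumulates value.node_def.ByteSize() into bytesize and aborts once the total would overflow the 2 GB protobuf serialization limit. A simplified sketch of that guard with plain integers standing in for the ByteSize() calls (the TensorFlow internals are not reproduced):

```python
# Simplified sketch of the size guard in meta_graph.py: sum per-node byte
# sizes and stop once the serialized graph could no longer fit in a proto
# (2**31 bytes). `node_sizes` stands in for value.node_def.ByteSize() calls.
def check_graph_size(node_sizes):
    bytesize = 0
    for size in node_sizes:
        bytesize += size
        if bytesize >= (1 << 31) or bytesize < 0:
            raise ValueError("GraphDef cannot be larger than 2GB.")
    return bytesize


print(check_graph_size([1 << 20, 1 << 24]))      # fine
# check_graph_size([1 << 30, 1 << 30])           # would raise
```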
|
/external/chromium-trace/catapult/third_party/pyserial/serial/tools/ |
D | miniterm.py |
     218  self.serial.bytesize,
     408  self.serial.bytesize = serial.EIGHTBITS
     411  self.serial.bytesize = serial.SEVENBITS
     659  miniterm.serial.bytesize,
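miniterm's 7/8 data-bit toggle just reassigns serial.bytesize on the live port, and pyserial reconfigures the port when the attribute changes. A hedged usage sketch, assuming an installed pyserial and a placeholder port name (/dev/ttyUSB0):

```python
# Hedged usage sketch (the port name is a placeholder): pyserial lets you
# reassign bytesize on an open port, which is what miniterm's 8/7 data-bit
# toggle relies on.
import serial

ser = serial.Serial('/dev/ttyUSB0', 9600, bytesize=serial.EIGHTBITS)
print(ser.bytesize)                 # 8
ser.bytesize = serial.SEVENBITS     # reconfigures the open port to 7 data bits
ser.close()
```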
|
/external/flatbuffers/src/ |
D | reflection.cpp |
     448  element_size = elemobjectdef->bytesize();   in CopyTable()
     475  subobjectdef.bytesize());   in CopyTable()
     509  offset, obj.bytesize());   in VerifyStruct()
     519  return !p || v.VerifyVectorOrString(p, obj.bytesize());   in VerifyVectorOfStructs()
     538  return v.VerifyFromPointer(elem, elem_obj->bytesize());   in VerifyUnion()
|
D | idl_parser.cpp |
     659  field.value.offset = static_cast<voffset_t>(struct_def.bytesize);   in AddField()
     660  struct_def.bytesize += size;   in AddField()
    1025  FLATBUFFERS_ASSERT(val.constant.length() == struct_def.bytesize);   in SerializeStruct()
    1028  struct_def.bytesize);   in SerializeStruct()
    1221  struct_def.bytesize);   in ParseTable()
    1222  builder_.PopBytes(struct_def.bytesize);   in ParseTable()
    1355  v->Data(), v->Data() + v->size() * type.struct_def->bytesize,   in ParseVector()
    1356  type.struct_def->bytesize,   in ParseVector()
    1362  for (size_t i = 0; i < type.struct_def->bytesize; i++) {   in ParseVector()
    2277  if (!struct_def->bytesize) return Error("size 0 structs not allowed");   in ParseDecl()
    [all …]
|
D | idl_gen_lobster.cpp | 316 NumToString(struct_def.bytesize) + ")\n"; in StructBuilderBody()
|
/external/python/cpython3/Lib/test/ |
D | test_struct.py |
     182  self.bytesize = struct.calcsize(format)
     183  self.bitsize = self.bytesize * 8
     211  expected = (b"\x00" * (self.bytesize - len(expected)) +
     216  self.assertEqual(len(expected), self.bytesize)
     245  for j in range(self.bytesize):
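The Python 3 variant of the test pads its expected byte string with b"\x00" up to self.bytesize before comparing against the packed value. The same property in a standalone check:

```python
# Standalone version of the padding check from the Python 3 test: for a
# big-endian format, the packed value of a small integer is a bytesize-wide
# field with leading zero bytes.
import struct

fmt = '>q'
bytesize = struct.calcsize(fmt)           # 8
expected = b"\x2a"
expected = b"\x00" * (bytesize - len(expected)) + expected
assert len(expected) == bytesize
assert struct.pack(fmt, 42) == expected
```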
|
/external/flatbuffers/include/flatbuffers/ |
D | idl.h |
     328  bytesize(0) {}   in StructDef()
     331  auto padding = PaddingBytes(bytesize, min_align);   in PadLastField()
     332  bytesize += padding;   in PadLastField()
     348  size_t bytesize;  // Size if fixed.   member
     462  ? type.struct_def->bytesize   in InlineSize()
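In idl.h, StructDef keeps a running bytesize and PadLastField rounds it up to the struct's min_align via PaddingBytes. A Python transcription of that padding arithmetic (alignment assumed to be a power of two, as it is for FlatBuffers scalars); a sketch, not the library itself:

```python
# Python transcription of the bytesize padding arithmetic referenced above
# (PaddingBytes / PadLastField). Alignment must be a power of two.
def padding_bytes(buf_size, scalar_size):
    # Bytes needed to round buf_size up to the next multiple of scalar_size.
    return ((~buf_size) + 1) & (scalar_size - 1)


def pad_last_field(bytesize, min_align):
    return bytesize + padding_bytes(bytesize, min_align)


assert pad_last_field(6, 4) == 8      # a 6-byte struct aligned to 4 grows to 8
assert pad_last_field(8, 8) == 8      # already aligned: no padding added
```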
|
D | reflection_generated.h |
     718  int32_t bytesize() const {   in bytesize()   function
     763  void add_bytesize(int32_t bytesize) {   in add_bytesize()
     764  fbb_.AddElement<int32_t>(Object::VT_BYTESIZE, bytesize, 0);   in add_bytesize()
     792  int32_t bytesize = 0,
     798  builder_.add_bytesize(bytesize);
     812  int32_t bytesize = 0,
     825  bytesize,
|
/external/flatbuffers/reflection/ |
D | reflection.fbs | 81 bytesize:int; // For structs.
|
/external/flatbuffers/lobster/ |
D | flatbuffers.lobster |
      90  // First, store the object bytesize:
      92  // Second, store the vtable bytesize:
|
/external/python/cpython3/Lib/multiprocessing/ |
D | connection.py |
     236  bytesize = itemsize * len(m)
     239  elif offset > bytesize:
     243  if bytesize < offset + size:
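connection.py computes bytesize = itemsize * len(m) for the memoryview it is about to send and validates offset and size against it. The same bounds checks as a standalone sketch:

```python
# Standalone sketch of the offset/size validation that connection.py performs
# on a memoryview before sending: bytesize is the buffer length in bytes,
# and offset/size must stay inside it.
def check_bounds(buf, offset, size):
    m = memoryview(buf)
    bytesize = m.itemsize * len(m)
    if offset < 0:
        raise ValueError("offset is negative")
    elif offset > bytesize:
        raise ValueError("buffer length < offset")
    if size < 0:
        raise ValueError("size is negative")
    elif bytesize < offset + size:
        raise ValueError("buffer length < offset + size")
    return bytesize


assert check_bounds(bytearray(16), offset=4, size=8) == 16
```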
|
/external/tensorflow/tensorflow/stream_executor/cuda/ |
D | cuda_9_0.inc |
     381  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     385  return func_ptr(dptr, bytesize);
     413  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     417  return func_ptr(pp, bytesize);
     427  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     432  return func_ptr(pp, bytesize, Flags);
     450  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     455  return func_ptr(dptr, bytesize, flags);
     510  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     515  return func_ptr(p, bytesize, Flags);
|
D | cuda_11_2.inc |
     447  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     451  return func_ptr(dptr, bytesize);
     479  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     483  return func_ptr(pp, bytesize);
     493  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     498  return func_ptr(pp, bytesize, Flags);
     516  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     521  return func_ptr(dptr, bytesize, flags);
     576  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     581  return func_ptr(p, bytesize, Flags);
     [all …]
|
D | cuda_10_1.inc |
     391  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     395  return func_ptr(dptr, bytesize);
     423  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     427  return func_ptr(pp, bytesize);
     437  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     442  return func_ptr(pp, bytesize, Flags);
     460  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     465  return func_ptr(dptr, bytesize, flags);
     520  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     525  return func_ptr(p, bytesize, Flags);
|
D | cuda_10_0.inc |
     391  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     395  return func_ptr(dptr, bytesize);
     423  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     427  return func_ptr(pp, bytesize);
     437  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     442  return func_ptr(pp, bytesize, Flags);
     460  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     465  return func_ptr(dptr, bytesize, flags);
     520  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     525  return func_ptr(p, bytesize, Flags);
|
D | cuda_10_2.inc |
     399  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     403  return func_ptr(dptr, bytesize);
     431  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     435  return func_ptr(pp, bytesize);
     445  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     450  return func_ptr(pp, bytesize, Flags);
     468  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     473  return func_ptr(dptr, bytesize, flags);
     528  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     533  return func_ptr(p, bytesize, Flags);
|
D | cuda_11_0.inc |
     406  CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize) {
     410  return func_ptr(dptr, bytesize);
     438  CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize) {
     442  return func_ptr(pp, bytesize);
     452  CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize,
     457  return func_ptr(pp, bytesize, Flags);
     475  CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize,
     480  return func_ptr(dptr, bytesize, flags);
     535  CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize,
     540  return func_ptr(p, bytesize, Flags);
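All six versioned .inc files wrap the same driver entry points, and bytesize is always the size_t allocation length passed through to cuMemAlloc, cuMemAllocHost, cuMemHostAlloc, cuMemAllocManaged and cuMemHostRegister. A heavily hedged ctypes sketch of passing that argument from Python follows; it assumes a driver whose libcuda.so.1 exposes the *_v2 symbols and reduces error handling to asserts, and it is not a supported binding.

```python
# Heavily hedged ctypes sketch: how the size_t `bytesize` argument of the
# wrapped driver calls is passed from Python. Assumes an NVIDIA driver whose
# libcuda exposes the *_v2 symbols; error handling is reduced to asserts.
import ctypes

cuda = ctypes.CDLL("libcuda.so.1")          # assumption: driver library present

assert cuda.cuInit(0) == 0

device = ctypes.c_int(0)
assert cuda.cuDeviceGet(ctypes.byref(device), 0) == 0

context = ctypes.c_void_p()
assert cuda.cuCtxCreate_v2(ctypes.byref(context), 0, device) == 0

dptr = ctypes.c_uint64()                    # CUdeviceptr is a 64-bit value
bytesize = ctypes.c_size_t(1 << 20)         # request 1 MiB of device memory
assert cuda.cuMemAlloc_v2(ctypes.byref(dptr), bytesize) == 0

assert cuda.cuMemFree_v2(dptr) == 0
assert cuda.cuCtxDestroy_v2(context) == 0
```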
|