/external/gemmlowp/meta/generators/
  zip_Nx8_neon.py
    21:  def __init__(self, input_address, load, aggregator):  [argument]
    22:  self.input_address = input_address
    27:  def GenerateZipLanes(emitter, registers, zip_lanes, input_address, stride):  [argument]
    41:  last_address_register = input_address
    44:  lanes.append(ZipLane(input_address, registers.DoubleRegister(),
    77:  emitter.DereferenceIncrement(lane.input_address, alignment))
    102: emitter.Dereference(lane.input_address, None))
    107: emitter.Dereference(lane.input_address, None))
    112: emitter.DereferenceIncrement(lane.input_address, None))
    116: emitter.Dereference(lane.input_address, None))
    [all …]
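The fragments above only hint at how the zip generator wires lanes together. A minimal sketch of the implied pattern follows; ZipLane's fields and the DoubleRegister() call come from the snippets, while GeneralRegister(), QuadRegister() and EmitAdd() are assumed helper names standing in for whatever the real generator uses.

    class ZipLane(object):
      # Fields mirror line 21 above: the lane's address register plus its
      # load and aggregator scratch registers.
      def __init__(self, input_address, load, aggregator):
        self.input_address = input_address
        self.load = load
        self.aggregator = aggregator

    def GenerateZipLanes(emitter, registers, zip_lanes, input_address, stride):
      """One ZipLane per zipped row, each with its own address register."""
      lanes = []
      last_address_register = input_address
      for n in range(zip_lanes):
        if n == 0:
          # Row 0 reads directly through the caller-supplied pointer.
          lanes.append(ZipLane(input_address, registers.DoubleRegister(),
                               registers.QuadRegister()))
        else:
          # Later rows get a fresh register set to previous address + stride.
          address = registers.GeneralRegister()  # assumed helper name
          emitter.EmitAdd(address, last_address_register, stride)
          last_address_register = address
          lanes.append(ZipLane(address, registers.DoubleRegister(),
                               registers.QuadRegister()))
      return lanes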
  transform_kernels_common.py
    57:  def Transform(self, emitter, registers, input_address, elements,  [argument]
    64:  emitter.EmitVLoadAE(8, elements, load, input_address, None)
    65:  emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(16))
    103: def Transform(self, emitter, registers, input_address, elements,  [argument]
    110: emitter.EmitVLoadAE(8, elements, load, input_address, None)
    111: emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(32))
    172: def Transform(self, emitter, registers, input_address, elements,  [argument]
    179: emitter.EmitVLoadAE(32, elements, load, input_address, None)
    180: emitter.EmitPldOffset(input_address, emitter.ImmediateConstant(64))
    253: def Transform(self, emitter, registers, input_address, elements,  [argument]
    [all …]
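Every Transform() variant listed above shares the same two-step body: load a block of elements from input_address, then prefetch ahead by roughly the number of bytes one iteration consumes (16, 32 or 64). The sketch below restates that pattern; the attributes bit_width, register_count and chunk_bytes are hypothetical stand-ins for the per-kernel constants, not names from the file.

    def Transform(self, emitter, registers, input_address, elements,
                  output_address):
      # Scratch registers that receive the loaded block (count is kernel-specific).
      load = [registers.QuadRegister() for _ in range(self.register_count)]
      # Load `elements` values of the kernel's element width from input_address.
      emitter.EmitVLoadAE(self.bit_width, elements, load, input_address, None)
      # Prefetch the next chunk so later NEON loads do not stall on memory.
      emitter.EmitPldOffset(input_address,
                            emitter.ImmediateConstant(self.chunk_bytes))
      # ... transformation and store to output_address follow in the real kernels.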
  mul_Nx8_Mx8_neon.py
    20:  def __init__(self, input_address):  [argument]
    21:  self.input_address = input_address
    74:  emitter.DereferenceIncrement(left_lanes.input_address, 64))
    77:  emitter.DereferenceIncrement(right_lanes.input_address, 64))
    79:  emitter.EmitPldOffset(left_lanes.input_address, emitter.ImmediateConstant(64))
    80:  emitter.EmitPldOffset(right_lanes.input_address,
    120: emitter.DereferenceIncrement(left_lanes.input_address, 64))
    123: emitter.DereferenceIncrement(right_lanes.input_address, 64))
    125: emitter.EmitPldOffset(left_lanes.input_address, emitter.ImmediateConstant(64))
    126: emitter.EmitPldOffset(right_lanes.input_address,
    [all …]
  streams_common.py
    37:  def _GenerateInputs(emitter, registers, lanes_count, input_address, stride):  [argument]
    40:  last_address_register = input_address
    43:  inputs.append(input_address)
    69:  for (row, input_address) in zip(block, inputs):
    70:  emitter.EmitVLoadE(8, elements_count, row, input_address, None)
    206: elements_count, aggregators, input_address,  [argument]
    219: block, input_address, stride)
    250: input_address = registers.MapOutputParameter('in')
    272: aggregators, input_address, stride,
    282: leftovers, aggregators, input_address,
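streams_common.py applies the same address-fanning idea as the zip generator: line 250 binds the 'in' parameter to a register, _GenerateInputs derives one address per lane from it, and lines 69-70 load one row per address. A hedged sketch of the surrounding flow, with GenerateStream and its loop structure assumed rather than copied from the file:

    def GenerateStream(emitter, registers, block, elements_count, stride):
      # Bind the 'in' function parameter to a register (line 250 above).
      input_address = registers.MapOutputParameter('in')
      # One address register per lane: lane 0 reuses input_address, the rest
      # are offset from their predecessor by `stride` (lines 37-43 above).
      inputs = _GenerateInputs(emitter, registers, len(block), input_address,
                               stride)
      # One 8-bit vector load per row, each through its own lane address
      # (lines 69-70 above).
      for (row, input_address) in zip(block, inputs):
        emitter.EmitVLoadE(8, elements_count, row, input_address, None)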
  neon_emitter.py
    717: input_address, stride):  [argument]
    722: input_deref = self.Dereference(input_address, None)
    723: input_deref_increment = self.DereferenceIncrement(input_address, None)
    729: self.EmitPld(input_address)
    735: self.EmitPld(input_address)
    746: self.EmitPld(input_address)
    758: self.EmitPld(input_address)
    771: self.EmitPld(input_address)
    785: self.EmitPld(input_address)
    793: self.EmitPld(input_address)
  neon_emitter_64.py
    1134: input_address, stride):  [argument]
    1138: input_deref = self.Dereference(input_address, None)
    1139: input_deref_increment = self.DereferenceIncrement(input_address, None)
    1145: self.EmitPld(input_address)
    1153: self.EmitPld(input_address)
    1161: self.EmitPld(input_address)
    1169: self.EmitPld(input_address)
    1184: self.EmitPld(input_address)
    1200: self.EmitPld(input_address)
    1218: self.EmitPld(input_address)
    [all …]
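Both emitter backends expose the same small interface around input_address: Dereference() and DereferenceIncrement() format the register as an addressing operand (the latter with write-back), and EmitPld() issues a prefetch before each batch of loads. The toy emitter below is a self-contained illustration of that contract only; the instruction text it prints is an assumption, not what the real neon_emitter.py or neon_emitter_64.py emit.

    class ToyNeonEmitter(object):
      def __init__(self):
        self.lines = []

      def Dereference(self, register, alignment):
        # '[r0]' or '[r0:64]' style operand; the address register is unchanged.
        if alignment:
          return '[%s:%d]' % (register, alignment)
        return '[%s]' % register

      def DereferenceIncrement(self, register, alignment):
        # Same operand with post-increment write-back ('!').
        return '%s!' % self.Dereference(register, alignment)

      def EmitPld(self, register):
        self.lines.append('pld [%s]' % register)

      def EmitVLoad(self, destination, source_operand):
        self.lines.append('vld1.8 {%s}, %s' % (destination, source_operand))

    emitter = ToyNeonEmitter()
    input_address = 'r0'
    emitter.EmitPld(input_address)  # prefetch ahead of the actual load
    emitter.EmitVLoad('d0', emitter.DereferenceIncrement(input_address, 64))
    print('\n'.join(emitter.lines))  # -> pld [r0] / vld1.8 {d0}, [r0:64]!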
  quantized_mul_kernels_common.py
    19:  def _ReadParams(emitter, registers, input_address, elements, min_register):  [argument]
    25:  emitter.EmitVLoadAE(registers_count * 4, 32, registers, input_address, 64)
/external/tensorflow/tensorflow/core/protobuf/
  conv_autotuning.proto
    28:  int64 input_address = 9;  [field]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  ir_emitter_unnested.cc
    2701: llvm::Value* input_address = llvm_ir::EmitAllocaAtFunctionEntry(  [local, in EmitScatter()]
    2707: Store(input_ir_value, input_address);  [in EmitScatter()]
    2711: *desc.update_computation, output_address, input_address);  [in EmitScatter()]
    2714: {output_address, input_address},  [in EmitScatter()]
    4646: llvm::AllocaInst* input_address =  [local, in EmitTileElementForReduction()]
    4654: Store(input_ir_value, input_address);  [in EmitTileElementForReduction()]
    4658: *reducers[i], {partial_result_address, input_address},  [in EmitTileElementForReduction()]
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
  ir_emitter.cc
    1578: llvm::Value* input_address = BitCast(  [local, in EmitInnerLoopForVectorizedReduction()]
    1583: BitCast(input_address, accumulator[i]->getType());  [in EmitInnerLoopForVectorizedReduction()]
    1594: input_address = ConstInBoundsGEP1_32(reduced_result->getType(),  [in EmitInnerLoopForVectorizedReduction()]