/external/vulkan-validation-layers/libs/glm/detail/ |
D | glm.cpp |
    83   template struct tvec2<float32, lowp>;
    94   template struct tvec2<float32, mediump>;
    105  template struct tvec2<float32, highp>;
    117  template struct tvec3<float32, lowp>;
    128  template struct tvec3<float32, mediump>;
    139  template struct tvec3<float32, highp>;
    151  template struct tvec4<float32, lowp>;
    162  template struct tvec4<float32, mediump>;
    173  template struct tvec4<float32, highp>;
    177  template struct tmat2x2<float32, lowp>;
    [all …]
|
D | type_float.hpp |
    37  typedef float float32;    typedef
    81  typedef float float32;    typedef
    87  GLM_STATIC_ASSERT(sizeof(glm::float32) == 4, "float32 size isn't 4 bytes on this platform");
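The GLM_STATIC_ASSERT above only encodes the expectation that glm::float32 is a 4-byte single-precision float. As a hedged illustration of that same invariant (this is not GLM code), the size can be checked from Python:

    # Illustration only: a single-precision float occupies 4 bytes.
    import struct
    import numpy as np

    assert struct.calcsize('f') == 4            # C 'float' as packed by struct
    assert np.dtype(np.float32).itemsize == 4   # NumPy's float32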
|
/external/deqp/external/vulkancts/framework/vulkan/ |
D | vkTypeUtil.hpp |
    37  v.color.float32[0] = r;           in makeClearValueColorF32()
    38  v.color.float32[1] = g;           in makeClearValueColorF32()
    39  v.color.float32[2] = b;           in makeClearValueColorF32()
    40  v.color.float32[3] = a;           in makeClearValueColorF32()
    57  v.color.float32[0] = color[0];    in makeClearValueColor()
    58  v.color.float32[1] = color[1];    in makeClearValueColor()
    59  v.color.float32[2] = color[2];    in makeClearValueColor()
    60  v.color.float32[3] = color[3];    in makeClearValueColor()
|
/external/opencv3/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/Data/ |
D | box.ply |
    6  property float32 x
    7  property float32 y
    8  property float32 z
|
/external/opencv3/samples/python2/ |
D | deconvolution.py |
    46  w = np.minimum(np.float32(dist)/d, 1.0)
    50  kern = np.ones((1, d), np.float32)
    52  A = np.float32([[c, -s, 0], [s, c, 0]])
    61  kern = np.float32(kern) / 255.0
    82  img = np.float32(img)/255.0
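The deconvolution.py matches come from building a linear-motion point-spread function: a 1 x d row of ones is rotated into place with a 2 x 3 np.float32 affine matrix, and images and kernels are normalized to float32 in [0, 1]. A minimal sketch of that pattern, assuming names angle, d and sz that are not necessarily the sample's own:

    import numpy as np
    import cv2

    def motion_kernel(angle, d, sz=65):
        # Horizontal motion blur of length d, as a float32 row of ones.
        kern = np.ones((1, d), np.float32)
        c, s = np.cos(angle), np.sin(angle)
        # 2x3 float32 affine matrix: rotate the row and center it in an sz x sz patch.
        A = np.float32([[c, -s, 0], [s, c, 0]])
        A[:, 2] = (sz / 2, sz / 2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0))
        kern = cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)
        return kern / kern.sum()   # normalize so the PSF sums to 1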
|
D | digits_video.py |
    71  c1 = np.float32([m['m10'], m['m01']]) / m['m00']
    72  c0 = np.float32([SZ/2, SZ/2])
    74  A = np.zeros((2, 3), np.float32)
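digits_video.py centers a segmented digit by translating its intensity centroid (computed from image moments) to the middle of an SZ x SZ patch with a float32 affine matrix. A hedged sketch of the idea; SZ and the input name patch are assumptions:

    import numpy as np
    import cv2

    SZ = 20  # assumed patch size, as in OpenCV's digits samples

    def center_digit(patch):
        m = cv2.moments(patch)
        if m['m00'] == 0:                                  # empty patch, nothing to center
            return patch
        c1 = np.float32([m['m10'], m['m01']]) / m['m00']   # intensity centroid
        c0 = np.float32([SZ / 2, SZ / 2])                  # patch center
        A = np.zeros((2, 3), np.float32)                   # identity rotation + translation
        A[:, :2] = np.eye(2)
        A[:, 2] = c0 - c1
        return cv2.warpAffine(patch, A, (SZ, SZ))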
|
D | letter_recog.py |
    32   a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') })
    47   new_samples = np.zeros((sample_n * self.class_n, var_n+1), np.float32)
    71   return np.float32( [self.model.predict(s) for s in samples] )
    136  self.model.train(samples, np.float32(new_responses), None, params = params)
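letter_recog.py loads the UCI letter-recognition CSV with np.loadtxt, converting the leading class letter to a number and keeping everything as float32 for the OpenCV ML models. A minimal, hedged equivalent of just the loading step:

    import numpy as np

    def load_letter_data(fn):
        # Column 0 is the class letter; map 'A'..'Z' to 0..25, the rest are numeric features.
        a = np.loadtxt(fn, np.float32, delimiter=',',
                       converters={0: lambda ch: ord(ch) - ord('A')})
        labels, samples = a[:, 0].astype(np.int32), a[:, 1:]
        return samples, labels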
|
D | stereo_match.py |
    54  disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
    59  Q = np.float32([[1, 0, 0, -0.5*w],
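stereo_match.py converts the fixed-point disparities returned by StereoSGBM (4 fractional bits, hence the divide by 16) to float32 and reprojects them to 3-D with a hand-built float32 Q matrix. A hedged sketch; f and the stereo matcher and image pair are assumptions:

    import numpy as np
    import cv2

    def disparity_to_points(stereo, imgL, imgR, f):
        h, w = imgL.shape[:2]
        # compute() returns 16.4 fixed-point disparities; scale to real values.
        disp = stereo.compute(imgL, imgR).astype(np.float32) / 16.0
        # Reprojection matrix in the sample's convention (y flipped so +y points up).
        Q = np.float32([[1, 0, 0, -0.5 * w],
                        [0, -1, 0, 0.5 * h],
                        [0, 0, 0, -f],
                        [0, 0, 1, 0]])
        return cv2.reprojectImageTo3D(disp, Q)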
|
D | fitline.py |
    41  p1 = np.float32(p1)
    67  vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
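cv2.fitLine takes an N x 2 float32 point array and returns the unit direction (vx, vy) plus a point (cx, cy) on the fitted line, which is what the matched call unpacks. A hedged toy example:

    import numpy as np
    import cv2

    points = np.float32([[0, 1], [1, 3], [2, 5], [3, 7]])   # toy data on a line
    # DIST_L2 = ordinary least squares; 0 lets OpenCV choose the numeric parameter.
    vx, vy, cx, cy = cv2.fitLine(points, cv2.DIST_L2, 0, 0.01, 0.01)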
|
D | lk_track.py |
    52  p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2)
    77  for x, y in np.float32(p).reshape(-1, 2):
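lk_track.py packs the most recent point of every track into an N x 1 x 2 float32 array because cv2.calcOpticalFlowPyrLK requires float32 point input. A hedged sketch; lk_params and the two grayscale frames are assumptions:

    import numpy as np
    import cv2

    lk_params = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    def track_points(prev_gray, gray, tracks):
        # Last point of each track, packed as N x 1 x 2 float32.
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, gray, p0, None, **lk_params)
        return p1.reshape(-1, 2), st.ravel() == 1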
|
D | digits.py |
    64   M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    124  return np.float32(digits).reshape(-1, SZ*SZ) / 255.0
    146  return np.float32(samples)
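digits.py deskews each 20 x 20 digit with a shear expressed as a 2 x 3 np.float32 affine matrix derived from second-order image moments, then flattens the pixels to float32 in [0, 1]. A hedged sketch of the deskew step:

    import numpy as np
    import cv2

    SZ = 20  # digit patch size used throughout the OpenCV digits samples

    def deskew(img):
        m = cv2.moments(img)
        if abs(m['mu02']) < 1e-2:
            return img.copy()                    # almost no vertical spread: skip
        skew = m['mu11'] / m['mu02']             # shear estimated from central moments
        M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
        return cv2.warpAffine(img, M, (SZ, SZ),
                              flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)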
|
D | plane_ar.py |
    32  ar_verts = np.float32([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
    84  quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]])
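plane_ar.py turns the tracked planar quad into float32 3-D points with z = 0, recovers the camera pose with cv2.solvePnP, and projects the float32 model vertices back into the frame. A hedged sketch of that pipeline; the intrinsics K, dist_coef and the tracked 2-D quad are assumptions:

    import numpy as np
    import cv2

    def project_overlay(quad_2d, w, h, verts_3d):
        # The tracked plane spans the unit square in model space, at height z = 0.
        quad_3d = np.float32([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])
        fx = 0.9                                           # assumed normalized focal length
        K = np.float64([[fx * w, 0, 0.5 * (w - 1)],
                        [0, fx * w, 0.5 * (h - 1)],
                        [0, 0, 1]])
        dist_coef = np.zeros(4)                            # assume no lens distortion
        ok, rvec, tvec = cv2.solvePnP(quad_3d, np.float32(quad_2d), K, dist_coef)
        pts, _ = cv2.projectPoints(np.float32(verts_3d), rvec, tvec, K, dist_coef)
        return pts.reshape(-1, 2)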
|
/external/deqp/external/vulkancts/modules/vulkan/pipeline/ |
D | vktPipelineClearUtil.cpp |
    125  clearValue.color.float32[0] = defaultColor.x();    in defaultClearValue()
    126  clearValue.color.float32[1] = defaultColor.y();    in defaultClearValue()
    127  clearValue.color.float32[2] = defaultColor.z();    in defaultClearValue()
    128  clearValue.color.float32[3] = defaultColor.w();    in defaultClearValue()
|
/external/vulkan-validation-layers/libs/glm/ |
D | fwd.hpp |
    1330  typedef detail::float32 lowp_float32;
    1338  typedef detail::float32 lowp_float32_t;
    1346  typedef float32 lowp_f32;
    1354  typedef detail::float32 lowp_float32;
    1362  typedef detail::float32 lowp_float32_t;
    1370  typedef float32 lowp_f32;
    1379  typedef detail::float32 lowp_float32;
    1387  typedef detail::float32 lowp_float32_t;
    1395  typedef float32 lowp_f32;
    1404  typedef detail::float32 mediump_float32;
    [all …]
|
/external/webrtc/webrtc/base/ |
D | macconversion.cc |
    77  Float32 float32;                                  in p_convertCFNumberToInt()    local
    79  static_cast<void*>(&float32));                    in p_convertCFNumberToInt()
    80  if (converted) *i = static_cast<int>(float32);    in p_convertCFNumberToInt()
|
/external/opencv3/doc/py_tutorials/py_imgproc/py_geometric_transformations/ |
D | py_geometric_transformations.markdown |
    46   You can take make it into a Numpy array of type np.float32 and pass it into **cv2.warpAffine()**
    55   M = np.float32([[1,0,100],[0,1,50]])
    112  pts1 = np.float32([[50,50],[200,50],[50,200]])
    113  pts2 = np.float32([[10,100],[200,50],[100,250]])
    141  pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
    142  pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
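The geometric-transformations tutorial builds every warp matrix directly as an np.float32 array: a 2 x 3 matrix for translation and affine warps (cv2.warpAffine) and a 3 x 3 matrix for perspective warps (cv2.getPerspectiveTransform plus cv2.warpPerspective). A hedged sketch tying the matched lines together; the blank input image is a placeholder:

    import numpy as np
    import cv2

    img = np.zeros((400, 400), np.uint8)          # placeholder image
    rows, cols = img.shape[:2]

    # Translation: shift 100 px right and 50 px down with a 2x3 float32 matrix.
    M = np.float32([[1, 0, 100], [0, 1, 50]])
    shifted = cv2.warpAffine(img, M, (cols, rows))

    # Affine warp: three point correspondences define the 2x3 matrix.
    pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
    pts2 = np.float32([[10, 100], [200, 50], [100, 250]])
    affine = cv2.warpAffine(img, cv2.getAffineTransform(pts1, pts2), (cols, rows))

    # Perspective warp: four correspondences define the 3x3 matrix.
    src = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
    dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    warped = cv2.warpPerspective(img, cv2.getPerspectiveTransform(src, dst), (300, 300))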
|
/external/chromium-trace/catapult/telemetry/telemetry/internal/image_processing/ |
D | screen_finder.py |
    180  lines = cv_util.ExtendLines(np.float32(hlines[0]), 10000) \
    196  corners = np.empty((4, 2), np.float32)
    242  intersections = np.empty((0, 3), np.float32)
    250  point = np.float32(point)
    275  min_dist = np.zeros(4, np.float32)
    326  corners = np.empty((0, 2), np.float32)
    362  sorted_corners = np.empty((4, 2), np.float32)
    616  real_corners = np.empty((4, 2), np.float32)
    761  self._avg_corners = np.asfarray(corners, np.float32)
    801  target = np.zeros((4, 2), np.float32)
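screen_finder.py preallocates its corner and intersection buffers as float32 so downstream OpenCV calls receive the dtype they expect. A tiny, hedged illustration of the allocation pattern only (the values are made up):

    import numpy as np

    corners = np.empty((4, 2), np.float32)        # uninitialized: every row must be filled
    corners[:] = [[0, 0], [1, 0], [1, 1], [0, 1]]
    hits = np.empty((0, 3), np.float32)           # empty buffer that grows as results arrive
    hits = np.vstack([hits, np.float32([0.5, 0.5, 1.0])])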
|
/external/v8/test/cctest/compiler/ |
D | test-run-native-calls.cc |
    22   typedef float float32;    typedef
    345  Node* MakeConstant(RawMachineAssembler& raw, float32 value) {    in MakeConstant()
    400  void ArgsBuffer<float32>::Mutate() {                             in Mutate()
    405  output = std::numeric_limits<float32>::quiet_NaN();              in Mutate()
    876  ArgsBuffer<float32>::Sig sig(2);                                 in TEST()
    888  RunSelect<float32, 0>(desc);                                     in TEST()
    889  RunSelect<float32, 1>(desc);                                     in TEST()
    932  ArgsBuffer<float32>::Sig sig(count);                             in TEST()
    934  RunSelect<float32, 0>(desc);                                     in TEST()
    935  RunSelect<float32, 1>(desc);                                     in TEST()
    [all …]
|
/external/opencv3/doc/py_tutorials/py_ml/py_svm/py_svm_opencv/ |
D | py_svm_opencv.markdown |
    26   M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    78   M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    105  trainData = np.float32(hogdata).reshape(-1,64)
    106  responses = np.float32(np.repeat(np.arange(10),250)[:,np.newaxis])
    116  testData = np.float32(hogdata).reshape(-1,bin_n*4)
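The SVM tutorial packs the per-digit HOG descriptors into one float32 training matrix and builds float32 responses by repeating the labels 0-9. A hedged sketch of just the packing step; hog_descriptors here is synthetic stand-in data, not the tutorial's real features:

    import numpy as np

    bin_n = 16                                    # HOG bins per quadrant, as in the tutorial
    # 2500 descriptors, 250 per digit class, each 4*bin_n = 64 values long (synthetic).
    hog_descriptors = [np.random.rand(4 * bin_n) for _ in range(2500)]

    trainData = np.float32(hog_descriptors).reshape(-1, bin_n * 4)         # (2500, 64)
    responses = np.float32(np.repeat(np.arange(10), 250)[:, np.newaxis])   # (2500, 1)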
|
/external/opencv3/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/ |
D | py_kmeans_opencv.markdown |
    14   -# **samples** : It should be of **np.float32** data type, and each feature should be put in a
    57   z = np.float32(z)
    62   of np.float32 type.
    127  # convert to np.float32
    128  Z = np.float32(Z)
    169  # convert to np.float32
    170  Z = np.float32(Z)
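cv2.kmeans only accepts np.float32 samples, which is why the tutorial converts Z before clustering. A hedged one-feature example with synthetic data:

    import numpy as np
    import cv2

    z = np.random.randint(0, 255, (50, 1)).astype(np.uint8)   # synthetic 1-D data
    Z = np.float32(z)                                         # kmeans requires float32 input

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    compactness, labels, centers = cv2.kmeans(Z, 2, None, criteria,
                                              10, cv2.KMEANS_RANDOM_CENTERS)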
|
/external/v8/src/wasm/ |
D | encoder.cc |
    207  uint16_t float32 = 0;                        in IndexVars()    local
    217  float32++;                                   in IndexVars()
    224  e->local_float32_count_ = float32;           in IndexVars()
    226  float64 = param + int32 + int64 + float32;   in IndexVars()
    227  float32 = param + int32 + int64;             in IndexVars()
    240  var_index[i] = float32++;                    in IndexVars()
|
/external/deqp/modules/glshared/ |
D | glsShaderLibraryCase.cpp |
    791  case TYPE_FLOAT:      gl.uniform1fv(loc, 1, &val.elements[elemNdx].float32); break;    in setUniformValue()
    792  case TYPE_FLOAT_VEC2: gl.uniform2fv(loc, 1, &val.elements[elemNdx].float32); break;    in setUniformValue()
    793  case TYPE_FLOAT_VEC3: gl.uniform3fv(loc, 1, &val.elements[elemNdx].float32); break;    in setUniformValue()
    794  case TYPE_FLOAT_VEC4: gl.uniform4fv(loc, 1, &val.elements[elemNdx].float32); break;    in setUniformValue()
    795  …case TYPE_FLOAT_MAT2: gl.uniformMatrix2fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); brea…    in setUniformValue()
    796  …case TYPE_FLOAT_MAT3: gl.uniformMatrix3fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); brea…    in setUniformValue()
    797  …case TYPE_FLOAT_MAT4: gl.uniformMatrix4fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); brea…    in setUniformValue()
    810  …case TYPE_FLOAT_MAT2X3: gl.uniformMatrix2x3fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); b…    in setUniformValue()
    811  …case TYPE_FLOAT_MAT2X4: gl.uniformMatrix2x4fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); b…    in setUniformValue()
    812  …case TYPE_FLOAT_MAT3X2: gl.uniformMatrix3x2fv(loc, 1, GL_FALSE, &val.elements[elemNdx].float32); b…    in setUniformValue()
    [all …]
|
/external/vulkan-validation-layers/libs/glm/gtc/ |
D | type_precision.hpp |
    605  typedef detail::float32 float32;    typedef
    614  typedef detail::float32 float32_t;
    623  typedef float32 f32;
|
/external/webrtc/webrtc/modules/audio_coding/neteq/test/delay_tool/ |
D | parse_delay_file.m |
    55  clock = fread(fid, 1, '*float32');
    58  % read int32 + float32 in one go
    71  clock = fread(fid, 1, '*float32');
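The Matlab script pulls single-precision fields out of a binary log with fread(fid, 1, '*float32'). Purely as a hedged illustration (this is not part of the WebRTC tooling), the same kind of record, an int32 followed by a float32, can be read in Python:

    import io
    import struct
    import numpy as np

    # Hypothetical record: a little-endian int32 sequence number, then a float32 clock value.
    buf = io.BytesIO(struct.pack('<if', 7, 123.5))
    seq = np.frombuffer(buf.read(4), dtype='<i4')[0]
    clock = np.frombuffer(buf.read(4), dtype='<f4')[0]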
|
/external/opencv3/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/ |
D | py_knn_opencv.markdown |
    36  train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
    37  test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
    73  1.1 MB in this case. Then while loading, you can convert back into float32.
    95  data= np.loadtxt('letter-recognition.data', dtype= 'float32', delimiter = ',',
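The kNN tutorial slices the 5000-digit mosaic into 20 x 20 cells, flattens each cell into a 400-value row, and casts to float32 because the cv2.ml API expects float samples. A hedged sketch with a synthetic stand-in for digits.png:

    import numpy as np
    import cv2

    # Stand-in for the 1000x2000 digits.png mosaic (50 rows x 100 cols of 20x20 cells).
    gray = np.random.randint(0, 255, (1000, 2000)).astype(np.uint8)
    cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]
    x = np.array(cells)                                        # shape (50, 100, 20, 20)

    train = x[:, :50].reshape(-1, 400).astype(np.float32)      # (2500, 400)
    test = x[:, 50:100].reshape(-1, 400).astype(np.float32)    # (2500, 400)
    labels = np.repeat(np.arange(10), 250)[:, np.newaxis].astype(np.float32)

    knn = cv2.ml.KNearest_create()
    knn.train(train, cv2.ml.ROW_SAMPLE, labels)
    ret, result, neighbours, dist = knn.findNearest(test, k=5)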
|