
Searched refs:ops (Results 1 – 25 of 4678) sorted by relevance


/external/wpa_supplicant_8/src/pae/
ieee802_1x_secy_ops.c
30 struct ieee802_1x_kay_ctx *ops; in secy_cp_control_protect_frames() local
37 ops = kay->ctx; in secy_cp_control_protect_frames()
38 if (!ops || !ops->enable_protect_frames) { in secy_cp_control_protect_frames()
44 return ops->enable_protect_frames(ops->ctx, enabled); in secy_cp_control_protect_frames()
50 struct ieee802_1x_kay_ctx *ops; in secy_cp_control_encrypt() local
57 ops = kay->ctx; in secy_cp_control_encrypt()
58 if (!ops || !ops->enable_encrypt) { in secy_cp_control_encrypt()
64 return ops->enable_encrypt(ops->ctx, enabled); in secy_cp_control_encrypt()
70 struct ieee802_1x_kay_ctx *ops; in secy_cp_control_replay() local
77 ops = kay->ctx; in secy_cp_control_replay()
[all …]
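
The wpa_supplicant matches above follow a common callback-table pattern: the KaY context doubles as an ops structure, and each control helper bails out unless the corresponding callback is set. A minimal sketch of that NULL-checked dispatch, using simplified stand-in types rather than the real ieee802_1x_kay_ctx definition:

    /* Hedged sketch of the NULL-checked callback dispatch seen above;
     * the struct and field names are simplified stand-ins. */
    struct kay_ctx_ops {
        void *ctx;
        int (*enable_protect_frames)(void *ctx, int enabled);
    };

    static int cp_control_protect_frames(struct kay_ctx_ops *ops, int enabled)
    {
        if (!ops || !ops->enable_protect_frames)
            return -1;                       /* callback not provided */
        return ops->enable_protect_frames(ops->ctx, enabled);
    }
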
/external/libnl/lib/
cache_mngt.c
44 struct nl_cache_ops *ops; in __nl_cache_ops_lookup() local
46 for (ops = cache_ops; ops; ops = ops->co_next) in __nl_cache_ops_lookup()
47 if (!strcmp(ops->co_name, name)) in __nl_cache_ops_lookup()
48 return ops; in __nl_cache_ops_lookup()
57 void nl_cache_ops_get(struct nl_cache_ops *ops) in nl_cache_ops_get() argument
59 ops->co_refcnt++; in nl_cache_ops_get()
66 void nl_cache_ops_put(struct nl_cache_ops *ops) in nl_cache_ops_put() argument
68 ops->co_refcnt--; in nl_cache_ops_put()
82 struct nl_cache_ops *ops; in nl_cache_ops_lookup() local
85 ops = __nl_cache_ops_lookup(name); in nl_cache_ops_lookup()
[all …]
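
The cache_mngt.c hits illustrate libnl's registry pattern: cache ops live on a singly linked list keyed by name, with manual reference counting around lookups. A rough sketch of that lookup, assuming a simplified node type instead of the real struct nl_cache_ops layout:

    #include <string.h>
    #include <stddef.h>

    /* Simplified stand-in for struct nl_cache_ops. */
    struct cache_ops {
        const char *co_name;
        int co_refcnt;
        struct cache_ops *co_next;
    };

    static struct cache_ops *cache_ops_list;   /* head of the registry */

    static struct cache_ops *cache_ops_lookup(const char *name)
    {
        struct cache_ops *ops;

        for (ops = cache_ops_list; ops; ops = ops->co_next)
            if (!strcmp(ops->co_name, name))
                return ops;                    /* found; caller bumps co_refcnt */
        return NULL;
    }
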
object.c
55 struct nl_object *nl_object_alloc(struct nl_object_ops *ops) in nl_object_alloc() argument
59 if (ops->oo_size < sizeof(*new)) in nl_object_alloc()
62 new = calloc(1, ops->oo_size); in nl_object_alloc()
69 new->ce_ops = ops; in nl_object_alloc()
70 if (ops->oo_constructor) in nl_object_alloc()
71 ops->oo_constructor(new); in nl_object_alloc()
87 struct nl_cache_ops *ops; in nl_object_alloc_name() local
89 ops = nl_cache_ops_lookup_safe(kind); in nl_object_alloc_name()
90 if (!ops) in nl_object_alloc_name()
93 *result = nl_object_alloc(ops->co_obj_ops); in nl_object_alloc_name()
[all …]
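
object.c shows the generic-object constructor: the caller passes an ops descriptor, the allocator sanity-checks the declared size, zero-allocates the object, stores the ops pointer, and runs an optional constructor hook. A sketch of that flow with placeholder type names in place of nl_object / nl_object_ops:

    #include <stdlib.h>

    /* Placeholder object/ops types standing in for nl_object / nl_object_ops. */
    struct obj;
    struct obj_ops {
        size_t oo_size;                      /* full size of the derived object */
        void (*oo_constructor)(struct obj *);
    };
    struct obj {
        struct obj_ops *ce_ops;
    };

    static struct obj *obj_alloc(struct obj_ops *ops)
    {
        struct obj *new_obj;

        if (ops->oo_size < sizeof(*new_obj))
            return NULL;                     /* descriptor too small to hold the base */

        new_obj = calloc(1, ops->oo_size);
        if (!new_obj)
            return NULL;

        new_obj->ce_ops = ops;
        if (ops->oo_constructor)
            ops->oo_constructor(new_obj);    /* optional per-type init hook */
        return new_obj;
    }
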
/external/skqp/src/compute/hs/gen/
main.c
456 hsg_op(struct hsg_op * ops, struct hsg_op const opcode) in hsg_op() argument
460 *ops = opcode; in hsg_op()
462 return ops+1; in hsg_op()
467 hsg_exit(struct hsg_op * ops) in hsg_exit() argument
469 return hsg_op(ops,EXIT()); in hsg_exit()
474 hsg_end(struct hsg_op * ops) in hsg_end() argument
476 return hsg_op(ops,END()); in hsg_end()
481 hsg_begin(struct hsg_op * ops) in hsg_begin() argument
483 return hsg_op(ops,BEGIN()); in hsg_begin()
488 hsg_else(struct hsg_op * ops) in hsg_else() argument
[all …]
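
The hs/gen matches show an opcode-emitter idiom: hsg_op() writes one opcode into the output array and returns the advanced pointer, so helpers such as hsg_begin()/hsg_end() can be chained. A brief sketch of that append-and-advance style, with a made-up opcode type:

    /* Hypothetical opcode record; the real struct hsg_op carries more fields. */
    struct op { int type; };

    static struct op *emit(struct op *ops, struct op opcode)
    {
        *ops = opcode;        /* store the opcode at the cursor */
        return ops + 1;       /* hand back the advanced cursor for chaining */
    }

    static struct op *emit_begin(struct op *ops) { return emit(ops, (struct op){ 1 }); }
    static struct op *emit_end(struct op *ops)   { return emit(ops, (struct op){ 2 }); }
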
target_cuda.c
107 struct hsg_op const * const ops, in hsg_target_cuda() argument
110 switch (ops->type) in hsg_target_cuda()
308 struct hsg_merge const * const m = merge + ops->a; in hsg_target_cuda()
313 if (ops->a == 0) in hsg_target_cuda()
330 struct hsg_merge const * const m = merge + ops->a; in hsg_target_cuda()
340 if (ops->a == 0) in hsg_target_cuda()
355 struct hsg_merge const * const m = merge + ops->a; in hsg_target_cuda()
367 struct hsg_merge const * const m = merge + ops->a; in hsg_target_cuda()
384 uint32_t const span_left = (merge[0].warps << ops->a) / 2; in hsg_target_cuda()
385 uint32_t const span_right = 1 << ops->b; in hsg_target_cuda()
[all …]
target_opencl.c
123 struct hsg_op const * const ops, in hsg_target_opencl() argument
126 switch (ops->type) in hsg_target_opencl()
286 struct hsg_merge const * const m = merge + ops->a; in hsg_target_opencl()
299 struct hsg_merge const * const m = merge + ops->a; in hsg_target_opencl()
316 struct hsg_merge const * const m = merge + ops->a; in hsg_target_opencl()
328 struct hsg_merge const * const m = merge + ops->a; in hsg_target_opencl()
346 ops->a,ops->b); in hsg_target_opencl()
352 ops->a); in hsg_target_opencl()
359 ops->a); in hsg_target_opencl()
366 ops->a); in hsg_target_opencl()
[all …]
target_glsl.c
128 struct hsg_op const * const ops, in hsg_target_glsl() argument
131 switch (ops->type) in hsg_target_glsl()
300 struct hsg_merge const * const m = merge + ops->a; in hsg_target_glsl()
346 struct hsg_merge const * const m = merge + ops->a; in hsg_target_glsl()
394 ops->a,ops->b, in hsg_target_glsl()
395 ops->a,ops->b); in hsg_target_glsl()
398 sprintf(filename,"hs_fm_%u_%u.comp",ops->a,ops->b); in hsg_target_glsl()
408 ops->a,ops->b); in hsg_target_glsl()
419 ops->a); in hsg_target_glsl()
428 ops->a, in hsg_target_glsl()
[all …]
/external/tensorflow/tensorflow/python/ops/
standard_ops.py
31 from tensorflow.python.ops import array_grad
32 from tensorflow.python.ops import cudnn_rnn_grad
33 from tensorflow.python.ops import data_flow_grad
34 from tensorflow.python.ops import manip_grad
35 from tensorflow.python.ops import math_grad
36 from tensorflow.python.ops import random_grad
37 from tensorflow.python.ops import rnn_grad
38 from tensorflow.python.ops import sparse_grad
39 from tensorflow.python.ops import state_grad
40 from tensorflow.python.ops import tensor_array_grad
[all …]
tensor_array_grad.py
20 from tensorflow.python.framework import ops
21 from tensorflow.python.ops import array_ops
22 from tensorflow.python.ops import tensor_array_ops
26 ops.NotDifferentiable("TensorArray")
27 ops.NotDifferentiable("TensorArrayGrad")
28 ops.NotDifferentiable("TensorArraySize")
29 ops.NotDifferentiable("TensorArrayClose")
31 ops.NotDifferentiable("TensorArrayV2")
32 ops.NotDifferentiable("TensorArrayGradV2")
33 ops.NotDifferentiable("TensorArraySizeV2")
[all …]
/external/libnl/lib/genl/
mngt.c
36 static struct genl_cmd *lookup_cmd(struct genl_ops *ops, int cmd_id) in lookup_cmd() argument
41 for (i = 0; i < ops->o_ncmds; i++) { in lookup_cmd()
42 cmd = &ops->o_cmds[i]; in lookup_cmd()
51 struct genl_ops *ops, struct nl_cache_ops *cache_ops, void *arg) in cmd_msg_parser() argument
61 if (!(cmd = lookup_cmd(ops, ghdr->cmd))) in cmd_msg_parser()
72 GENL_HDRSIZE(ops->o_hdrsize), in cmd_msg_parser()
92 static int genl_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who, in genl_msg_parser() argument
95 if (ops->co_genl == NULL) in genl_msg_parser()
98 return cmd_msg_parser(who, nlh, ops->co_genl, ops, pp); in genl_msg_parser()
103 struct genl_ops *ops; in lookup_family() local
[all …]
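
The genl/mngt.c hits resolve an incoming command ID against the ops' command table before any attribute parsing happens. A compact sketch of that table scan, assuming simplified command/ops structures rather than the real genl_cmd / genl_ops definitions:

    #include <stddef.h>

    /* Simplified stand-ins for struct genl_cmd / struct genl_ops. */
    struct cmd { int c_id; };
    struct genl_ops_sketch {
        int o_ncmds;
        struct cmd *o_cmds;
    };

    static struct cmd *lookup_cmd(struct genl_ops_sketch *ops, int cmd_id)
    {
        int i;

        for (i = 0; i < ops->o_ncmds; i++)
            if (ops->o_cmds[i].c_id == cmd_id)
                return &ops->o_cmds[i];
        return NULL;                           /* unknown command */
    }
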
/external/tensorflow/tensorflow/python/ops/risc/
risc_grad.py
21 from tensorflow.python.framework import ops
24 @ops.RegisterGradient("RiscAbs")
31 @ops.RegisterGradient("RiscAdd")
38 @ops.RegisterGradient("RiscBinaryArithmetic")
45 @ops.RegisterGradient("RiscBinaryComparison")
52 @ops.RegisterGradient("RiscBitcast")
59 @ops.RegisterGradient("RiscBroadcast")
66 @ops.RegisterGradient("RiscCast")
73 @ops.RegisterGradient("RiscCholesky")
80 @ops.RegisterGradient("RiscCeil")
[all …]
/external/rust/crates/grpcio-sys/
grpc_wrap.cc
475 grpc_op ops[6]; in grpcwrap_call_start_unary() local
476 memset(ops, 0, sizeof(ops)); in grpcwrap_call_start_unary()
477 ops[0].op = GRPC_OP_SEND_INITIAL_METADATA; in grpcwrap_call_start_unary()
479 ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; in grpcwrap_call_start_unary()
480 ops[0].data.send_initial_metadata.metadata = in grpcwrap_call_start_unary()
482 ops[0].flags = initial_metadata_flags; in grpcwrap_call_start_unary()
483 ops[0].reserved = nullptr; in grpcwrap_call_start_unary()
485 ops[1].op = GRPC_OP_SEND_MESSAGE; in grpcwrap_call_start_unary()
487 ops[1].data.send_message.send_message = ctx->send_message; in grpcwrap_call_start_unary()
488 ops[1].flags = write_flags; in grpcwrap_call_start_unary()
[all …]
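
Both gRPC wrappers in these results build a stack array of grpc_op entries, zero it, and fill one slot per batch operation before handing it to the core. A condensed sketch of the first two slots, following the fields visible in the snippet (the remaining ops, error handling, and the actual grpc_call_start_batch() call are omitted):

    #include <string.h>
    #include <stdint.h>
    #include <grpc/grpc.h>

    /* Fill the first two slots of a unary-call batch, mirroring the snippet above. */
    static void fill_unary_ops(grpc_op ops[6], grpc_metadata *md, size_t md_count,
                               grpc_byte_buffer *msg, uint32_t write_flags)
    {
        memset(ops, 0, 6 * sizeof(grpc_op));

        ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
        ops[0].data.send_initial_metadata.count = md_count;
        ops[0].data.send_initial_metadata.metadata = md;
        ops[0].flags = 0;
        ops[0].reserved = NULL;

        ops[1].op = GRPC_OP_SEND_MESSAGE;
        ops[1].data.send_message.send_message = msg;
        ops[1].flags = write_flags;
        ops[1].reserved = NULL;
    }
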
/external/tensorflow/tensorflow/c/experimental/filesystem/
modular_filesystem_registration.cc
52 static Status ValidateABI(const TF_FilesystemPluginOps* ops) { in ValidateABI() argument
54 CheckABI(ops->filesystem_ops_abi, TF_FILESYSTEM_OPS_ABI, "filesystem")); in ValidateABI()
56 if (ops->random_access_file_ops != nullptr) in ValidateABI()
57 TF_RETURN_IF_ERROR(CheckABI(ops->random_access_file_ops_abi, in ValidateABI()
61 if (ops->writable_file_ops != nullptr) in ValidateABI()
62 TF_RETURN_IF_ERROR(CheckABI(ops->writable_file_ops_abi, in ValidateABI()
65 if (ops->read_only_memory_region_ops != nullptr) in ValidateABI()
66 TF_RETURN_IF_ERROR(CheckABI(ops->read_only_memory_region_ops_abi, in ValidateABI()
85 static void ValidateAPI(const TF_FilesystemPluginOps* ops) { in ValidateAPI() argument
86 CheckAPI(ops->filesystem_ops_api, TF_FILESYSTEM_OPS_API, "filesystem"); in ValidateAPI()
[all …]
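
The modular-filesystem registration code checks each optional ops table's ABI number before accepting a plugin. A small C sketch of that guarded check with hypothetical names (the real code is C++ and returns tensorflow::Status via TF_RETURN_IF_ERROR):

    /* Hypothetical ABI check mirroring the ValidateABI() pattern above. */
    #define MY_FILESYSTEM_OPS_ABI 1

    struct plugin_ops {
        int filesystem_ops_abi;
        const void *random_access_file_ops;   /* NULL when the plugin omits it */
        int random_access_file_ops_abi;
    };

    static int check_abi(int got, int want)
    {
        return got == want ? 0 : -1;
    }

    static int validate_abi(const struct plugin_ops *ops)
    {
        if (check_abi(ops->filesystem_ops_abi, MY_FILESYSTEM_OPS_ABI) != 0)
            return -1;
        /* Optional tables are only validated when the plugin provides them. */
        if (ops->random_access_file_ops != NULL &&
            check_abi(ops->random_access_file_ops_abi, MY_FILESYSTEM_OPS_ABI) != 0)
            return -1;
        return 0;
    }
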
/external/libnl/lib/route/link/
api.c
55 struct rtnl_link_info_ops *ops; in __rtnl_link_info_ops_lookup() local
57 nl_list_for_each_entry(ops, &info_ops, io_list) in __rtnl_link_info_ops_lookup()
58 if (!strcmp(ops->io_name, name)) in __rtnl_link_info_ops_lookup()
59 return ops; in __rtnl_link_info_ops_lookup()
79 struct rtnl_link_info_ops *ops; in rtnl_link_info_ops_lookup() local
82 if ((ops = __rtnl_link_info_ops_lookup(name))) in rtnl_link_info_ops_lookup()
83 ops->io_refcnt++; in rtnl_link_info_ops_lookup()
86 return ops; in rtnl_link_info_ops_lookup()
93 void rtnl_link_info_ops_put(struct rtnl_link_info_ops *ops) in rtnl_link_info_ops_put() argument
95 if (ops) in rtnl_link_info_ops_put()
[all …]
/external/tensorflow/tensorflow/python/data/experimental/
__init__.py
98 from tensorflow.python.data.experimental.ops.batching import dense_to_ragged_batch
99 from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
100 from tensorflow.python.data.experimental.ops.batching import map_and_batch
101 from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
102 from tensorflow.python.data.experimental.ops.batching import unbatch
103 from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality
104 from tensorflow.python.data.experimental.ops.cardinality import cardinality
105 from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
106 from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
107 from tensorflow.python.data.experimental.ops.counter import Counter
[all …]
/external/swiftshader/third_party/SPIRV-Tools/test/reduce/
structured_loop_to_selection_test.cpp
67 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
69 ASSERT_EQ(1, ops.size()); in TEST()
71 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
72 ops[0]->TryToApply(); in TEST()
213 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
215 ASSERT_EQ(4, ops.size()); in TEST()
217 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
218 ops[0]->TryToApply(); in TEST()
315 ASSERT_TRUE(ops[1]->PreconditionHolds()); in TEST()
316 ops[1]->TryToApply(); in TEST()
[all …]
/external/angle/third_party/vulkan-deps/spirv-tools/src/test/reduce/
structured_loop_to_selection_test.cpp
67 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
69 ASSERT_EQ(1, ops.size()); in TEST()
71 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
72 ops[0]->TryToApply(); in TEST()
213 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
215 ASSERT_EQ(4, ops.size()); in TEST()
217 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
218 ops[0]->TryToApply(); in TEST()
315 ASSERT_TRUE(ops[1]->PreconditionHolds()); in TEST()
316 ops[1]->TryToApply(); in TEST()
[all …]
/external/deqp-deps/SPIRV-Tools/test/reduce/
structured_loop_to_selection_test.cpp
67 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
69 ASSERT_EQ(1, ops.size()); in TEST()
71 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
72 ops[0]->TryToApply(); in TEST()
213 const auto ops = StructuredLoopToSelectionReductionOpportunityFinder() in TEST() local
215 ASSERT_EQ(4, ops.size()); in TEST()
217 ASSERT_TRUE(ops[0]->PreconditionHolds()); in TEST()
218 ops[0]->TryToApply(); in TEST()
315 ASSERT_TRUE(ops[1]->PreconditionHolds()); in TEST()
316 ops[1]->TryToApply(); in TEST()
[all …]
/external/grpc-grpc/src/csharp/ext/
grpc_csharp_ext.c
531 const grpc_op* ops,
538 const grpc_op* ops, in grpcsharp_call_start_batch_nop() argument
545 const grpc_op* ops, in grpcsharp_call_start_batch_default() argument
549 return grpc_call_start_batch(call, ops, nops, tag, reserved); in grpcsharp_call_start_batch_default()
556 const grpc_op* ops, in grpcsharp_call_start_batch() argument
559 return g_call_start_batch_func(call, ops, nops, tag, reserved); in grpcsharp_call_start_batch()
567 grpc_op ops[6]; in grpcsharp_call_start_unary() local
568 memset(ops, 0, sizeof(ops)); in grpcsharp_call_start_unary()
569 ops[0].op = GRPC_OP_SEND_INITIAL_METADATA; in grpcsharp_call_start_unary()
572 ops[0].data.send_initial_metadata.count = ctx->send_initial_metadata.count; in grpcsharp_call_start_unary()
[all …]
/external/tensorflow/tensorflow/compiler/tf2xla/
functionalize_control_flow_test.cc
107 auto x = ops::Placeholder(scope.WithOpName("x"), DT_INT32); in BuildCondGraph()
108 auto y = ops::Placeholder(scope.WithOpName("y"), DT_INT32); in BuildCondGraph()
109 auto less = ops::Less(scope.WithOpName("cond/Less"), y, x); in BuildCondGraph()
110 auto switch_1 = ops::Switch(scope.WithOpName("cond/Switch"), less, less); in BuildCondGraph()
113 ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_true); in BuildCondGraph()
114 auto seventeen = ops::Const<int32>( in BuildCondGraph()
116 auto switch_2 = ops::Switch(scope.WithOpName("cond/Switch"), y, less); in BuildCondGraph()
117 auto mul = ops::Multiply(scope.WithOpName("cond/Mul"), switch_2.output_true, in BuildCondGraph()
121 ops::Identity(scope.WithOpName("cond/Identity"), switch_1.output_false); in BuildCondGraph()
122 auto twenty_three = ops::Const<int32>( in BuildCondGraph()
[all …]
/external/rust/crates/ring/src/ec/suite_b/
private_key.rs
18 use super::{ops::*, verify_affine_point_is_on_the_curve};
28 ops: &PrivateKeyOps, in random_scalar()
31 let num_limbs = ops.common.num_limbs; in random_scalar()
34 generate_private_scalar_bytes(ops, rng, bytes)?; in random_scalar()
35 scalar_from_big_endian_bytes(ops, bytes) in random_scalar()
39 ops: &PrivateKeyOps, in generate_private_scalar_bytes()
75 if check_scalar_big_endian_bytes(ops, candidate).is_err() { in generate_private_scalar_bytes()
93 pub fn private_key_as_scalar(ops: &PrivateKeyOps, private_key: &ec::Seed) -> Scalar { in private_key_as_scalar()
95 scalar_from_big_endian_bytes(ops, private_key.bytes_less_safe()).unwrap() in private_key_as_scalar()
99 ops: &PrivateKeyOps, in check_scalar_big_endian_bytes()
[all …]
/external/mesa3d/src/gallium/winsys/svga/drm/
vmw_fence.c
89 vmw_fence_ops(struct pb_fence_ops *ops) in vmw_fence_ops() argument
91 assert(ops); in vmw_fence_ops()
92 return (struct vmw_fence_ops *)ops; in vmw_fence_ops()
104 vmw_fences_release(struct vmw_fence_ops *ops) in vmw_fences_release() argument
108 mtx_lock(&ops->mutex); in vmw_fences_release()
109 LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) in vmw_fences_release()
111 mtx_unlock(&ops->mutex); in vmw_fences_release()
130 struct vmw_fence_ops *ops = NULL; in vmw_fences_signal() local
136 ops = vmw_fence_ops(fence_ops); in vmw_fences_signal()
137 mtx_lock(&ops->mutex); in vmw_fences_signal()
[all …]
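
vmw_fence.c demonstrates the "derived ops" downcast plus a mutex-guarded list walk when releasing fences. A stripped-down sketch of the container cast, with invented type names in place of the Mesa/gallium ones:

    #include <assert.h>

    /* Invented base/derived ops types illustrating the downcast in the snippet. */
    struct base_fence_ops { int dummy; };
    struct my_fence_ops {
        struct base_fence_ops base;   /* must be the first member for the cast to be valid */
        int extra_state;
    };

    static struct my_fence_ops *to_my_fence_ops(struct base_fence_ops *ops)
    {
        assert(ops);
        return (struct my_fence_ops *)ops;   /* safe only because base is the first member */
    }
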
/external/tensorflow/tensorflow/python/ops/signal/
signal.py
42 from tensorflow.python.ops.signal.dct_ops import dct
43 from tensorflow.python.ops.signal.fft_ops import fft
44 from tensorflow.python.ops.signal.fft_ops import fft2d
45 from tensorflow.python.ops.signal.fft_ops import fft3d
46 from tensorflow.python.ops.signal.fft_ops import fftshift
47 from tensorflow.python.ops.signal.fft_ops import rfft
48 from tensorflow.python.ops.signal.fft_ops import rfft2d
49 from tensorflow.python.ops.signal.fft_ops import rfft3d
50 from tensorflow.python.ops.signal.dct_ops import idct
51 from tensorflow.python.ops.signal.fft_ops import ifft
[all …]
/external/tensorflow/tensorflow/compiler/jit/
encapsulate_xla_computations_pass_test.cc
41 auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32); in MakeOuterGraph()
42 auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); in MakeOuterGraph()
43 auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32); in MakeOuterGraph()
44 auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT); in MakeOuterGraph()
45 auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE); in MakeOuterGraph()
46 auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE); in MakeOuterGraph()
47 auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE); in MakeOuterGraph()
76 ops::XlaClusterOutput(scope.WithOpName("Out0"), Output(launch, 0)); in MakeOuterGraph()
78 ops::XlaClusterOutput(scope.WithOpName("Out1"), Output(launch, 1)); in MakeOuterGraph()
80 ops::XlaClusterOutput(scope.WithOpName("Out2"), Output(launch, 2)); in MakeOuterGraph()
[all …]
/external/tensorflow/tensorflow/python/ops/linalg/
linalg.py
23 from tensorflow.python.ops.linalg import adjoint_registrations as _adjoint_registrations
24 from tensorflow.python.ops.linalg import cholesky_registrations as _cholesky_registrations
25 from tensorflow.python.ops.linalg import inverse_registrations as _inverse_registrations
26 from tensorflow.python.ops.linalg import linear_operator_algebra as _linear_operator_algebra
27 from tensorflow.python.ops.linalg import matmul_registrations as _matmul_registrations
28 from tensorflow.python.ops.linalg import solve_registrations as _solve_registrations
29 from tensorflow.python.ops.linalg.linalg_impl import *
30 from tensorflow.python.ops.linalg.linear_operator import *
31 from tensorflow.python.ops.linalg.linear_operator_block_diag import *
32 from tensorflow.python.ops.linalg.linear_operator_block_lower_triangular import *
[all …]
