use std::borrow::Cow;
use std::cell::Cell;
use std::convert::TryFrom;
use std::ops::Deref;

use gccjit::{
    BinaryOp,
    Block,
    ComparisonOp,
    Context,
    Function,
    LValue,
    RValue,
    ToRValue,
    Type,
    UnaryOp,
};
use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::{
    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
    BackendTypes,
    BaseTypeMethods,
    BuilderMethods,
    ConstMethods,
    DerivedTypeMethods,
    LayoutTypeMethods,
    HasCodegen,
    OverflowOp,
    StaticBuilderMethods,
};
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::bug;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_span::Span;
use rustc_span::def_id::DefId;
use rustc_target::abi::{
    self,
    call::FnAbi,
    Align,
    HasDataLayout,
    Size,
    TargetDataLayout,
    WrappingRange,
};
use rustc_target::spec::{HasTargetSpec, Target};

use crate::common::{SignType, TypeReflection, type_is_pointer};
use crate::context::CodegenCx;
use crate::intrinsic::llvm;
use crate::type_of::LayoutGccExt;

// TODO(antoyo)
type Funclet = ();

// TODO(antoyo): remove this variable.
static mut RETURN_VALUE_COUNT: usize = 0;

enum ExtremumOperation {
    Max,
    Min,
}

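/// Instruction builder for the GCC backend: it wraps the codegen context and
/// tracks the basic block into which instructions are currently emitted.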
pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
    pub cx: &'a CodegenCx<'gcc, 'tcx>,
    pub block: Block<'gcc>,
    stack_var_count: Cell<usize>,
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
        Builder {
            cx,
            block,
            stack_var_count: Cell::new(0),
        }
    }

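    /// Emulates an atomic min/max with a compare-and-swap loop: the current
    /// value is loaded, compared against `src`, and `compare_exchange` is
    /// retried until it succeeds or no update is needed.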
    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = src.get_type().get_size();

        let func = self.current_func();

        let load_ordering =
            match order {
                // TODO(antoyo): does this make sense?
                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
                _ => order,
            };
        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
        self.llbb().add_assignment(None, previous_var, previous_value);
        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());

        let while_block = func.new_block("while");
        let after_block = func.new_block("after_while");
        self.llbb().end_with_jump(None, while_block);

        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block
        // in the state needs to be updated.
        self.switch_to_block(while_block);

        let comparison_operator =
            match operation {
                ExtremumOperation::Max => ComparisonOp::LessThan,
                ExtremumOperation::Min => ComparisonOp::GreaterThan,
            };

        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);

        while_block.end_with_conditional(None, cond, while_block, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
        // state needs to be updated.
        self.switch_to_block(after_block);

        return_value.to_rvalue()
    }

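    /// Lowers to the `__atomic_compare_exchange_<size>` builtin. `cmp` is the
    /// expected value, passed by address; the builtin returns whether the
    /// exchange succeeded.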
    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let size = src.get_type().get_size();
        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);

        // NOTE: not sure why, but we have the wrong type here.
        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, int_type);
        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
    }

    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
        self.llbb().add_assignment(None, lvalue, value);
    }

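    /// Compares the argument types against the parameter types of `func` and
    /// bitcasts the mismatching arguments, returning the possibly-casted
    /// argument list.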
    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let param_count = func.get_param_count();
        for (index, arg) in args.iter().enumerate().take(param_count) {
            let param = func.get_param(index as i32);
            let param = param.to_rvalue().get_type();
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(_i, (expected_ty, &actual_val))| {
                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    self.bitcast(actual_val, expected_ty)
                }
                else {
                    actual_val
                }
            })
            .collect();

        debug_assert_eq!(casted_args.len(), args.len());

        Cow::Owned(casted_args)
    }

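    /// Like `check_call`, but for calls through a function pointer: casts
    /// mismatching arguments to the pointed-to function's parameter types,
    /// dereferences arguments that are passed on the stack, and forwards any
    /// extra (variadic) arguments unchanged.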
    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
            let param = gcc_func.get_param_type(index);
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        let mut on_stack_param_indices = FxHashSet::default();
        if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
            on_stack_param_indices = indices.clone();
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let func_name = format!("{:?}", func_ptr);

        let mut casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(index, (expected_ty, &actual_val))| {
                if llvm::ignore_arg_cast(&func_name, index, args.len()) {
                    return actual_val;
                }

                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) {
                        self.context.new_cast(None, actual_val, expected_ty)
                    }
                    else if on_stack_param_indices.contains(&index) {
                        actual_val.dereference(None).to_rvalue()
                    }
                    else {
                        assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
                        // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
                        // TODO: remove bitcast now that vector types can be compared?
                        self.bitcast(actual_val, expected_ty)
                    }
                }
                else {
                    actual_val
                }
            })
            .collect();

        // NOTE: to account for variadic functions.
        for i in casted_args.len()..args.len() {
            casted_args.push(args[i]);
        }

        Cow::Owned(casted_args)
    }

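    /// Returns `ptr` casted, if needed, to a pointer to the type of `val`, so
    /// that the following store assigns between compatible types.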
    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        }
        else {
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    pub fn current_func(&self) -> Function<'gcc> {
        self.block.get_function()
    }

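    /// Emits a direct call. Since gccjit requires the result of a call to be
    /// used, the result is assigned to a fresh local (or the call is wrapped
    /// in `add_eval` for void functions).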
    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // TODO(antoyo): remove when the API supports a different type for functions.
        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
        let args = self.check_call("call", func, args);

        // gccjit requires the result of functions to be used, even when it's not.
        // That's why we assign the result to a local or call add_eval().
        let return_type = func.get_return_type();
        let void_type = self.context.new_type::<()>();
        let current_func = self.block.get_function();
        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
            result.to_rvalue()
        }
        else {
            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
            // Return a dummy value when there is no return value.
            self.context.new_rvalue_from_long(self.isize_type, 0)
        }
    }

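    /// Emits an indirect call through `func_ptr`, casting the pointer when its
    /// type is opaque and adjusting arguments and return value for LLVM
    /// intrinsics when needed (see `intrinsic::llvm`).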
    fn function_ptr_call(&mut self, typ: Type<'gcc>, mut func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let gcc_func =
            match func_ptr.get_type().dyncast_function_ptr_type() {
                Some(func) => func,
                None => {
                    // NOTE: due to opaque pointers now being used, we need to cast here.
                    let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
                    func_ptr = self.context.new_cast(None, func_ptr, typ);
                    new_func_type
                },
            };
        let func_name = format!("{:?}", func_ptr);
        let previous_arg_count = args.len();
        let orig_args = args;
        let args = {
            let function_address_names = self.function_address_names.borrow();
            let original_function_name = function_address_names.get(&func_ptr);
            llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name)
        };
        let args_adjusted = args.len() != previous_arg_count;
        let args = self.check_ptr_call("call", func_ptr, &*args);

        // gccjit requires the result of functions to be used, even when it's not.
        // That's why we assign the result to a local or call add_eval().
        let return_type = gcc_func.get_return_type();
        let void_type = self.context.new_type::<()>();
        let current_func = self.block.get_function();

        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args);
            let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args);
            let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
            self.block.add_assignment(None, result, return_value);
            result.to_rvalue()
        }
        else {
            #[cfg(not(feature="master"))]
            if gcc_func.get_param_count() == 0 {
                // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
            }
            else {
                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            }
            #[cfg(feature="master")]
            self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            // Return a dummy value when there is no return value.
            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
            result.to_rvalue()
        }
    }

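    /// Calls an overflow builtin (one that returns a boolean overflow flag)
    /// and assigns its result to a local before returning it, since gccjit
    /// requires call results to be used.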
    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // gccjit requires the result of functions to be used, even when it's not.
        // That's why we assign the result to a local.
        let return_type = self.context.new_type::<bool>();
        let current_func = self.block.get_function();
        // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
        unsafe { RETURN_VALUE_COUNT += 1 };
        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
        result.to_rvalue()
    }
}

impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
    type CodegenCx = CodegenCx<'gcc, 'tcx>;
}

impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx()
    }
}

impl HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'a, 'gcc, 'tcx> Deref for Builder<'a, 'gcc, 'tcx> {
    type Target = CodegenCx<'gcc, 'tcx>;

    fn deref<'b>(&'b self) -> &'a Self::Target {
        self.cx
    }
}

impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
}

impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
        Builder::with_cx(cx, block)
    }

    fn llbb(&self) -> Block<'gcc> {
        self.block
    }

    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
        let func = cx.rvalue_as_function(func);
        func.new_block(name)
    }

    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
        let func = self.current_func();
        func.new_block(name)
    }

    fn switch_to_block(&mut self, block: Self::BasicBlock) {
        self.block = block;
    }

    fn ret_void(&mut self) {
        self.llbb().end_with_void_return(None)
    }

    fn ret(&mut self, mut value: RValue<'gcc>) {
        if self.structs_as_pointer.borrow().contains(&value) {
            // NOTE: hack to workaround a limitation of the rustc API: see comment on
            // CodegenCx.structs_as_pointer
            value = value.dereference(None).to_rvalue();
        }
        let expected_return_type = self.current_func().get_return_type();
        if !expected_return_type.is_compatible_with(value.get_type()) {
            // NOTE: due to opaque pointers now being used, we need to cast here.
            value = self.context.new_cast(None, value, expected_return_type);
        }
        self.llbb().end_with_return(None, value);
    }

    fn br(&mut self, dest: Block<'gcc>) {
        self.llbb().end_with_jump(None, dest)
    }

    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
        self.llbb().end_with_conditional(None, cond, then_block, else_block)
    }

    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
        let mut gcc_cases = vec![];
        let typ = self.val_ty(value);
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(typ, on_val);
            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
        }
        self.block.end_with_switch(None, value, default_block, &gcc_cases);
    }

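    /// Emulates LLVM's `invoke`: with the `master` feature the call is wrapped
    /// in a try/catch (or try/finally for cleanup blocks); without it, the
    /// call is emitted as a plain call followed by an always-taken branch to
    /// `then`.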
    #[cfg(feature="master")]
    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let try_block = self.current_func().new_block("try");

        let current_block = self.block.clone();
        self.block = try_block;
        let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here?
        self.block = current_block;

        let return_value = self.current_func()
            .new_local(None, call.get_type(), "invokeResult");

        try_block.add_assignment(None, return_value, call);

        try_block.end_with_jump(None, then);

        if self.cleanup_blocks.borrow().contains(&catch) {
            self.block.add_try_finally(None, try_block, catch);
        }
        else {
            self.block.add_try_catch(None, try_block, catch);
        }

        self.block.end_with_jump(None, then);

        return_value.to_rvalue()
    }

    #[cfg(not(feature="master"))]
    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: &CodegenFnAttrs, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let call_site = self.call(typ, fn_attrs, None, func, args, None);
        let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
        self.llbb().end_with_conditional(None, condition, then, catch);
        if let Some(_fn_abi) = fn_abi {
            // TODO(bjorn3): Apply function attributes
        }
        call_site
    }

    fn unreachable(&mut self) {
        let func = self.context.get_builtin_function("__builtin_unreachable");
        self.block.add_eval(None, self.context.new_call(None, func, &[]));
        let return_type = self.block.get_function().get_return_type();
        let void_type = self.context.new_type::<()>();
        if return_type == void_type {
            self.block.end_with_void_return(None)
        }
        else {
            let return_value = self.current_func()
                .new_local(None, return_type, "unreachableReturn");
            self.block.end_with_return(None, return_value)
        }
    }

    fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_add(a, b)
    }

    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_sub(a, b)
    }

    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a - b
    }

    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_mul(a, b)
    }

    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_udiv(a, b)
    }

    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): poison if not exact.
        let a_type = a.get_type().to_unsigned(self);
        let a = self.gcc_int_cast(a, a_type);
        let b_type = b.get_type().to_unsigned(self);
        let b = self.gcc_int_cast(b, b_type);
        a / b
    }

    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_sdiv(a, b)
    }

    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): poison if not exact.
        // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
        // should be the same.
        let typ = a.get_type().to_signed(self);
        let b = self.context.new_cast(None, b, typ);
        a / b
    }

    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a / b
    }

    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_urem(a, b)
    }

    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_srem(a, b)
    }

    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): add check in libgccjit since using the binary operator % causes the following error:
        // during RTL pass: expand
        // libgccjit.so: error: in expmed_mode_index, at expmed.h:240
        // 0x7f0101d58dc6 expmed_mode_index
        //     ../../../gcc/gcc/expmed.h:240
        // 0x7f0101d58e35 expmed_op_cost_ptr
        //     ../../../gcc/gcc/expmed.h:262
        // 0x7f0101d594a1 sdiv_cost_ptr
        //     ../../../gcc/gcc/expmed.h:531
        // 0x7f0101d594f3 sdiv_cost
        //     ../../../gcc/gcc/expmed.h:549
        // 0x7f0101d6af7e expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int, optab_methods)
        //     ../../../gcc/gcc/expmed.cc:4356
        // 0x7f0101d94f9e expand_expr_divmod
        //     ../../../gcc/gcc/expr.cc:8929
        // 0x7f0101d97a26 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
        //     ../../../gcc/gcc/expr.cc:9566
        // 0x7f0101bef6ef expand_gimple_stmt_1
        //     ../../../gcc/gcc/cfgexpand.cc:3967
        // 0x7f0101bef910 expand_gimple_stmt
        //     ../../../gcc/gcc/cfgexpand.cc:4028
        // 0x7f0101bf6ee7 expand_gimple_basic_block
        //     ../../../gcc/gcc/cfgexpand.cc:6069
        // 0x7f0101bf9194 execute
        //     ../../../gcc/gcc/cfgexpand.cc:6795
        if a.get_type().is_compatible_with(self.cx.float_type) {
            let fmodf = self.context.get_builtin_function("fmodf");
            // FIXME(antoyo): this seems to produce the wrong result.
            return self.context.new_call(None, fmodf, &[a, b]);
        }
        assert_eq!(a.get_type().unqualified(), self.cx.double_type);

        let fmod = self.context.get_builtin_function("fmod");
        return self.context.new_call(None, fmod, &[a, b]);
    }

    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_shl(a, b)
    }

    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_lshr(a, b)
    }

    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check whether behavior is an arithmetic shift for >> .
        // It seems to be if the value is signed.
        self.gcc_lshr(a, b)
    }

    fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_and(a, b)
    }

    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.cx.gcc_or(a, b)
    }

    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_xor(a, b)
    }

    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_neg(a)
    }

    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
    }

    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_not(a)
    }

    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_add(a, b)
    }

    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a - b
    }

    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): should generate poison value?
        self.gcc_sub(a, b)
    }

    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
        lhs + rhs
    }

    fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
        lhs - rhs
    }

    fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
        lhs * rhs
    }

    fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
        lhs / rhs
    }

    fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
        self.frem(lhs, rhs)
    }

    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
        self.gcc_checked_binop(oop, typ, lhs, rhs)
    }

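    /// Allocates a stack slot by declaring a new local of the requested
    /// (aligned) type and returning its address.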
    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
        // Ideally, we shouldn't need to do this check.
        let aligned_type =
            if ty == self.cx.u128_type || ty == self.cx.i128_type {
                ty
            }
            else {
                ty.get_aligned(align.bytes())
            };
        // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
        self.stack_var_count.set(self.stack_var_count.get() + 1);
        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
    }

    fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
        unimplemented!();
    }

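    /// Loads a value by dereferencing `ptr` into a fresh local in the current
    /// block, so that the dereference is anchored to this block instead of
    /// being re-evaluated wherever the value is used.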
    fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
        let block = self.llbb();
        let function = block.get_function();
        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
        // the current basic block. Otherwise, it could be used in another basic block, causing a
        // dereference after a drop, for instance.
        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
        // Ideally, we shouldn't need to do this check.
        let aligned_type =
            if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
                pointee_ty
            }
            else {
                pointee_ty.get_aligned(align.bytes())
            };
        let ptr = self.context.new_cast(None, ptr, aligned_type.make_pointer());
        let deref = ptr.dereference(None).to_rvalue();
        unsafe { RETURN_VALUE_COUNT += 1 };
        let loaded_value = function.new_local(None, aligned_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
        block.add_assignment(None, loaded_value, deref);
        loaded_value.to_rvalue()
    }

    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
        ptr.dereference(None).to_rvalue()
    }

    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        // TODO(antoyo): handle alignment.
        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let volatile_const_void_ptr_type = self.context.new_type::<()>()
            .make_const()
            .make_volatile()
            .make_pointer();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
        self.context.new_call(None, atomic_load, &[ptr, ordering])
    }

    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::zero_sized(place.layout);
        }

        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
            let vr = scalar.valid_range(bx);
            match scalar.primitive() {
                abi::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, vr);
                    }
                }
                abi::Pointer(_) if vr.start < vr.end && !vr.contains(0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val =
            if let Some(llextra) = place.llextra {
                OperandValue::Ref(place.llval, Some(llextra), place.align)
            }
            else if place.layout.is_gcc_immediate() {
                let load = self.load(
                    place.layout.gcc_type(self),
                    place.llval,
                    place.align,
                );
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                OperandValue::Immediate(self.to_immediate(load, place.layout))
            }
            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
                let b_offset = a.size(self).align_to(b.align(self).abi);
                let pair_type = place.layout.gcc_type(self);

                let mut load = |i, scalar: &abi::Scalar, align| {
                    let llptr = self.struct_gep(pair_type, place.llval, i as u64);
                    let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
                    let load = self.load(llty, llptr, align);
                    scalar_load_metadata(self, load, scalar);
                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
                };

                OperandValue::Pair(
                    load(0, a, place.align),
                    load(1, b, place.align.restrict_for_offset(b_offset)),
                )
            }
            else {
                OperandValue::Ref(place.llval, None, place.align)
            };

        OperandRef { val, layout: place.layout }
    }

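    /// Stores `cg_elem` into each of the `count` elements of `dest` by
    /// emitting a header/body/next loop over element pointers.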
    fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(self, zero).llval;
        let end = dest.project_index(self, count).llval;

        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
        let next_bb = self.append_sibling_block("repeat_loop_next");

        let ptr_type = start.get_type();
        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
        let current_val = current.to_rvalue();
        self.assign(current, start);

        self.br(header_bb);

        self.switch_to_block(header_bb);
        let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
        self.cond_br(keep_going, body_bb, next_bb);

        self.switch_to_block(body_bb);
        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));

        let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
        self.llbb().add_assignment(None, current, next);
        self.br(header_bb);

        self.switch_to_block(next_bb);
    }

    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
        // TODO(antoyo)
    }

    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

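    /// Stores `val` through `ptr` by casting the destination to a pointer to
    /// a type carrying the requested alignment and assigning through it;
    /// `flags` are currently ignored.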
    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
        let ptr = self.check_store(val, ptr);
        let destination = ptr.dereference(None);
        // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
        // the pointer to a type that carries the proper alignment.
        let destination_type = destination.to_rvalue().get_type().unqualified();
        let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
        let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
        let aligned_destination = aligned_destination.dereference(None);
        self.llbb().add_assignment(None, aligned_destination, val);
        // TODO(antoyo): handle align and flags.
        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
        self.cx.context.new_rvalue_zero(self.type_i32())
    }

    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
        // TODO(antoyo): handle alignment.
        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let volatile_const_void_ptr_type = self.context.new_type::<()>()
            .make_volatile()
            .make_pointer();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);

        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
        // the following cast is required to avoid this error:
        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
        let value = self.context.new_cast(None, value, int_type);
        self.llbb()
            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
    }

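    /// Computes a pointer offset by casting the pointer to an integer and
    /// adding `index * size_of(pointee)` for each index, because GCC's array
    /// indexing is always considered in bounds.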
    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        let ptr_type = ptr.get_type();
        let mut pointee_type = ptr.get_type();
        // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
        // always considered in bounds in GCC (TODO(antoyo): to be verified).
        // So, we have to cast to a number.
        let mut result = self.context.new_bitcast(None, ptr, self.sizet_type);
        // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would
        // require dereferencing the pointer.
        for index in indices {
            pointee_type = pointee_type.get_pointee().expect("pointee type");
            let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
            result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
        }
        self.context.new_bitcast(None, result, ptr_type)
    }

    fn inbounds_gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        // NOTE: due to opaque pointers now being used, we need to cast here.
        let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
        // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
        let mut indices = indices.into_iter();
        let index = indices.next().expect("first index in inbounds_gep");
        let mut result = self.context.new_array_access(None, ptr, *index);
        for index in indices {
            result = self.context.new_array_access(None, result, *index);
        }
        result.get_address(None)
    }

    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value = ptr.dereference(None).to_rvalue();

        if value_type.dyncast_array().is_some() {
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, value, index);
            element.get_address(None)
        }
        else if let Some(vector_type) = value_type.dyncast_vector() {
            let array_type = vector_type.get_element_type().make_pointer();
            let array = self.bitcast(ptr, array_type);
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, array, index);
            element.get_address(None)
        }
        else if let Some(struct_type) = value_type.is_struct() {
            // NOTE: due to opaque pointers now being used, we need to bitcast here.
            let ptr = self.bitcast_if_needed(ptr, value_type.make_pointer());
            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
        }
        else {
            panic!("Unexpected type {:?}", value_type);
        }
    }

    /* Casts */
    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check that it indeed truncates the value.
        self.gcc_int_cast(value, dest_ty)
    }

    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check that it indeed sign extends the value.
        if dest_ty.dyncast_vector().is_some() {
            // TODO(antoyo): nothing to do as it is only for LLVM?
            return value;
        }
        self.context.new_cast(None, value, dest_ty)
    }

    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_float_to_uint_cast(value, dest_ty)
    }

    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_float_to_int_cast(value, dest_ty)
    }

    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_uint_to_float_cast(value, dest_ty)
    }

    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_int_to_float_cast(value, dest_ty)
    }

    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): make sure it truncates.
        self.context.new_cast(None, value, dest_ty)
    }

    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.context.new_cast(None, value, dest_ty)
    }

    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
        self.intcast(usize_value, dest_ty, false)
    }

    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let usize_value = self.intcast(value, self.cx.type_isize(), false);
        self.cx.const_bitcast(usize_value, dest_ty)
    }

    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.cx.const_bitcast(value, dest_ty)
    }

    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
        // NOTE: is_signed is for value, not dest_typ.
        self.gcc_int_cast(value, dest_typ)
    }

    fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let val_type = value.get_type();
        match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
            (false, true) => {
                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
                // a pointer, which is not supported by gccjit.
                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
            },
            (false, false) => {
                // When they are not pointers, we want a transmute (or reinterpret_cast).
                self.bitcast(value, dest_ty)
            },
            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
            (true, false) => unimplemented!(),
        }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_icmp(op, lhs, rhs)
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
    }

    /* Miscellaneous instructions */
    fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
        let memcpy = self.context.get_builtin_function("memcpy");
        // TODO(antoyo): handle aligns and is_volatile.
        self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
    }

    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));

        let memmove = self.context.get_builtin_function("memmove");
        // TODO(antoyo): handle is_volatile.
        self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
    }

    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        let memset = self.context.get_builtin_function("memset");
        // TODO(antoyo): handle align and is_volatile.
        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
        let size = self.intcast(size, self.type_size_t(), false);
        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
    }

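    /// Lowers `select` by branching to one of two blocks that assign
    /// `then_val` or `else_val` to a temporary, then rejoining and returning
    /// the temporary.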
    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
        let func = self.current_func();
        let variable = func.new_local(None, then_val.get_type(), "selectVar");
        let then_block = func.new_block("then");
        let else_block = func.new_block("else");
        let after_block = func.new_block("after");
        self.llbb().end_with_conditional(None, cond, then_block, else_block);

        then_block.add_assignment(None, variable, then_val);
        then_block.end_with_jump(None, after_block);

        if !then_val.get_type().is_compatible_with(else_val.get_type()) {
            else_val = self.context.new_cast(None, else_val, then_val.get_type());
        }
        else_block.add_assignment(None, variable, else_val);
        else_block.end_with_jump(None, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in the
        // state needs to be updated.
        self.switch_to_block(after_block);

        variable.to_rvalue()
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    #[cfg(feature="master")]
    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
        self.context.new_vector_access(None, vec, idx).to_rvalue()
    }

    #[cfg(not(feature="master"))]
    fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
        let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type");
        let element_type = vector_type.get_element_type();
        let vec_num_units = vector_type.get_num_units();
        let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64);
        let array = self.context.new_bitcast(None, vec, array_type).to_rvalue();
        self.context.new_array_access(None, array, idx).to_rvalue()
    }

    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        if value_type.dyncast_array().is_some() {
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, aggregate_value, index);
            element.get_address(None)
        }
        else if value_type.dyncast_vector().is_some() {
            panic!();
        }
        else if let Some(pointer_type) = value_type.get_pointee() {
            if let Some(struct_type) = pointer_type.is_struct() {
                // NOTE: hack to workaround a limitation of the rustc API: see comment on
                // CodegenCx.structs_as_pointer
                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            }
        }
        else if let Some(struct_type) = value_type.is_struct() {
            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
        }
        else {
            panic!("Unexpected type {:?}", value_type);
        }
    }

    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        let lvalue =
            if value_type.dyncast_array().is_some() {
                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
                self.context.new_array_access(None, aggregate_value, index)
            }
            else if value_type.dyncast_vector().is_some() {
                panic!();
            }
            else if let Some(pointer_type) = value_type.get_pointee() {
                if let Some(struct_type) = pointer_type.is_struct() {
                    // NOTE: hack to workaround a limitation of the rustc API: see comment on
                    // CodegenCx.structs_as_pointer
                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
                }
                else {
                    panic!("Unexpected type {:?}", value_type);
                }
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            };

        let lvalue_type = lvalue.to_rvalue().get_type();
        let value =
            // NOTE: sometimes, rustc will create a value with the wrong type.
            if lvalue_type != value.get_type() {
                self.context.new_cast(None, value, lvalue_type)
            }
            else {
                value
            };

        self.llbb().add_assignment(None, lvalue, value);

        aggregate_value
    }

    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
        #[cfg(feature="master")]
        {
            let personality = self.rvalue_as_function(_personality);
            self.current_func().set_personality_function(personality);
        }
    }

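    // NOTE: the "master" landing pad queries GCC's `__builtin_eh_pointer(0)` to obtain the
    // pointer to the exception currently being unwound; the second element of the returned pair
    // is only a placeholder (see the TODO below), where LLVM would normally carry the exception
    // selector/type index.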
    #[cfg(feature="master")]
    fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
        self.set_personality_fn(pers_fn);

        // NOTE: record the current block so that a later call to invoke knows to
        // generate a try/finally instead of a try/catch for this block.
        self.cleanup_blocks.borrow_mut().insert(self.block);

        let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
        let zero = self.cx.context.new_rvalue_zero(self.int_type);
        let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]);

        let value1_type = self.u8_type.make_pointer();
        let ptr = self.cx.context.new_cast(None, ptr, value1_type);
        let value1 = ptr;
        let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?).

        (value1, value2)
    }

    #[cfg(not(feature="master"))]
    fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
        let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0")
            .to_rvalue();
        let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue();
        (value1, value2)
    }

    fn filter_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
        // TODO(antoyo): generate the correct landing pad
        self.cleanup_landing_pad(pers_fn)
    }

    #[cfg(feature="master")]
    fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
        let exn_type = exn0.get_type();
        let exn = self.context.new_cast(None, exn0, exn_type);
        let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume");
        self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn]));
        self.unreachable();
    }

    #[cfg(not(feature="master"))]
    fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
        self.unreachable();
    }

    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
        unimplemented!();
    }

    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn catch_switch(
        &mut self,
        _parent: Option<RValue<'gcc>>,
        _unwind: Option<Block<'gcc>>,
        _handlers: &[Block<'gcc>],
    ) -> RValue<'gcc> {
        unimplemented!();
    }

    // Atomic Operations
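    // NOTE: rustc expects `atomic_cmpxchg` to produce a { value, success } pair, while the
    // `compare_exchange` helper only yields the success flag (after the call, `expected` holds
    // the previous value); the pair is therefore rebuilt by hand in a local struct below.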
    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
        self.llbb().add_assignment(None, expected, cmp);
        // NOTE: gcc doesn't support a failure memory model that is stronger than the success
        // memory model.
        let order =
            if failure_order as i32 > order as i32 {
                failure_order
            }
            else {
                order
            };
        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);

        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
        let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.

        let value_type = result.to_rvalue().get_type();
        if let Some(struct_type) = value_type.is_struct() {
            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
            // NOTE: since success contains the call to the intrinsic, it must be stored before
            // expected so that we store expected after the call.
            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
        }
        // TODO(antoyo): handle when value is not a struct.

        result.to_rvalue()
    }

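    // NOTE: each RMW operation is lowered to the matching `__atomic_*` builtin, whose name is
    // suffixed with the operand size in bytes (e.g. a 4-byte add uses `__atomic_fetch_add_4`);
    // min/max have no such builtin and go through the compare-exchange loop in `atomic_extremum`.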
    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = src.get_type().get_size();
        let name =
            match op {
                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
            };

        let atomic_function = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        // FIXME(antoyo): not sure why, but we have the wrong type here.
        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, new_src_type);
        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
        self.context.new_cast(None, res, src.get_type())
    }

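    // NOTE: a single-thread fence only needs to prevent compiler reordering, hence
    // `__atomic_signal_fence`; a cross-thread fence emits a real `__atomic_thread_fence`.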
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
        let name =
            match scope {
                SynchronizationScope::SingleThread => "__atomic_signal_fence",
                SynchronizationScope::CrossThread => "__atomic_thread_fence",
            };
        let thread_fence = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
    }

    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
        // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
        self.normal_function_addresses.borrow_mut().insert(load);
        // TODO(antoyo)
    }

    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn call(
        &mut self,
        typ: Type<'gcc>,
        _fn_attrs: Option<&CodegenFnAttrs>,
        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
        func: RValue<'gcc>,
        args: &[RValue<'gcc>],
        funclet: Option<&Funclet>,
    ) -> RValue<'gcc> {
        // FIXME(antoyo): remove when having a proper API.
        let gcc_func = unsafe { std::mem::transmute(func) };
        let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
            self.function_call(func, args, funclet)
        }
        else {
            // If it's not a function that was defined, it's a function pointer.
            self.function_ptr_call(typ, func, args, funclet)
        };
        if let Some(_fn_abi) = fn_abi {
            // TODO(bjorn3): Apply function attributes
        }
        call
    }

    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): this does not zero-extend.
        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
            // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
            // Fix the code in codegen_ssa::base::from_immediate.
            return value;
        }
        self.gcc_int_cast(value, dest_typ)
    }

    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
        self.cx
    }

    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
        // FIXME(bjorn3): implement
    }

    fn set_span(&mut self, _span: Span) {}

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        }
        else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

    fn fptoui_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.fptoint_sat(false, val, dest_ty)
    }

    fn fptosi_sat(&mut self, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.fptoint_sat(true, val, dest_ty)
    }

    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
        unimplemented!();
    }
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let src_ty = self.cx.val_ty(val);
        let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
            (self.cx.element_type(src_ty), self.cx.element_type(dest_ty))
        } else {
            (src_ty, dest_ty)
        };

        // FIXME(jistone): the following was originally the fallback SSA implementation, before LLVM 13
        // added native `fptosi.sat` and `fptoui.sat` conversions, but it was used by GCC as well.
        // Now that LLVM always relies on its own, the code has been moved to GCC, but the comments are
        // still LLVM-specific. This should be updated, and use better GCC specifics if possible.

        let int_width = self.cx.int_width(int_ty);
        let float_width = self.cx.float_width(float_ty);
        // LLVM's fpto[su]i returns undef when the input val is infinite, NaN, or does not fit into the
        // destination integer type after rounding towards zero. This `undef` value can cause UB in
        // safe code (see issue #10184), so we implement a saturating conversion on top of it:
        // Semantically, the mathematical value of the input is rounded towards zero to the next
        // mathematical integer, and then the result is clamped into the range of the destination
        // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
        // the destination integer type. NaN is mapped to 0.
        //
        // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
        // a value representable in int_ty.
        // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
        // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
        // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
        // representable. Note that this only works if float_ty's exponent range is sufficiently large.
        // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
        // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
        // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
        // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
        // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
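        // For example, with int_width == 8: shift_amount == 120, so the signed maximum is
        // i128::MAX >> 120 == 127 (i8::MAX) and the unsigned maximum is u128::MAX >> 120 == 255
        // (u8::MAX); int_min gives i128::MIN >> 120 == -128 (i8::MIN) via an arithmetic shift.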
        let int_max = |signed: bool, int_width: u64| -> u128 {
            let shift_amount = 128 - int_width;
            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
        };
        let int_min = |signed: bool, int_width: u64| -> i128 {
            if signed { i128::MIN >> (128 - int_width) } else { 0 }
        };

        let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };
        let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };
        // To implement saturation, we perform the following steps:
        //
        // 1. Cast val to an integer with fpto[su]i. This may result in undef.
        // 2. Compare val to f_min and f_max, and use the comparison results to select:
        //    a) int_ty::MIN if val < f_min or val is NaN
        //    b) int_ty::MAX if val > f_max
        //    c) the result of fpto[su]i otherwise
        // 3. If val is NaN, return 0.0, otherwise return the result of step 2.
        //
        // This avoids returning undef because values in range [f_min, f_max] by definition fit into the
        // destination type. It creates an undef temporary, but *producing* undef is not UB. Our use of
        // undef does not introduce any non-determinism either.
        // More importantly, the above procedure correctly implements saturating conversion.
        // Proof (sketch):
        // If val is NaN, 0 is returned by definition.
        // Otherwise, val is finite or infinite and thus can be compared with f_min and f_max.
        // This yields three cases to consider:
        // (1) if val in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
        //     saturating conversion for inputs in that range.
        // (2) if val > f_max, then val is larger than int_ty::MAX. This holds even if f_max is rounded
        //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
        //     than int_ty::MAX. Because val is larger than int_ty::MAX, the return value of int_ty::MAX
        //     is correct.
        // (3) if val < f_min, then val is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
        //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
        // QED.

        let float_bits_to_llval = |bx: &mut Self, bits| {
            let bits_llval = match float_width {
                32 => bx.cx().const_u32(bits as u32),
                64 => bx.cx().const_u64(bits as u64),
                n => bug!("unsupported float width {}", n),
            };
            bx.bitcast(bits_llval, float_ty)
        };
        let (f_min, f_max) = match float_width {
            32 => compute_clamp_bounds_single(signed, int_width),
            64 => compute_clamp_bounds_double(signed, int_width),
            n => bug!("unsupported float width {}", n),
        };
        let f_min = float_bits_to_llval(self, f_min);
        let f_max = float_bits_to_llval(self, f_max);
        let int_max = self.cx.const_uint_big(int_ty, int_max(signed, int_width));
        let int_min = self.cx.const_uint_big(int_ty, int_min(signed, int_width) as u128);
        let zero = self.cx.const_uint(int_ty, 0);

        // If we're working with vectors, constants must be "splatted": the constant is duplicated
        // into each lane of the vector. The algorithm stays the same, we are just using the
        // same constant across all lanes.
        let maybe_splat = |bx: &mut Self, val| {
            if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
                bx.vector_splat(bx.vector_length(dest_ty), val)
            } else {
                val
            }
        };
        let f_min = maybe_splat(self, f_min);
        let f_max = maybe_splat(self, f_max);
        let int_max = maybe_splat(self, int_max);
        let int_min = maybe_splat(self, int_min);
        let zero = maybe_splat(self, zero);

        // Step 1 ...
        let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
        let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
        let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);

        // Step 2: We use two comparisons and two selects, with %s1 being the
        // result:
        //     %less_or_nan = fcmp ult %val, %f_min
        //     %greater = fcmp ogt %val, %f_max
        //     %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
        //     %s1 = select %greater, int_ty::MAX, %s0
        // Note that %less_or_nan uses an *unordered* comparison. This
        // comparison is true if the operands are not comparable (i.e., if val is
        // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
        // val is NaN.
        //
        // Performance note: Unordered comparison can be lowered to a "flipped"
        // comparison and a negation, and the negation can be merged into the
        // select. Therefore, it is not necessarily any more expensive than an
        // ordered ("normal") comparison. Whether these optimizations will be
        // performed is ultimately up to the backend, but at least x86 does
        // perform them.
        let s0 = self.select(less_or_nan, int_min, fptosui_result);
        let s1 = self.select(greater, int_max, s0);

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if val is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (val == val) instead
            let cmp = self.fcmp(RealPredicate::RealOEQ, val, val);
            self.select(cmp, s1, zero)
        } else {
            s1
        }
    }

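    // NOTE: `new_rvalue_vector_perm` requires the mask and both inputs to have the same number of
    // lanes, so the mask is widened with zero entries (or the inputs are joined and padded with a
    // dummy second vector) before the permutation, and any padding lanes are dropped from the
    // result afterwards.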
    #[cfg(feature="master")]
    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
        let struct_type = mask.get_type().is_struct().expect("mask should be of struct type");

        // TODO(antoyo): use a recursive unqualified() here.
        let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_type = vector_type.get_element_type();
        let vec_num_units = vector_type.get_num_units();

        let mask_num_units = struct_type.get_field_count();
        let mut vector_elements = vec![];
        let mask_element_type =
            if element_type.is_integral() {
                element_type
            }
            else {
                #[cfg(feature="master")]
                {
                    self.cx.type_ix(element_type.get_size() as u64 * 8)
                }
                #[cfg(not(feature="master"))]
                self.int_type
            };
        for i in 0..mask_num_units {
            let field = struct_type.get_field(i as i32);
            vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
        }

        // NOTE: the mask needs to be the same length as the input vectors, so add the missing
        // elements in the mask if needed.
        for _ in mask_num_units..vec_num_units {
            vector_elements.push(self.context.new_rvalue_zero(mask_element_type));
        }

        let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
        let (v1, v2) =
            if vec_num_units < mask_num_units {
                // NOTE: the mask needs to be the same length as the input vectors, so join the 2
                // vectors and create a dummy second vector.
                let mut elements = vec![];
                for i in 0..vec_num_units {
                    elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
                }
                for i in 0..(mask_num_units - vec_num_units) {
                    elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
                }
                let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
                let zero = self.context.new_rvalue_zero(element_type);
                let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
                (v1, v2)
            }
            else {
                (v1, v2)
            };

        let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
        let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
        let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
        let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);

        if vec_num_units != mask_num_units {
            // NOTE: if padding was added, only select the number of elements of the masks to
            // remove that padding in the result.
            let mut elements = vec![];
            for i in 0..mask_num_units {
                elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
            }
            self.context.new_rvalue_from_vector(None, result_type, &elements)
        }
        else {
            result
        }
    }

    #[cfg(not(feature="master"))]
    pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

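    // NOTE: `vector_reduce` performs a log2-step tree reduction: at each step the vector is
    // rotated by `shift` lanes with a vector permutation and combined with itself using `op`, so
    // that (for power-of-two lane counts) lane 0 ends up holding the reduction of all lanes.
    // For a 4-lane vector: rotate by 1 and combine, then rotate by 2 and combine.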
    #[cfg(feature="master")]
    pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
    {
        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_type = vector_type.get_element_type();
        let mask_element_type = self.type_ix(element_type.get_size() as u64 * 8);
        let element_count = vector_type.get_num_units();
        let mut vector_elements = vec![];
        for i in 0..element_count {
            vector_elements.push(i);
        }
        let mask_type = self.context.new_vector_type(mask_element_type, element_count as u64);
        let mut shift = 1;
        let mut res = src;
        while shift < element_count {
            let vector_elements: Vec<_> =
                vector_elements.iter()
                    .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32))
                    .collect();
            let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
            let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
            shift *= 2;
            res = op(res, shifted, &self.context);
        }
        self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
            .to_rvalue()
    }

    #[cfg(not(feature="master"))]
    pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
    {
        unimplemented!();
    }

    pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
        self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
    }

    pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    #[cfg(feature="master")]
    pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_count = vector_type.get_num_units();
        (0..element_count).into_iter()
            .map(|i| self.context
                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
                .to_rvalue())
            .fold(acc, |x, i| x + i)
    }

    #[cfg(not(feature="master"))]
    pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    #[cfg(feature="master")]
    pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_count = vector_type.get_num_units();
        (0..element_count).into_iter()
            .map(|i| self.context
                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
                .to_rvalue())
            .fold(acc, |x, i| x * i)
    }

    #[cfg(not(feature="master"))]
    pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!()
    }

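    // NOTE: the two reductions below use the branchless Hacker's Delight identities
    // min(a, b) == b + ((a - b) & m) and max(a, b) == a - ((a - b) & m), where m is a lane-wise
    // all-ones mask when b >= a and zero otherwise (see `difference_or_zero` at the bottom of
    // this file); this assumes the subtraction `a - b` does not overflow.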
    // Inspired by Hacker's Delight min implementation.
    pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
        self.vector_reduce(src, |a, b, context| {
            let differences_or_zeros = difference_or_zero(a, b, context);
            context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
        })
    }

    // Inspired by Hacker's Delight max implementation.
    pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
        self.vector_reduce(src, |a, b, context| {
            let differences_or_zeros = difference_or_zero(a, b, context);
            context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
        })
    }

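    // NOTE: lane-wise fmin/fmax with "return the other operand if one lane is NaN" semantics:
    // NaN lanes in `b` are first replaced by the corresponding lanes of `a`, then the comparison
    // mask splices the winning lanes together with bitwise AND/OR.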
    fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> {
        let vector_type = a.get_type();

        // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and
        // b get compared & spliced together, we get the numeric values instead of NaNs.
        let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b);
        let mask_type = b_nan_mask.get_type();
        let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
        let a_cast = self.context.new_bitcast(None, a, mask_type);
        let b_cast = self.context.new_bitcast(None, b, mask_type);
        let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast);
        let b = self.context.new_bitcast(None, res, vector_type);

        // now do the actual comparison
        let comparison_op = match direction {
            ExtremumOperation::Min => ComparisonOp::LessThan,
            ExtremumOperation::Max => ComparisonOp::GreaterThan,
        };
        let cmp = self.context.new_comparison(None, comparison_op, a, b);
        let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
        let res = (cmp & a_cast) | (cmp_inverted & res);
        self.context.new_bitcast(None, res, vector_type)
    }

    pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.vector_extremum(a, b, ExtremumOperation::Min)
    }

    #[cfg(feature="master")]
    pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_count = vector_type.get_num_units();
        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
        for i in 1..element_count {
            let elem = self.context
                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
                .to_rvalue();
            let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem);
            acc = self.select(cmp, acc, elem);
        }
        acc
    }

    #[cfg(not(feature="master"))]
    pub fn vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    pub fn vector_fmax(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.vector_extremum(a, b, ExtremumOperation::Max)
    }

    #[cfg(feature="master")]
    pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
        let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
        let element_count = vector_type.get_num_units();
        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
        for i in 1..element_count {
            let elem = self.context
                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
                .to_rvalue();
            let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem);
            acc = self.select(cmp, acc, elem);
        }
        acc
    }

    #[cfg(not(feature="master"))]
    pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

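    // NOTE: branchless lane-wise select: `cond != 0` produces an all-ones/all-zeros mask per
    // lane, which is ANDed with `then_val`, while its inversion (mask ^ -1) is ANDed with
    // `else_val`; ORing the two halves yields the selected lanes.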
    pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
        // cond is a vector of integers, not of bools.
        let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type");
        let num_units = vector_type.get_num_units();
        let element_type = vector_type.get_element_type();

        #[cfg(feature="master")]
        let (cond, element_type) = {
            // TODO(antoyo): dyncast_vector should not require a call to unqualified.
            let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type");
            let then_val_element_type = then_val_vector_type.get_element_type();
            let then_val_element_size = then_val_element_type.get_size();

            // NOTE: the mask needs to be of the same size as the other arguments in order for the &
            // operation to work.
            if then_val_element_size != element_type.get_size() {
                let new_element_type = self.type_ix(then_val_element_size as u64 * 8);
                let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64);
                let cond = self.context.convert_vector(None, cond, new_vector_type);
                (cond, new_element_type)
            }
            else {
                (cond, element_type)
            }
        };

        let cond_type = cond.get_type();

        let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
        let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);

        let result_type = then_val.get_type();

        let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
        // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
        // the & operation work.
        let then_val = self.bitcast_if_needed(then_val, masks.get_type());
        let then_vals = masks & then_val;

        let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units];
        let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones);
        let inverted_masks = masks ^ minus_ones;
        // NOTE: sometimes, the type of else_val can be different than the type of then_val in
        // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
        // operation to work.
        // TODO: remove bitcast now that vector types can be compared?
        let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
        let else_vals = inverted_masks & else_val;

        let res = then_vals | else_vals;
        self.bitcast_if_needed(res, result_type)
    }
}

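// Returns `a - b` in the lanes where `b >= a` and zero elsewhere, i.e. the adjustment used by
// `vector_reduce_min`/`vector_reduce_max` above; the comparison mask is bitcast to the operand
// type so that the bitwise AND also works on float vectors.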
fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
    let difference = a - b;
    let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a);
    // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
    // the & operation work.
    let a_type = a.get_type();
    let masks =
        if masks.get_type() != a_type {
            context.new_bitcast(None, masks, a_type)
        }
        else {
            masks
        };
    difference & masks
}

impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id).get_address(None)
    }
}

impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

pub trait ToGccComp {
    fn to_gcc_comparison(&self) -> ComparisonOp;
}

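// NOTE: gccjit comparisons take their signedness from the operand types, which is why the signed
// and unsigned variants of each integer predicate map to the same `ComparisonOp`.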
impl ToGccComp for IntPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        match *self {
            IntPredicate::IntEQ => ComparisonOp::Equals,
            IntPredicate::IntNE => ComparisonOp::NotEquals,
            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntULT => ComparisonOp::LessThan,
            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntSLT => ComparisonOp::LessThan,
            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
        }
    }
}

impl ToGccComp for RealPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        // TODO(antoyo): check that ordered vs non-ordered is respected.
        match *self {
            RealPredicate::RealPredicateFalse => unreachable!(),
            RealPredicate::RealOEQ => ComparisonOp::Equals,
            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealOLT => ComparisonOp::LessThan,
            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
            RealPredicate::RealONE => ComparisonOp::NotEquals,
            RealPredicate::RealORD => unreachable!(),
            RealPredicate::RealUNO => unreachable!(),
            RealPredicate::RealUEQ => ComparisonOp::Equals,
            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealULT => ComparisonOp::LessThan,
            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
            RealPredicate::RealUNE => ComparisonOp::NotEquals,
            RealPredicate::RealPredicateTrue => unreachable!(),
        }
    }
}

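// NOTE: the variant order mirrors GCC's __ATOMIC_* memory-order constants (RELAXED = 0 through
// SEQ_CST = 5), so the `#[repr(C)]` discriminants can be passed directly to the atomic builtins.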
#[repr(C)]
#[allow(non_camel_case_types)]
enum MemOrdering {
    __ATOMIC_RELAXED,
    __ATOMIC_CONSUME,
    __ATOMIC_ACQUIRE,
    __ATOMIC_RELEASE,
    __ATOMIC_ACQ_REL,
    __ATOMIC_SEQ_CST,
}

trait ToGccOrdering {
    fn to_gcc(self) -> i32;
}

impl ToGccOrdering for AtomicOrdering {
    fn to_gcc(self) -> i32 {
        use MemOrdering::*;

        let ordering =
            match self {
                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
                AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
                AtomicOrdering::Release => __ATOMIC_RELEASE,
                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
            };
        ordering as i32
    }
}