// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ic/binary-op-assembler.h"

#include "src/common/globals.h"

namespace v8 {
namespace internal {

TNode<Object> BinaryOpAssembler::Generate_AddWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
  // Shared entry for floating point addition.
  Label do_fadd(this), if_lhsisnotnumber(this, Label::kDeferred),
      check_rhsisoddball(this, Label::kDeferred),
      call_with_oddball_feedback(this), call_with_any_feedback(this),
      call_add_stub(this), end(this), bigint(this, Label::kDeferred);
  TVARIABLE(Float64T, var_fadd_lhs);
  TVARIABLE(Float64T, var_fadd_rhs);
  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Object, var_result);

  // Check if the {lhs} is a Smi or a HeapObject.
  Label if_lhsissmi(this);
  // If rhs is known to be an Smi we want to fast path Smi operation. This is
  // for AddSmi operation. For the normal Add operation, we want to fast path
  // both Smi and Number operations, so this path should not be marked as
  // Deferred.
  Label if_lhsisnotsmi(this,
                       rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
  Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);

  BIND(&if_lhsissmi);
  {
    Comment("lhs is Smi");
    TNode<Smi> lhs_smi = CAST(lhs);
    if (!rhs_known_smi) {
      // Check if the {rhs} is also a Smi.
      Label if_rhsissmi(this), if_rhsisnotsmi(this);
      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);

      BIND(&if_rhsisnotsmi);
      {
        // Check if the {rhs} is a HeapNumber.
        TNode<HeapObject> rhs_heap_object = CAST(rhs);
        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);

        var_fadd_lhs = SmiToFloat64(lhs_smi);
        var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
        Goto(&do_fadd);
      }

      BIND(&if_rhsissmi);
    }

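    // Both operands are Smis here: either {rhs} was checked above, or it is
    // known to be a Smi from the AddSmi bytecode.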
    {
      Comment("perform smi operation");
      // If rhs is known to be an Smi we want to fast path Smi operation. This
      // is for AddSmi operation. For the normal Add operation, we want to fast
      // path both Smi and Number operations, so this path should not be marked
      // as Deferred.
      TNode<Smi> rhs_smi = CAST(rhs);
      Label if_overflow(this,
                        rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
      TNode<Smi> smi_result = TrySmiAdd(lhs_smi, rhs_smi, &if_overflow);
      // Not overflowed.
      {
        var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
        UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                       slot_id);
        var_result = smi_result;
        Goto(&end);
      }

      BIND(&if_overflow);
      {
        var_fadd_lhs = SmiToFloat64(lhs_smi);
        var_fadd_rhs = SmiToFloat64(rhs_smi);
        Goto(&do_fadd);
      }
    }
  }

  BIND(&if_lhsisnotsmi);
  {
    // Check if {lhs} is a HeapNumber.
    TNode<HeapObject> lhs_heap_object = CAST(lhs);
    GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);

    if (!rhs_known_smi) {
      // Check if the {rhs} is Smi.
      Label if_rhsissmi(this), if_rhsisnotsmi(this);
      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);

      BIND(&if_rhsisnotsmi);
      {
        // Check if the {rhs} is a HeapNumber.
        TNode<HeapObject> rhs_heap_object = CAST(rhs);
        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);

        var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
        var_fadd_rhs = LoadHeapNumberValue(rhs_heap_object);
        Goto(&do_fadd);
      }

      BIND(&if_rhsissmi);
    }
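    // {lhs} is a HeapNumber and {rhs} is a Smi: convert {rhs} to a double and
    // reuse the floating point addition path.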
    {
      var_fadd_lhs = LoadHeapNumberValue(lhs_heap_object);
      var_fadd_rhs = SmiToFloat64(CAST(rhs));
      Goto(&do_fadd);
    }
  }

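  // Both inputs have been converted to Float64: record kNumber feedback and
  // box the sum in a freshly allocated HeapNumber.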
  BIND(&do_fadd);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    TNode<Float64T> value =
        Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
    TNode<HeapNumber> result = AllocateHeapNumberWithValue(value);
    var_result = result;
    Goto(&end);
  }

  BIND(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    Label if_lhsisoddball(this), if_lhsisnotoddball(this);
    TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
    TNode<BoolT> lhs_is_oddball =
        InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
    Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);

    BIND(&if_lhsisoddball);
    {
      GotoIf(TaggedIsSmi(rhs), &call_with_oddball_feedback);

      // Check if {rhs} is a HeapNumber.
      Branch(IsHeapNumber(CAST(rhs)), &call_with_oddball_feedback,
             &check_rhsisoddball);
    }

    BIND(&if_lhsisnotoddball);
    {
      // Check if the {rhs} is a smi, and exit the string and bigint check
      // early if it is.
      GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
      TNode<HeapObject> rhs_heap_object = CAST(rhs);

      Label lhs_is_string(this), lhs_is_bigint(this);
      GotoIf(IsStringInstanceType(lhs_instance_type), &lhs_is_string);
      GotoIf(IsBigIntInstanceType(lhs_instance_type), &lhs_is_bigint);
      Goto(&call_with_any_feedback);

      BIND(&lhs_is_bigint);
      Branch(IsBigInt(rhs_heap_object), &bigint, &call_with_any_feedback);

      BIND(&lhs_is_string);
      {
        TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs_heap_object);

        // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
        // need an Oddball check.
        GotoIfNot(IsStringInstanceType(rhs_instance_type),
                  &call_with_any_feedback);

        var_type_feedback = SmiConstant(BinaryOperationFeedback::kString);
        UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector,
                       slot_id);
        var_result =
            CallBuiltin(Builtins::kStringAdd_CheckNone, context, lhs, rhs);

        Goto(&end);
      }
    }
  }

  BIND(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
    TNode<BoolT> rhs_is_oddball =
        InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
    GotoIf(rhs_is_oddball, &call_with_oddball_feedback);
    Goto(&call_with_any_feedback);
  }

  BIND(&bigint);
  {
    // Both {lhs} and {rhs} are of BigInt type.
    Label bigint_too_big(this);
    var_result = CallBuiltin(Builtins::kBigIntAddNoThrow, context, lhs, rhs);
    // Check for sentinel that signals BigIntTooBig exception.
    GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);

    var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    Goto(&end);

    BIND(&bigint_too_big);
    {
      // Update feedback to prevent deopt loop.
      UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                     maybe_feedback_vector, slot_id);
      ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
    }
  }

  BIND(&call_with_oddball_feedback);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
    Goto(&call_add_stub);
  }

  BIND(&call_with_any_feedback);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&call_add_stub);
  }

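  // Record the feedback collected above and delegate to the generic Add
  // builtin.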
  BIND(&call_add_stub);
  {
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    var_result = CallBuiltin(Builtins::kAdd, context, lhs, rhs);
    Goto(&end);
  }

  BIND(&end);
  return var_result.value();
}

TNode<Object> BinaryOpAssembler::Generate_BinaryOperationWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    const SmiOperation& smiOperation, const FloatOperation& floatOperation,
    Operation op, bool rhs_known_smi) {
  Label do_float_operation(this), end(this), call_stub(this),
      check_rhsisoddball(this, Label::kDeferred), call_with_any_feedback(this),
      if_lhsisnotnumber(this, Label::kDeferred),
      if_both_bigint(this, Label::kDeferred);
  TVARIABLE(Float64T, var_float_lhs);
  TVARIABLE(Float64T, var_float_rhs);
  TVARIABLE(Smi, var_type_feedback);
  TVARIABLE(Object, var_result);

  Label if_lhsissmi(this);
  // If rhs is known to be an Smi (in the SubSmi, MulSmi, DivSmi, ModSmi
  // bytecode handlers) we want to fast path Smi operation. For the normal
  // operation, we want to fast path both Smi and Number operations, so this
  // path should not be marked as Deferred.
  Label if_lhsisnotsmi(this,
                       rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
  Branch(TaggedIsNotSmi(lhs), &if_lhsisnotsmi, &if_lhsissmi);

  // Check if the {lhs} is a Smi or a HeapObject.
  BIND(&if_lhsissmi);
  {
    Comment("lhs is Smi");
    TNode<Smi> lhs_smi = CAST(lhs);
    if (!rhs_known_smi) {
      // Check if the {rhs} is also a Smi.
      Label if_rhsissmi(this), if_rhsisnotsmi(this);
      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);

      BIND(&if_rhsisnotsmi);
      {
        // Check if {rhs} is a HeapNumber.
        TNode<HeapObject> rhs_heap_object = CAST(rhs);
        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);

        // Perform a floating point operation.
        var_float_lhs = SmiToFloat64(lhs_smi);
        var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
        Goto(&do_float_operation);
      }

      BIND(&if_rhsissmi);
    }

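    // Both operands are Smis: let the operation-specific Smi handler compute
    // the result and pick the type feedback.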
    {
      Comment("perform smi operation");
      var_result = smiOperation(lhs_smi, CAST(rhs), &var_type_feedback);
      UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
      Goto(&end);
    }
  }

  BIND(&if_lhsisnotsmi);
  {
    Comment("lhs is not Smi");
    // Check if the {lhs} is a HeapNumber.
    TNode<HeapObject> lhs_heap_object = CAST(lhs);
    GotoIfNot(IsHeapNumber(lhs_heap_object), &if_lhsisnotnumber);

    if (!rhs_known_smi) {
      // Check if the {rhs} is a Smi.
      Label if_rhsissmi(this), if_rhsisnotsmi(this);
      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);

      BIND(&if_rhsisnotsmi);
      {
        // Check if the {rhs} is a HeapNumber.
        TNode<HeapObject> rhs_heap_object = CAST(rhs);
        GotoIfNot(IsHeapNumber(rhs_heap_object), &check_rhsisoddball);

        // Perform a floating point operation.
        var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
        var_float_rhs = LoadHeapNumberValue(rhs_heap_object);
        Goto(&do_float_operation);
      }

      BIND(&if_rhsissmi);
    }

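    // {lhs} is a HeapNumber and {rhs} is a Smi: convert {rhs} to a double.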
    {
      // Perform floating point operation.
      var_float_lhs = LoadHeapNumberValue(lhs_heap_object);
      var_float_rhs = SmiToFloat64(CAST(rhs));
      Goto(&do_float_operation);
    }
  }

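  // Both inputs have been converted to Float64: record kNumber feedback, apply
  // the operation-specific float handler, and box the result in a HeapNumber.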
  BIND(&do_float_operation);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    TNode<Float64T> lhs_value = var_float_lhs.value();
    TNode<Float64T> rhs_value = var_float_rhs.value();
    TNode<Float64T> value = floatOperation(lhs_value, rhs_value);
    var_result = AllocateHeapNumberWithValue(value);
    Goto(&end);
  }

  BIND(&if_lhsisnotnumber);
  {
    // No checks on rhs are done yet. We just know lhs is not a number or Smi.
    Label if_left_bigint(this), if_left_oddball(this);
    TNode<Uint16T> lhs_instance_type = LoadInstanceType(CAST(lhs));
    GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_left_bigint);
    TNode<BoolT> lhs_is_oddball =
        InstanceTypeEqual(lhs_instance_type, ODDBALL_TYPE);
    Branch(lhs_is_oddball, &if_left_oddball, &call_with_any_feedback);

    BIND(&if_left_oddball);
    {
      Label if_rhsissmi(this), if_rhsisnotsmi(this);
      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);

      BIND(&if_rhsissmi);
      {
        var_type_feedback =
            SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
        Goto(&call_stub);
      }

      BIND(&if_rhsisnotsmi);
      {
        // Check if {rhs} is a HeapNumber.
        GotoIfNot(IsHeapNumber(CAST(rhs)), &check_rhsisoddball);

        var_type_feedback =
            SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
        Goto(&call_stub);
      }
    }

    BIND(&if_left_bigint);
    {
      GotoIf(TaggedIsSmi(rhs), &call_with_any_feedback);
      Branch(IsBigInt(CAST(rhs)), &if_both_bigint, &call_with_any_feedback);
    }
  }

  BIND(&check_rhsisoddball);
  {
    // Check if rhs is an oddball. At this point we know lhs is either a
    // Smi or number or oddball and rhs is not a number or Smi.
    TNode<Uint16T> rhs_instance_type = LoadInstanceType(CAST(rhs));
    TNode<BoolT> rhs_is_oddball =
        InstanceTypeEqual(rhs_instance_type, ODDBALL_TYPE);
    GotoIfNot(rhs_is_oddball, &call_with_any_feedback);

    var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumberOrOddball);
    Goto(&call_stub);
  }

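  // Both {lhs} and {rhs} are BigInts.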
  BIND(&if_both_bigint);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kBigInt);
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    if (op == Operation::kSubtract) {
      Label bigint_too_big(this);
      var_result =
          CallBuiltin(Builtins::kBigIntSubtractNoThrow, context, lhs, rhs);

      // Check for sentinel that signals BigIntTooBig exception.
      GotoIf(TaggedIsSmi(var_result.value()), &bigint_too_big);
      Goto(&end);

      BIND(&bigint_too_big);
      {
        // Update feedback to prevent deopt loop.
        UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny),
                       maybe_feedback_vector, slot_id);
        ThrowRangeError(context, MessageTemplate::kBigIntTooBig);
      }
    } else {
      var_result = CallRuntime(Runtime::kBigIntBinaryOp, context, lhs, rhs,
                               SmiConstant(op));
      Goto(&end);
    }
  }

  BIND(&call_with_any_feedback);
  {
    var_type_feedback = SmiConstant(BinaryOperationFeedback::kAny);
    Goto(&call_stub);
  }

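  // Record the feedback collected above and delegate to the generic builtin
  // for {op}.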
  BIND(&call_stub);
  {
    UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot_id);
    TNode<Object> result;
    switch (op) {
      case Operation::kSubtract:
        result = CallBuiltin(Builtins::kSubtract, context, lhs, rhs);
        break;
      case Operation::kMultiply:
        result = CallBuiltin(Builtins::kMultiply, context, lhs, rhs);
        break;
      case Operation::kDivide:
        result = CallBuiltin(Builtins::kDivide, context, lhs, rhs);
        break;
      case Operation::kModulus:
        result = CallBuiltin(Builtins::kModulus, context, lhs, rhs);
        break;
      default:
        UNREACHABLE();
    }
    var_result = result;
    Goto(&end);
  }

  BIND(&end);
  return var_result.value();
}

TNode<Object> BinaryOpAssembler::Generate_SubtractWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    Label end(this);
    TVARIABLE(Number, var_result);
    // If rhs is known to be an Smi (for SubSmi) we want to fast path Smi
    // operation. For the normal Sub operation, we want to fast path both
    // Smi and Number operations, so this path should not be marked as
    // Deferred.
    Label if_overflow(this,
                      rhs_known_smi ? Label::kDeferred : Label::kNonDeferred);
    var_result = TrySmiSub(lhs, rhs, &if_overflow);
    *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&end);

    BIND(&if_overflow);
    {
      *var_type_feedback = SmiConstant(BinaryOperationFeedback::kNumber);
      TNode<Float64T> value = Float64Sub(SmiToFloat64(lhs), SmiToFloat64(rhs));
      var_result = AllocateHeapNumberWithValue(value);
      Goto(&end);
    }

    BIND(&end);
    return var_result.value();
  };
  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
    return Float64Sub(lhs, rhs);
  };
  return Generate_BinaryOperationWithFeedback(
      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kSubtract, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_MultiplyWithFeedback(
    TNode<Context> context, TNode<Object> lhs, TNode<Object> rhs,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
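  // SmiMul may produce a HeapNumber (e.g. on overflow), so the feedback is
  // chosen based on the type of the result.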
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    TNode<Number> result = SmiMul(lhs, rhs);
    *var_type_feedback = SelectSmiConstant(
        TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
        BinaryOperationFeedback::kNumber);
    return result;
  };
  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
    return Float64Mul(lhs, rhs);
  };
  return Generate_BinaryOperationWithFeedback(
      context, lhs, rhs, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kMultiply, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_DivideWithFeedback(
    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    TVARIABLE(Object, var_result);
    // If rhs is known to be an Smi (for DivSmi) we want to fast path Smi
    // operation. For the normal Div operation, we want to fast path both
    // Smi and Number operations, so this path should not be marked as
    // Deferred.
    Label bailout(this, rhs_known_smi ? Label::kDeferred : Label::kNonDeferred),
        end(this);
    var_result = TrySmiDiv(lhs, rhs, &bailout);
    *var_type_feedback = SmiConstant(BinaryOperationFeedback::kSignedSmall);
    Goto(&end);

    BIND(&bailout);
    {
      *var_type_feedback =
          SmiConstant(BinaryOperationFeedback::kSignedSmallInputs);
      TNode<Float64T> value = Float64Div(SmiToFloat64(lhs), SmiToFloat64(rhs));
      var_result = AllocateHeapNumberWithValue(value);
      Goto(&end);
    }

    BIND(&end);
    return var_result.value();
  };
  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
    return Float64Div(lhs, rhs);
  };
  return Generate_BinaryOperationWithFeedback(
      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kDivide, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_ModulusWithFeedback(
    TNode<Context> context, TNode<Object> dividend, TNode<Object> divisor,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
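  // SmiMod may produce a HeapNumber (e.g. for a -0 result), so the feedback is
  // chosen based on the type of the result.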
  auto smiFunction = [=](TNode<Smi> lhs, TNode<Smi> rhs,
                         TVariable<Smi>* var_type_feedback) {
    TNode<Number> result = SmiMod(lhs, rhs);
    *var_type_feedback = SelectSmiConstant(
        TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
        BinaryOperationFeedback::kNumber);
    return result;
  };
  auto floatFunction = [=](TNode<Float64T> lhs, TNode<Float64T> rhs) {
    return Float64Mod(lhs, rhs);
  };
  return Generate_BinaryOperationWithFeedback(
      context, dividend, divisor, slot_id, maybe_feedback_vector, smiFunction,
      floatFunction, Operation::kModulus, rhs_known_smi);
}

TNode<Object> BinaryOpAssembler::Generate_ExponentiateWithFeedback(
    TNode<Context> context, TNode<Object> base, TNode<Object> exponent,
    TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector,
    bool rhs_known_smi) {
  // We currently don't optimize exponentiation based on feedback.
  TNode<Smi> dummy_feedback = SmiConstant(BinaryOperationFeedback::kAny);
  UpdateFeedback(dummy_feedback, maybe_feedback_vector, slot_id);
  return CallBuiltin(Builtins::kExponentiate, context, base, exponent);
}

TNode<Object> BinaryOpAssembler::Generate_BitwiseBinaryOpWithOptionalFeedback(
    Operation bitwise_op, TNode<Object> left, TNode<Object> right,
    TNode<Context> context, TVariable<Smi>* feedback) {
  TVARIABLE(Object, result);
  TVARIABLE(Smi, var_left_feedback);
  TVARIABLE(Smi, var_right_feedback);
  TVARIABLE(Word32T, var_left_word32);
  TVARIABLE(Word32T, var_right_word32);
  TVARIABLE(BigInt, var_left_bigint);
  TVARIABLE(BigInt, var_right_bigint);
  // These are the variables that are passed to BigIntBinaryOp. They are not
  // guaranteed to be BigInts because the Runtime call handles throwing
  // exceptions when only one side is a BigInt.
  TVARIABLE(Object, var_left_maybe_bigint, left);
  TVARIABLE(Numeric, var_right_maybe_bigint);
  Label done(this);
  Label if_left_number(this), do_number_op(this);
  Label if_left_bigint(this), do_bigint_op(this);

  TaggedToWord32OrBigIntWithFeedback(
      context, left, &if_left_number, &var_left_word32, &if_left_bigint,
      &var_left_bigint, feedback ? &var_left_feedback : nullptr);

  Label right_is_bigint(this);
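  // {left} was converted to a Word32: now convert {right} to a Word32 or
  // BigInt as well.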
  BIND(&if_left_number);
  {
    TaggedToWord32OrBigIntWithFeedback(
        context, right, &do_number_op, &var_right_word32, &right_is_bigint,
        &var_right_bigint, feedback ? &var_right_feedback : nullptr);
  }

  BIND(&right_is_bigint);
  {
    // At this point it's guaranteed that the op will fail because the RHS is a
    // BigInt while the LHS is not, but that's ok because the Runtime call will
    // throw the exception.
    var_right_maybe_bigint = var_right_bigint.value();
    Goto(&do_bigint_op);
  }

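  // Both operands converted to Word32: perform the bitwise operation and, if
  // feedback is requested, combine the result type with the input feedback.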
  BIND(&do_number_op);
  {
    result = BitwiseOp(var_left_word32.value(), var_right_word32.value(),
                       bitwise_op);

    if (feedback) {
      TNode<Smi> result_type = SelectSmiConstant(
          TaggedIsSmi(result.value()), BinaryOperationFeedback::kSignedSmall,
          BinaryOperationFeedback::kNumber);
      TNode<Smi> input_feedback =
          SmiOr(var_left_feedback.value(), var_right_feedback.value());
      *feedback = SmiOr(result_type, input_feedback);
    }
    Goto(&done);
  }

  // BigInt cases.
  BIND(&if_left_bigint);
  {
    TaggedToNumericWithFeedback(context, right, &var_right_maybe_bigint,
                                &var_right_feedback);
    var_left_maybe_bigint = var_left_bigint.value();
    Goto(&do_bigint_op);
  }

  BIND(&do_bigint_op);
  {
    if (feedback) {
      *feedback = SmiOr(var_left_feedback.value(), var_right_feedback.value());
    }
    result = CallRuntime(
        Runtime::kBigIntBinaryOp, context, var_left_maybe_bigint.value(),
        var_right_maybe_bigint.value(), SmiConstant(bitwise_op));
    Goto(&done);
  }

  BIND(&done);
  return result.value();
}

}  // namespace internal
}  // namespace v8