1 /*
2 * Copyright 2017, The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <algorithm>
18 #include <iostream>
19 #include <string>
20
21 #include "clang/AST/APValue.h"
22
23 #include "slang_assert.h"
24 #include "slang_rs_export_foreach.h"
25 #include "slang_rs_export_func.h"
26 #include "slang_rs_export_reduce.h"
27 #include "slang_rs_export_type.h"
28 #include "slang_rs_export_var.h"
29 #include "slang_rs_reflection.h"
30 #include "slang_rs_reflection_state.h"
31
32 #include "bcinfo/MetadataExtractor.h"
33
34 namespace slang {
35
equal(const clang::APValue & a,const clang::APValue & b)36 static bool equal(const clang::APValue &a, const clang::APValue &b) {
37 if (a.getKind() != b.getKind())
38 return false;
39 switch (a.getKind()) {
40 case clang::APValue::Float:
41 return a.getFloat().bitwiseIsEqual(b.getFloat());
42 case clang::APValue::Int:
43 return a.getInt() == b.getInt();
44 case clang::APValue::Vector: {
45 unsigned NumElements = a.getVectorLength();
46 if (NumElements != b.getVectorLength())
47 return false;
48 for (unsigned i = 0; i < NumElements; ++i) {
49 if (!equal(a.getVectorElt(i), b.getVectorElt(i)))
50 return false;
51 }
52 return true;
53 }
54 default:
55 slangAssert(false && "unexpected APValue kind");
56 return false;
57 }
58 }
59
// The pass state machine must be at rest when the object dies: either it
// never started (S_Initial), both the 32-bit and 64-bit passes completed
// (S_ClosedJava64), or processing was abandoned (S_Bad).
ReflectionState::~ReflectionState() {
  slangAssert(mState==S_Initial || mState==S_ClosedJava64 || mState==S_Bad);
  delete mStringSet;
}
64
// Begin the 32-bit (collecting) pass for a compilation of NumFiles source
// files.  Allocates the string-interning set used by canon().  A no-op when
// reflection-state checking is disabled.
void ReflectionState::openJava32(size_t NumFiles) {
  if (kDisabled)
    return;
  slangAssert(mState==S_Initial);
  mState = S_OpenJava32;
  mStringSet = new llvm::StringSet<>;
  mFiles.BeginCollecting(NumFiles);
}
73
// End the 32-bit (collecting) pass.  No foreach, output class, or record
// bookkeeping may still be open.  The RSContext is per-pass, so the pointer
// is dropped here.
void ReflectionState::closeJava32() {
  if (kDisabled)
    return;
  slangAssert(mState==S_OpenJava32 && (mForEachOpen < 0) && !mOutputClassOpen && (mRecordsState != RS_Open));
  mState = S_ClosedJava32;
  mRSC = nullptr;
}
81
// Begin the 64-bit (using) pass: replay the per-file state collected during
// the 32-bit pass, checking for cross-target inconsistencies as we go.
void ReflectionState::openJava64() {
  if (kDisabled)
    return;
  slangAssert(mState==S_ClosedJava32);
  mState = S_OpenJava64;
  mFiles.BeginUsing();
}
89
// End the 64-bit (using) pass; mirrors closeJava32().  After this the only
// legal operation is destruction.
void ReflectionState::closeJava64() {
  if (kDisabled)
    return;
  slangAssert(mState==S_OpenJava64 && (mForEachOpen < 0) && !mOutputClassOpen && (mRecordsState != RS_Open));
  mState = S_ClosedJava64;
  mRSC = nullptr;
}
97
// Intern String in mStringSet and return a StringRef backed by the set, so
// it remains valid for the lifetime of this ReflectionState.  Only legal
// during the collecting (32-bit) pass, which is when mStringSet exists.
llvm::StringRef ReflectionState::canon(const std::string &String) {
  slangAssert(isCollecting());
  return mStringSet->insert(String).first->getKey();
}
102
// Canonical type name used as the cross-pass comparison key: the pseudo-C
// rendering of T's name, so the 32-bit and 64-bit passes describe the same
// type with the same string.
std::string ReflectionState::getUniqueTypeName(const RSExportType *T) {
  return RSReflectionJava::GetTypeName(T, RSReflectionJava::TypeNamePseudoC);
}
106
// Advance to the next source file of the compilation.
// Collecting (32-bit) pass: record the file's package and source file name.
// Using (64-bit) pass: files must arrive in the same order (asserted via
// the source file name) and the Java package must match the 32-bit value;
// a package mismatch is diagnosed but not fatal.
void ReflectionState::nextFile(const RSContext *RSC,
                               const std::string &PackageName,
                               const std::string &RSSourceFileName) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  mRSC = RSC;

  // Record bookkeeping for the previous file must already be finished.
  slangAssert(mRecordsState != RS_Open);
  mRecordsState = RS_Initial;

  if (isCollecting()) {
    File &file = mFiles.CollectNext();
    file.mPackageName = PackageName;
    file.mRSSourceFileName = RSSourceFileName;
  }
  if (isUsing()) {
    File &file = mFiles.UseNext();
    slangAssert(file.mRSSourceFileName == RSSourceFileName);
    if (file.mPackageName != PackageName)
      mRSC->ReportError("in file '%0' Java package name is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << RSSourceFileName << file.mPackageName << PackageName;
  }
}
133
dump()134 void ReflectionState::dump() {
135 const size_t NumFiles = mFiles.Size();
136 for (int i = 0; i < NumFiles; ++i) {
137 const File &file = mFiles[i];
138 std::cout << "file = \"" << file.mRSSourceFileName << "\", "
139 << "package = \"" << file.mPackageName << "\"" << std::endl;
140
141 // NOTE: "StringMap iteration order, however, is not guaranteed to
142 // be deterministic". So sort before dumping.
143 typedef const llvm::StringMap<File::Record>::MapEntryTy *RecordsEntryTy;
144 std::vector<RecordsEntryTy> Records;
145 Records.reserve(file.mRecords.size());
146 for (auto I = file.mRecords.begin(), E = file.mRecords.end(); I != E; I++)
147 Records.push_back(&(*I));
148 std::sort(Records.begin(), Records.end(),
149 [](RecordsEntryTy a, RecordsEntryTy b) { return a->getKey().compare(b->getKey())==-1; });
150 for (auto Record : Records) {
151 const auto &Val = Record->getValue();
152 std::cout << " (Record) name=\"" << Record->getKey().str() << "\""
153 << " allocSize=" << Val.mAllocSize
154 << " postPadding=" << Val.mPostPadding
155 << " ordinary=" << Val.mOrdinary
156 << " matchedByName=" << Val.mMatchedByName
157 << std::endl;
158 const size_t NumFields = Val.mFieldCount;
159 for (int fieldIdx = 0; fieldIdx < NumFields; ++fieldIdx) {
160 const auto &field = Val.mFields[fieldIdx];
161 std::cout << " (Field) name=\"" << field.mName << "\" ("
162 << field.mPrePadding << ", \"" << field.mType.str()
163 << "\"(" << field.mStoreSize << ")@" << field.mOffset
164 << ", " << field.mPostPadding << ")" << std::endl;
165 }
166 }
167
168 const size_t NumVars = file.mVariables.Size();
169 for (int varIdx = 0; varIdx < NumVars; ++varIdx) {
170 const auto &var = file.mVariables[varIdx];
171 std::cout << " (Var) name=\"" << var.mName << "\" type=\"" << var.mType.str()
172 << "\" const=" << var.mIsConst << " initialized=" << (var.mInitializerCount != 0)
173 << " allocSize=" << var.mAllocSize << std::endl;
174 }
175
176 for (int feIdx = 0; feIdx < file.mForEachCount; ++feIdx) {
177 const auto &fe = file.mForEaches[feIdx];
178 std::cout << " (ForEach) ordinal=" << feIdx << " state=";
179 switch (fe.mState) {
180 case File::ForEach::S_Initial:
181 std::cout << "initial" << std::endl;
182 continue;
183 case File::ForEach::S_Collected:
184 std::cout << "collected";
185 break;
186 case File::ForEach::S_UseMatched:
187 std::cout << "usematched";
188 break;
189 default:
190 std::cout << fe.mState;
191 break;
192 }
193 std::cout << " name=\"" << fe.mName << "\" kernel=" << fe.mIsKernel
194 << " hasOut=" << fe.mHasOut << " out=\"" << fe.mOut.str()
195 << "\" metadata=0x" << std::hex << fe.mSignatureMetadata << std::dec
196 << std::endl;
197 const size_t NumIns = fe.mIns.Size();
198 for (int insIdx = 0; insIdx < NumIns; ++insIdx)
199 std::cout << " (In) " << fe.mIns[insIdx].str() << std::endl;
200 const size_t NumParams = fe.mParams.Size();
201 for (int paramsIdx = 0; paramsIdx < NumParams; ++paramsIdx)
202 std::cout << " (Param) " << fe.mParams[paramsIdx].str() << std::endl;
203 }
204
205 for (auto feBad : mForEachesBad) {
206 std::cout << " (ForEachBad) ordinal=" << feBad->getOrdinal()
207 << " name=\"" << feBad->getName() << "\""
208 << std::endl;
209 }
210
211 const size_t NumInvokables = file.mInvokables.Size();
212 for (int invIdx = 0; invIdx < NumInvokables; ++invIdx) {
213 const auto &inv = file.mInvokables[invIdx];
214 std::cout << " (Invokable) name=\"" << inv.mName << "\"" << std::endl;
215 const size_t NumParams = inv.mParamCount;
216 for (int paramsIdx = 0; paramsIdx < NumParams; ++paramsIdx)
217 std::cout << " (Param) " << inv.mParams[paramsIdx].str() << std::endl;
218 }
219
220 const size_t NumReduces = file.mReduces.Size();
221 for (int redIdx = 0; redIdx < NumReduces; ++redIdx) {
222 const auto &red = file.mReduces[redIdx];
223 std::cout << " (Reduce) name=\"" << red.mName
224 << "\" result=\"" << red.mResult.str()
225 << "\" exportable=" << red.mIsExportable
226 << std::endl;
227 const size_t NumIns = red.mAccumInCount;
228 for (int insIdx = 0; insIdx < NumIns; ++insIdx)
229 std::cout << " (In) " << red.mAccumIns[insIdx].str() << std::endl;
230 }
231 }
232 }
233
234 // ForEach /////////////////////////////////////////////////////////////////////////////////////
235
// Prepare per-file foreach bookkeeping.  Count is the number of foreach
// kernel ordinals this file exports.  Collecting pass: allocate the ForEach
// array.  Using pass: reset the mismatch tracking consumed by
// endForEaches().
void ReflectionState::beginForEaches(size_t Count) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  if (isCollecting()) {
    auto &file = mFiles.Current();
    file.mForEaches = new File::ForEach[Count];
    file.mForEachCount = Count;
  }
  if (isUsing()) {
    slangAssert(mForEachesBad.empty());
    mNumForEachesMatchedByOrdinal = 0;
  }
}
251
252 // Keep this in sync with RSReflectionJava::genExportForEach().
// Record (collecting) or check (using) the scalar properties of one foreach
// kernel: name, ordinal, kernel style, output type, and the counts of
// inputs and usrData parameters.  Per-element input/param types arrive via
// addForEachIn()/addForEachParam(), bracketed by this and endForEach().
// Kernels that fail to match by ordinal or name are deferred to
// mForEachesBad and diagnosed in endForEaches(), because declaration order
// determines slot numbers.
void ReflectionState::beginForEach(const RSExportForEach *EF) {
  slangAssert(!isClosed() && (mForEachOpen < 0));
  if (!isActive())
    return;

  const bool IsKernel = EF->isKernelStyle();
  const std::string Name = EF->getName();
  const unsigned Ordinal = EF->getOrdinal();
  const size_t InCount = EF->getInTypes().size();
  const size_t ParamCount = EF->params_count();

  // For non-__attribute__((kernel)) kernels the out type is a pointer;
  // compare pointee types instead.
  const RSExportType *OET = EF->getOutType();
  if (OET && !IsKernel) {
    slangAssert(OET->getClass() == RSExportType::ExportClassPointer);
    OET = static_cast<const RSExportPointerType *>(OET)->getPointeeType();
  }
  // The empty string stands for an untyped (void*) output.
  const std::string OutType = (OET ? getUniqueTypeName(OET) : "");
  const bool HasOut = (EF->hasOut() || EF->hasReturn());

  mForEachOpen = Ordinal;
  mForEachFatal = true; // we'll set this to false if everything looks ok

  auto &file = mFiles.Current();
  auto &foreaches = file.mForEaches;
  if (isCollecting()) {
    slangAssert(Ordinal < file.mForEachCount);
    auto &foreach = foreaches[Ordinal];
    slangAssert(foreach.mState == File::ForEach::S_Initial);
    foreach.mState = File::ForEach::S_Collected;
    foreach.mName = Name;
    foreach.mIns.BeginCollecting(InCount);
    foreach.mParams.BeginCollecting(ParamCount);
    foreach.mOut = canon(OutType);
    foreach.mHasOut = HasOut;
    foreach.mSignatureMetadata = 0;
    foreach.mIsKernel = IsKernel;
  }
  if (isUsing()) {
    // Ordinal beyond the 32-bit count: 64-bit-only kernel, diagnosed later
    // in endForEaches().
    if (Ordinal >= file.mForEachCount) {
      mForEachesBad.push_back(EF);
      return;
    }

    auto &foreach = foreaches[Ordinal];
    slangAssert(foreach.mState == File::ForEach::S_Collected);
    foreach.mState = File::ForEach::S_UseMatched;
    ++mNumForEachesMatchedByOrdinal;

    if (foreach.mName != Name) {
      // Order matters because it determines slot number
      mForEachesBad.push_back(EF);
      return;
    }

    // At this point, we have matching ordinal and matching name.

    if (foreach.mIsKernel != IsKernel) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has __attribute__((kernel)) for %select{32|64}1-bit targets "
                        "but not for %select{64|32}1-bit targets")
          << Name << IsKernel;
      return;
    }

    if ((foreach.mHasOut != HasOut) || !foreach.mOut.equals(OutType)) {
      // There are several different patterns we need to handle:
      // (1) Two different non-void* output types
      // (2) One non-void* output type, one void* output type
      // (3) One non-void* output type, one no-output
      // (4) One void* output type, one no-output
      if (foreach.mHasOut && HasOut) {
        if (foreach.mOut.size() && OutType.size()) {
          // (1) Two different non-void* output types
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for 32-bit targets "
                            "but output type '%2' for 64-bit targets")
              << Name << foreach.mOut.str() << OutType;
        } else {
          // (2) One non-void* return type, one void* output type.
          // Exactly one of the two names is empty, so the concatenation
          // below yields the non-empty (typed) one.
          const bool hasTyped64 = OutType.size();
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for %select{32|64}2-bit targets "
                            "but has untyped output for %select{64|32}2-bit targets")
              << Name << (foreach.mOut.str() + OutType) << hasTyped64;
        }
      } else {
        const std::string CombinedOutType = (foreach.mOut.str() + OutType);
        if (CombinedOutType.size()) {
          // (3) One non-void* output type, one no-output
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has output type '%1' for %select{32|64}2-bit targets "
                            "but no output for %select{64|32}2-bit targets")
              << Name << CombinedOutType << HasOut;
        } else {
          // (4) One void* output type, one no-output
          mRSC->ReportError(EF->getLocation(),
                            "foreach kernel '%0' has untyped output for %select{32|64}1-bit targets "
                            "but no output for %select{64|32}1-bit targets")
              << Name << HasOut;
        }
      }
    }

    bool BadCount = false;
    if (foreach.mIns.Size() != InCount) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has %1 input%s1 for 32-bit targets "
                        "but %2 input%s2 for 64-bit targets")
          << Name << unsigned(foreach.mIns.Size()) << unsigned(InCount);
      BadCount = true;
    }
    if (foreach.mParams.Size() != ParamCount) {
      mRSC->ReportError(EF->getLocation(),
                        "foreach kernel '%0' has %1 usrData parameter%s1 for 32-bit targets "
                        "but %2 usrData parameter%s2 for 64-bit targets")
          << Name << unsigned(foreach.mParams.Size()) << unsigned(ParamCount);
      BadCount = true;
    }

    // A count mismatch leaves mForEachFatal set: the element-by-element
    // walks in addForEachIn()/addForEachParam() could not line up.
    if (BadCount)
      return;

    foreach.mIns.BeginUsing();
    foreach.mParams.BeginUsing();
  }

  mForEachFatal = false;
}
381
// Record (collecting) or check (using) the type of the next input of
// foreach kernel EF.  Inputs are visited in declaration order, so the
// parallel walk through mIns lines up 32-bit inputs with 64-bit inputs.
void ReflectionState::addForEachIn(const RSExportForEach *EF, const RSExportType *Type) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen == EF->getOrdinal());

  // Type may be nullptr in the case of void*. See RSExportForEach::Create().
  if (Type && !EF->isKernelStyle()) {
    slangAssert(Type->getClass() == RSExportType::ExportClassPointer);
    Type = static_cast<const RSExportPointerType *>(Type)->getPointeeType();
  }
  // The empty string stands for an untyped (void*) input.
  const std::string TypeName = (Type ? getUniqueTypeName(Type) : std::string());

  auto &ins = mFiles.Current().mForEaches[EF->getOrdinal()].mIns;
  if (isCollecting()) {
    ins.CollectNext() = canon(TypeName);
  }
  if (isUsing()) {
    // Skip per-input diagnostics when beginForEach() already found a fatal
    // mismatch for this kernel.
    if (mForEachFatal)
      return;

    if (!ins.UseNext().equals(TypeName)) {
      if (ins.Current().size() && TypeName.size()) {
        // Two different non-void* input types.
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 input of foreach kernel '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << unsigned(ins.CurrentIdx() + 1)
            << EF->getName()
            << ins.Current().str()
            << TypeName;
      } else {
        // One typed input, one void* input.  Exactly one of the names is
        // empty, so the concatenation yields the non-empty (typed) one.
        const bool hasType64 = TypeName.size();
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 input of foreach kernel '%1' "
                          "has type '%2' for %select{32|64}3-bit targets "
                          "but is untyped for %select{64|32}3-bit targets")
            << unsigned(ins.CurrentIdx() + 1)
            << EF->getName()
            << (ins.Current().str() + TypeName)
            << hasType64;
      }
    }
  }
}
428
// Record (collecting) or check (using) the type of the next usrData
// parameter of foreach kernel EF.  Parameters are visited in declaration
// order so the parallel walk through mParams lines up across passes.
void ReflectionState::addForEachParam(const RSExportForEach *EF, const RSExportType *Type) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen == EF->getOrdinal());

  const std::string TypeName = getUniqueTypeName(Type);

  auto &params = mFiles.Current().mForEaches[EF->getOrdinal()].mParams;
  if (isCollecting()) {
    params.CollectNext() = canon(TypeName);
  }
  if (isUsing()) {
    // Skip per-parameter diagnostics when beginForEach() already found a
    // fatal mismatch for this kernel.
    if (mForEachFatal)
      return;

    if (!params.UseNext().equals(TypeName)) {
      mRSC->ReportError(EF->getLocation(),
                        "%ordinal0 usrData parameter of foreach kernel '%1' "
                        "has type '%2' for 32-bit targets "
                        "but type '%3' for 64-bit targets")
          << unsigned(params.CurrentIdx() + 1)
          << EF->getName()
          << params.Current().str()
          << TypeName;
    }
  }
}
458
addForEachSignatureMetadata(const RSExportForEach * EF,unsigned Metadata)459 void ReflectionState::addForEachSignatureMetadata(const RSExportForEach *EF, unsigned Metadata) {
460 slangAssert(!isClosed());
461 if (!isActive())
462 return;
463
464 slangAssert(mForEachOpen == EF->getOrdinal());
465
466 // These are properties in the metadata that we need to check.
467 const unsigned SpecialParameterBits = bcinfo::MD_SIG_X|bcinfo::MD_SIG_Y|bcinfo::MD_SIG_Z|bcinfo::MD_SIG_Ctxt;
468
469 #ifndef __DISABLE_ASSERTS
470 {
471 // These are properties in the metadata that we already check in
472 // some other way.
473 const unsigned BoringBits = bcinfo::MD_SIG_In|bcinfo::MD_SIG_Out|bcinfo::MD_SIG_Usr|bcinfo::MD_SIG_Kernel;
474
475 slangAssert((Metadata & ~(SpecialParameterBits | BoringBits)) == 0);
476 }
477 #endif
478
479 auto &mSignatureMetadata = mFiles.Current().mForEaches[EF->getOrdinal()].mSignatureMetadata;
480 if (isCollecting()) {
481 mSignatureMetadata = Metadata;
482 }
483 if (isUsing()) {
484 if (mForEachFatal)
485 return;
486
487 if ((mSignatureMetadata & SpecialParameterBits) != (Metadata & SpecialParameterBits)) {
488 mRSC->ReportError(EF->getLocation(),
489 "foreach kernel '%0' has different special parameters "
490 "for 32-bit targets than for 64-bit targets")
491 << EF->getName();
492 }
493 }
494 }
495
// Close the foreach kernel opened by beginForEach().  In the using pass,
// unless a fatal mismatch suppressed the per-element walks, every recorded
// input and usrData parameter must have been consumed.
void ReflectionState::endForEach() {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mForEachOpen >= 0);
  if (isUsing() && !mForEachFatal) {
    slangAssert(mFiles.Current().mForEaches[mForEachOpen].mIns.isFinished());
    slangAssert(mFiles.Current().mForEaches[mForEachOpen].mParams.isFinished());
  }

  // Negative ordinal == no kernel currently open.
  mForEachOpen = -1;
}
509
// Finish per-file foreach processing (using pass only).  First report the
// kernels deferred to mForEachesBad by beginForEach(); then, unless the
// ordering was fatally perturbed, report any 32-bit kernels that the
// 64-bit pass never matched by ordinal.
void ReflectionState::endForEaches() {
  slangAssert(mForEachOpen < 0);
  if (!isUsing())
    return;

  const auto &file = mFiles.Current();

  if (!mForEachesBad.empty()) {
    std::sort(mForEachesBad.begin(), mForEachesBad.end(),
              [](const RSExportForEach *a, const RSExportForEach *b) { return a->getOrdinal() < b->getOrdinal(); });
    // Note that after the sort, all kernels that are bad because of
    // name mismatch precede all kernels that are bad because of
    // too-high ordinal.

    // 32-bit and 64-bit compiles need to see foreach kernels in the
    // same order, because of slot number assignment. Once we see the
    // first name mismatch in the sequence of foreach kernels, it
    // doesn't make sense to issue further diagnostics regarding
    // foreach kernels except those that still happen to match by name
    // and ordinal (we already handled those diagnostics between
    // beginForEach() and endForEach()).
    bool ForEachesOrderFatal = false;

    for (const RSExportForEach *EF : mForEachesBad) {
      if (EF->getOrdinal() >= file.mForEachCount) {
        mRSC->ReportError(EF->getLocation(),
                          "foreach kernel '%0' is only present for 64-bit targets")
            << EF->getName();
      } else {
        // Name mismatch at a valid ordinal: slot numbering diverges, so
        // stop after the first such report.
        mRSC->ReportError(EF->getLocation(),
                          "%ordinal0 foreach kernel is '%1' for 32-bit targets "
                          "but '%2' for 64-bit targets")
            << (EF->getOrdinal() + 1)
            << mFiles.Current().mForEaches[EF->getOrdinal()].mName
            << EF->getName();
        ForEachesOrderFatal = true;
        break;
      }
    }

    mForEachesBad.clear();

    if (ForEachesOrderFatal)
      return;
  }

  // If every 32-bit ordinal was matched, there is nothing 32-bit-only left.
  if (mNumForEachesMatchedByOrdinal == file.mForEachCount)
    return;
  for (unsigned ord = 0; ord < file.mForEachCount; ord++) {
    const auto &fe = file.mForEaches[ord];
    if (fe.mState == File::ForEach::S_Collected) {
      mRSC->ReportError("in file '%0' foreach kernel '%1' is only present for 32-bit targets")
          << file.mRSSourceFileName << fe.mName;
    }
  }
}
566
567 // Invokable ///////////////////////////////////////////////////////////////////////////////////
568
569 // Keep this in sync with RSReflectionJava::genExportFunction().
// Record (collecting) or check (using) one invokable function.  Invokables
// are visited in slot order, so the parallel traversal lines up the 32-bit
// and 64-bit sequences; a name mismatch is fatal for all later invokables
// because it perturbs slot numbering (see mInvokablesOrderFatal).
void ReflectionState::declareInvokable(const RSExportFunc *EF) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  const std::string Name = EF->getName(/*Mangle=*/false);
  const size_t ParamCount = EF->getNumParameters();

  auto &invokables = mFiles.Current().mInvokables;
  if (isCollecting()) {
    auto &invokable = invokables.CollectNext();
    invokable.mName = Name;
    invokable.mParamCount = ParamCount;
    if (EF->hasParam()) {
      unsigned FieldIdx = 0;
      invokable.mParams = new llvm::StringRef[ParamCount];
      for (RSExportFunc::const_param_iterator I = EF->params_begin(),
                                              E = EF->params_end();
           I != E; I++, FieldIdx++) {
        invokable.mParams[FieldIdx] = canon(getUniqueTypeName((*I)->getType()));
      }
    }
  }
  if (isUsing()) {
    if (mInvokablesOrderFatal)
      return;

    if (invokables.isFinished()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, and
      // of the fact that the "extra" invokable(s) are at the end.
      mRSC->ReportError(EF->getLocation(),
                        "invokable function '%0' is only present for 64-bit targets")
          << Name;
      return;
    }

    auto &invokable = invokables.UseNext();

    if (invokable.mName != Name) {
      // Order matters because it determines slot number
      mRSC->ReportError(EF->getLocation(),
                        "%ordinal0 invokable function is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << unsigned(invokables.CurrentIdx() + 1)
          << invokable.mName
          << Name;
      mInvokablesOrderFatal = true;
      return;
    }

    if (invokable.mParamCount != ParamCount) {
      // Parameter-by-parameter comparison below cannot line up.
      mRSC->ReportError(EF->getLocation(),
                        "invokable function '%0' has %1 parameter%s1 for 32-bit targets "
                        "but %2 parameter%s2 for 64-bit targets")
          << Name << unsigned(invokable.mParamCount) << unsigned(ParamCount);
      return;
    }
    if (EF->hasParam()) {
      unsigned FieldIdx = 0;
      for (RSExportFunc::const_param_iterator I = EF->params_begin(),
                                              E = EF->params_end();
           I != E; I++, FieldIdx++) {
        const std::string Type = getUniqueTypeName((*I)->getType());
        if (!invokable.mParams[FieldIdx].equals(Type)) {
          mRSC->ReportError(EF->getLocation(),
                            "%ordinal0 parameter of invokable function '%1' "
                            "has type '%2' for 32-bit targets "
                            "but type '%3' for 64-bit targets")
              << (FieldIdx + 1)
              << Name
              << invokable.mParams[FieldIdx].str()
              << Type;
        }
      }
    }
  }
}
649
endInvokables()650 void ReflectionState::endInvokables() {
651 if (!isUsing() || mInvokablesOrderFatal)
652 return;
653
654 auto &invokables = mFiles.Current().mInvokables;
655 while (!invokables.isFinished()) {
656 const auto &invokable = invokables.UseNext();
657 mRSC->ReportError("in file '%0' invokable function '%1' is only present for 32-bit targets")
658 << mFiles.Current().mRSSourceFileName << invokable.mName;
659 }
660 }
661
662 // Record //////////////////////////////////////////////////////////////////////////////////////
663
// Start per-file record (exported struct) processing; bracketed by
// endRecords().  mNumRecordsMatchedByName counts how many collected records
// the using pass matches, so endRecords() can detect 32-bit-only records.
void ReflectionState::beginRecords() {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mRecordsState != RS_Open);
  mRecordsState = RS_Open;
  mNumRecordsMatchedByName = 0;
}
673
endRecords()674 void ReflectionState::endRecords() {
675 slangAssert(!isClosed());
676 if (!isActive())
677 return;
678
679 slangAssert(mRecordsState == RS_Open);
680 mRecordsState = RS_Closed;
681
682 if (isUsing()) {
683 const File &file = mFiles.Current();
684 if (mNumRecordsMatchedByName == file.mRecords.size())
685 return;
686 // NOTE: "StringMap iteration order, however, is not guaranteed to
687 // be deterministic". So sort by name before reporting.
688 // Alternatively, if we record additional information, we could
689 // sort by source location or by order in which we discovered the
690 // need to export.
691 std::vector<llvm::StringRef> Non64RecordNames;
692 for (auto I = file.mRecords.begin(), E = file.mRecords.end(); I != E; I++)
693 if (!I->getValue().mMatchedByName && I->getValue().mOrdinary)
694 Non64RecordNames.push_back(I->getKey());
695 std::sort(Non64RecordNames.begin(), Non64RecordNames.end(),
696 [](llvm::StringRef a, llvm::StringRef b) { return a.compare(b)==-1; });
697 for (auto N : Non64RecordNames)
698 mRSC->ReportError("in file '%0' structure '%1' is exported only for 32-bit targets")
699 << file.mRSSourceFileName << N.str();
700 }
701 }
702
// Record (collecting) or check (using) one exported struct type.  Unlike
// foreach kernels and invokables, records are matched by NAME rather than
// by order, since record order does not affect slot numbering.  Layout
// (offsets, sizes, padding) is captured in the collecting pass for later
// reflection use, but deliberately NOT compared here -- only field count,
// field names, and field types must agree across targets.
void ReflectionState::declareRecord(const RSExportRecordType *ERT, bool Ordinary) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  slangAssert(mRecordsState == RS_Open);

  auto &records = mFiles.Current().mRecords;
  if (isCollecting()) {
    // Keep struct/field layout in sync with
    // RSReflectionJava::genPackVarOfType() and
    // RSReflectionJavaElementBuilder::genAddElement()

    // Save properties of record

    const size_t FieldCount = ERT->fields_size();
    File::Record::Field *Fields = new File::Record::Field[FieldCount];

    size_t Pos = 0; // Relative position of field within record
    unsigned FieldIdx = 0;
    for (RSExportRecordType::const_field_iterator I = ERT->fields_begin(), E = ERT->fields_end();
         I != E; I++, FieldIdx++) {
      const RSExportRecordType::Field *FieldExport = *I;
      size_t FieldOffset = FieldExport->getOffsetInParent();
      const RSExportType *T = FieldExport->getType();
      size_t FieldStoreSize = T->getStoreSize();
      size_t FieldAllocSize = T->getAllocSize();

      slangAssert(FieldOffset >= Pos);
      slangAssert(FieldAllocSize >= FieldStoreSize);

      auto &FieldState = Fields[FieldIdx];
      FieldState.mName = FieldExport->getName();
      FieldState.mType = canon(getUniqueTypeName(T));
      // Padding before this field (gap after the previous field's
      // allocation) and after it (alloc size beyond store size).
      FieldState.mPrePadding = FieldOffset - Pos;
      FieldState.mPostPadding = FieldAllocSize - FieldStoreSize;
      FieldState.mOffset = FieldOffset;
      FieldState.mStoreSize = FieldStoreSize;

      Pos = FieldOffset + FieldAllocSize;
    }

    slangAssert(ERT->getAllocSize() >= Pos);

    // Insert record into map

    slangAssert(records.find(ERT->getName()) == records.end());
    File::Record &record = records[ERT->getName()];
    record.mFields = Fields;
    record.mFieldCount = FieldCount;
    record.mPostPadding = ERT->getAllocSize() - Pos;
    record.mAllocSize = ERT->getAllocSize();
    record.mOrdinary = Ordinary;
    record.mMatchedByName = false;
  }
  if (isUsing()) {
    // Only ordinary (exported) records participate in cross-pass checking.
    if (!Ordinary)
      return;

    const auto RIT = records.find(ERT->getName());
    if (RIT == records.end()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, so
      // a record that's only classified as exported during the 64-bit
      // compilation pass doesn't cause any problems.
      mRSC->ReportError(ERT->getLocation(), "structure '%0' is exported only for 64-bit targets")
          << ERT->getName();
      return;
    }
    File::Record &record = RIT->getValue();
    record.mMatchedByName = true;
    ++mNumRecordsMatchedByName;
    slangAssert(record.mOrdinary);

    if (ERT->fields_size() != record.mFieldCount) {
      mRSC->ReportError(ERT->getLocation(),
                        "exported structure '%0' has %1 field%s1 for 32-bit targets "
                        "but %2 field%s2 for 64-bit targets")
          << ERT->getName() << unsigned(record.mFieldCount) << unsigned(ERT->fields_size());
      return;
    }

    // Note that we are deliberately NOT comparing layout properties
    // (such as Field offsets and sizes, or Record allocation size);
    // we need to tolerate layout differences between 32-bit
    // compilation and 64-bit compilation.

    unsigned FieldIdx = 0;
    for (RSExportRecordType::const_field_iterator I = ERT->fields_begin(), E = ERT->fields_end();
         I != E; I++, FieldIdx++) {
      const RSExportRecordType::Field &FieldExport = **I;
      const File::Record::Field &FieldState = record.mFields[FieldIdx];
      if (FieldState.mName != FieldExport.getName()) {
        // Field names must line up positionally; a name mismatch makes
        // further field comparison meaningless, so stop.
        mRSC->ReportError(ERT->getLocation(),
                          "%ordinal0 field of exported structure '%1' "
                          "is '%2' for 32-bit targets "
                          "but '%3' for 64-bit targets")
            << (FieldIdx + 1) << ERT->getName() << FieldState.mName << FieldExport.getName();
        return;
      }
      const std::string FieldExportType = getUniqueTypeName(FieldExport.getType());
      if (!FieldState.mType.equals(FieldExportType)) {
        mRSC->ReportError(ERT->getLocation(),
                          "field '%0' of exported structure '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << FieldState.mName << ERT->getName() << FieldState.mType.str() << FieldExportType;
      }
    }
  }
}
815
816 ReflectionState::Record32
getRecord32(const RSExportRecordType * ERT)817 ReflectionState::getRecord32(const RSExportRecordType *ERT) {
818 if (isUsing()) {
819 const auto &Records = mFiles.Current().mRecords;
820 const auto RIT = Records.find(ERT->getName());
821 if (RIT != Records.end())
822 return Record32(&RIT->getValue());
823 }
824 return Record32();
825 }
826
827 // Reduce //////////////////////////////////////////////////////////////////////////////////////
828
// Record (collecting) or check (using) one reduction kernel.  Reductions
// are visited in slot order, so the parallel traversal lines up the 32-bit
// and 64-bit sequences; a name mismatch is fatal for all later reductions
// because it perturbs slot numbering (see mReducesOrderFatal).
void ReflectionState::declareReduce(const RSExportReduce *ER, bool IsExportable) {
  slangAssert(!isClosed());
  if (!isActive())
    return;

  auto &reduces = mFiles.Current().mReduces;
  if (isCollecting()) {
    auto &reduce = reduces.CollectNext();
    reduce.mName = ER->getNameReduce();

    const auto &InTypes = ER->getAccumulatorInTypes();
    const size_t InTypesSize = InTypes.size();
    reduce.mAccumInCount = InTypesSize;
    reduce.mAccumIns = new llvm::StringRef[InTypesSize];
    unsigned InTypesIdx = 0;
    for (const auto &InType : InTypes)
      reduce.mAccumIns[InTypesIdx++] = canon(getUniqueTypeName(InType));

    reduce.mResult = canon(getUniqueTypeName(ER->getResultType()));
    reduce.mIsExportable = IsExportable;
  }
  if (isUsing()) {
    if (mReducesOrderFatal)
      return;

    const std::string Name = ER->getNameReduce();

    if (reduces.isFinished()) {
      // This doesn't actually break reflection, but that's a
      // coincidence of the fact that we reflect during the 64-bit
      // compilation pass rather than the 32-bit compilation pass, and
      // of the fact that the "extra" reduction kernel(s) are at the
      // end.
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' is only present for 64-bit targets")
          << Name;
      return;
    }

    auto &reduce = reduces.UseNext();

    if (reduce.mName != Name) {
      // Order matters because it determines slot number. We might be
      // able to tolerate certain cases if we ignore non-exportable
      // kernels in the two sequences (32-bit and 64-bit) -- non-exportable
      // kernels do not take up slot numbers.
      mRSC->ReportError(ER->getLocation(),
                        "%ordinal0 reduction kernel is '%1' for 32-bit targets "
                        "but '%2' for 64-bit targets")
          << unsigned(reduces.CurrentIdx() + 1)
          << reduce.mName
          << Name;
      mReducesOrderFatal = true;
      return;
    }

    // If at least one of the two kernels (32-bit or 64-bit) is not
    // exporable, then there will be no reflection for that kernel,
    // and so any mismatch in result type or in inputs is irrelevant.
    // However, we may make more kernels exportable in the future.
    // Therefore, we'll forbid mismatches anyway.

    if (reduce.mIsExportable != IsExportable) {
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' is reflected in Java only for %select{32|64}1-bit targets")
          << reduce.mName
          << IsExportable;
    }

    const std::string ResultType = getUniqueTypeName(ER->getResultType());
    if (!reduce.mResult.equals(ResultType)) {
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' has result type '%1' for 32-bit targets "
                        "but result type '%2' for 64-bit targets")
          << reduce.mName << reduce.mResult.str() << ResultType;
    }

    const auto &InTypes = ER->getAccumulatorInTypes();
    if (reduce.mAccumInCount != InTypes.size()) {
      // Input-by-input comparison below cannot line up.
      mRSC->ReportError(ER->getLocation(),
                        "reduction kernel '%0' has %1 input%s1 for 32-bit targets "
                        "but %2 input%s2 for 64-bit targets")
          << Name << unsigned(reduce.mAccumInCount) << unsigned(InTypes.size());
      return;
    }
    unsigned FieldIdx = 0;
    for (const auto &InType : InTypes) {
      const std::string InTypeName = getUniqueTypeName(InType);
      const llvm::StringRef StateInTypeName = reduce.mAccumIns[FieldIdx++];
      if (!StateInTypeName.equals(InTypeName)) {
        mRSC->ReportError(ER->getLocation(),
                          "%ordinal0 input of reduction kernel '%1' "
                          "has type '%2' for 32-bit targets "
                          "but type '%3' for 64-bit targets")
            << FieldIdx
            << Name
            << StateInTypeName.str()
            << InTypeName;
      }
    }
  }
}
931
endReduces()932 void ReflectionState::endReduces() {
933 if (!isUsing() || mReducesOrderFatal)
934 return;
935
936 auto &reduces = mFiles.Current().mReduces;
937 while (!reduces.isFinished()) {
938 const auto &reduce = reduces.UseNext();
939 mRSC->ReportError("in file '%0' reduction kernel '%1' is only present for 32-bit targets")
940 << mFiles.Current().mRSSourceFileName << reduce.mName;
941 }
942 }
943
944 // Variable ////////////////////////////////////////////////////////////////////////////////////
945
946 // Keep this in sync with initialization handling in
947 // RSReflectionJava::genScriptClassConstructor().
declareVariable(const RSExportVar * EV)948 ReflectionState::Val32 ReflectionState::declareVariable(const RSExportVar *EV) {
949 slangAssert(!isClosed());
950 if (!isActive())
951 return NoVal32();
952
953 auto &variables = mFiles.Current().mVariables;
954 if (isCollecting()) {
955 auto &variable = variables.CollectNext();
956 variable.mName = EV->getName();
957 variable.mType = canon(getUniqueTypeName(EV->getType()));
958 variable.mAllocSize = EV->getType()->getAllocSize();
959 variable.mIsConst = EV->isConst();
960 if (!EV->getInit().isUninit()) {
961 variable.mInitializerCount = 1;
962 variable.mInitializers = new clang::APValue[1];
963 variable.mInitializers[0] = EV->getInit();
964 } else if (EV->getArraySize()) {
965 variable.mInitializerCount = EV->getNumInits();
966 variable.mInitializers = new clang::APValue[variable.mInitializerCount];
967 for (size_t i = 0; i < variable.mInitializerCount; ++i)
968 variable.mInitializers[i] = EV->getInitArray(i);
969 } else {
970 variable.mInitializerCount = 0;
971 }
972 return NoVal32();
973 }
974
975 /*-- isUsing() -----------------------------------------------------------*/
976
977 slangAssert(isUsing());
978
979 if (mVariablesOrderFatal)
980 return NoVal32();
981
982 if (variables.isFinished()) {
983 // This doesn't actually break reflection, but that's a
984 // coincidence of the fact that we reflect during the 64-bit
985 // compilation pass rather than the 32-bit compilation pass, and
986 // of the fact that the "extra" variable(s) are at the end.
987 mRSC->ReportError(EV->getLocation(), "global variable '%0' is only present for 64-bit targets")
988 << EV->getName();
989 return NoVal32();
990 }
991
992 const auto &variable = variables.UseNext();
993
994 if (variable.mName != EV->getName()) {
995 // Order matters because it determines slot number
996 mRSC->ReportError(EV->getLocation(),
997 "%ordinal0 global variable is '%1' for 32-bit targets "
998 "but '%2' for 64-bit targets")
999 << unsigned(variables.CurrentIdx() + 1)
1000 << variable.mName
1001 << EV->getName();
1002 mVariablesOrderFatal = true;
1003 return NoVal32();
1004 }
1005
1006 const std::string TypeName = getUniqueTypeName(EV->getType());
1007
1008 if (!variable.mType.equals(TypeName)) {
1009 mRSC->ReportError(EV->getLocation(),
1010 "global variable '%0' has type '%1' for 32-bit targets "
1011 "but type '%2' for 64-bit targets")
1012 << EV->getName()
1013 << variable.mType.str()
1014 << TypeName;
1015 return NoVal32();
1016 }
1017
1018 if (variable.mIsConst != EV->isConst()) {
1019 mRSC->ReportError(EV->getLocation(),
1020 "global variable '%0' has inconsistent 'const' qualification "
1021 "between 32-bit targets and 64-bit targets")
1022 << EV->getName();
1023 return NoVal32();
1024 }
1025
1026 // NOTE: Certain syntactically different but semantically
1027 // equivalent initialization patterns are unnecessarily rejected
1028 // as errors.
1029 //
1030 // Background:
1031 //
1032 // . A vector initialized with a scalar value is treated
1033 // by reflection as if all elements of the vector are
1034 // initialized with the scalar value.
1035 // . A vector may be initialized with a vector of greater
1036 // length; reflection ignores the extra initializers.
1037 // . If only the beginning of a vector is explicitly
1038 // initialized, reflection treats it as if trailing elements are
1039 // initialized to zero (by issuing explicit assignments to those
1040 // trailing elements).
1041 // . If only the beginning of an array is explicitly initialized,
1042 // reflection treats it as if trailing elements are initialized
1043 // to zero (by Java rules for newly-created arrays).
1044 //
1045 // Unnecessarily rejected as errors:
1046 //
1047 // . One compile initializes a vector with a scalar, and
1048 // another initializes it with a vector whose elements
1049 // are the scalar, as in
1050 //
1051 // int2 x =
1052 // #ifdef __LP64__
1053 // 1
1054 // #else
1055 // { 1, 1 }
1056 // #endif
1057 //
1058 // . Compiles initialize a vector with vectors of different
1059 // lengths, but the initializers agree up to the length
1060 // of the variable being initialized, as in
1061 //
1062 // int2 x = { 1, 2
1063 // #ifdef __LP64__
1064 // 3
1065 // #else
1066 // 4
1067 // #endif
1068 // };
1069 //
1070 // . Two compiles agree with the initializer for a vector or
1071 // array, except that one has some number of explicit trailing
1072 // zeroes, as in
1073 //
1074 // int x[4] = { 3, 2, 1
1075 // #ifdef __LP64__
1076 // , 0
1077 // #endif
1078 // };
1079
1080 bool MismatchedInitializers = false;
1081 if (!EV->getInit().isUninit()) {
1082 // Use phase has a scalar initializer.
1083 // Make sure that Collect phase had a matching scalar initializer.
1084 if ((variable.mInitializerCount != 1) ||
1085 !equal(variable.mInitializers[0], EV->getInit()))
1086 MismatchedInitializers = true;
1087 } else if (EV->getArraySize()) {
1088 const size_t UseSize = EV->getNumInits();
1089 if (variable.mInitializerCount != UseSize)
1090 MismatchedInitializers = true;
1091 else {
1092 for (int i = 0; i < UseSize; ++i)
1093 if (!equal(variable.mInitializers[i], EV->getInitArray(i))) {
1094 MismatchedInitializers = true;
1095 break;
1096 }
1097 }
1098 } else if (variable.mInitializerCount != 0) {
1099 // Use phase does not have a scalar initializer, variable is not
1100 // an array, and Collect phase has an initializer. This is an error.
1101 MismatchedInitializers = true;
1102 }
1103
1104 if (MismatchedInitializers) {
1105 mRSC->ReportError(EV->getLocation(),
1106 "global variable '%0' is initialized differently for 32-bit targets "
1107 "than for 64-bit targets")
1108 << EV->getName();
1109 return NoVal32();
1110 }
1111
1112 return Val32(true, variable.mAllocSize);
1113 }
1114
endVariables()1115 void ReflectionState::endVariables() {
1116 if (!isUsing() || mVariablesOrderFatal)
1117 return;
1118
1119 auto &variables = mFiles.Current().mVariables;
1120 while (!variables.isFinished()) {
1121 const auto &variable = variables.UseNext();
1122 mRSC->ReportError("in file '%0' global variable '%1' is only present for 32-bit targets")
1123 << mFiles.Current().mRSSourceFileName << variable.mName;
1124 }
1125 }
1126
1127 } // namespace slang
1128