/*
 * Copyright 2017, The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _FRAMEWORKS_COMPILE_SLANG_REFLECTION_STATE_H_  // NOLINT
#define _FRAMEWORKS_COMPILE_SLANG_REFLECTION_STATE_H_

#include <string>
#include <utility>

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "clang/AST/APValue.h"

#include "slang_assert.h"

namespace slang {

class RSContext;
class RSExportForEach;
class RSExportFunc;
class RSExportRecordType;
class RSExportReduce;
class RSExportType;
class RSExportVar;

// ---------------------
// class ReflectionState
// ---------------------
//
// This class is used to collect data from 32-bit compilation for use
// during the reflected code generation that occurs during 64-bit
// compilation.  The data is used for two purposes:
//
// 1) Accommodating rs object handle size differences when laying out
//    data (in particular, variables and records).
// 2) Emitting errors when differences between 32-bit and 64-bit
//    compilation cannot be tolerated in the reflected code (for
//    example, size_t has different sizes, and so cannot be part
//    of any exportable).
//
// The model for using this class is as follows (a usage sketch appears at
// the end of this comment):
// a) Instantiate a class instance.  The instance is in the S_Initial
//    state.
// b) Call openJava32() to move the instance to the S_OpenJava32
//    ("Collecting") state.
// c) Run the reflection pass on all files in 32-bit mode.  Do not
//    actually generate reflected code; but call various methods on
//    the instance (begin*(), declare*(), end*(), etc.) to collect
//    information.
// d) Call closeJava32() to move the instance to the S_ClosedJava32
//    state.
// e) Call openJava64() to move the instance to the S_OpenJava64
//    ("Using") state.
// f) Run the reflection pass on all files in 64-bit mode.  Call the
//    same methods as in step (c), as well as some further methods to
//    query the information collected in step (c) in order to handle
//    layout differences.  All error reporting for 32-bit versus
//    64-bit differences is handled in the methods themselves.
// g) Call closeJava64() to move the instance to the S_ClosedJava64
//    state.
// h) Destroy the instance.
//
// There are two exceptions to this model:
//
// 1) If not doing both 32-bit and 64-bit compilation, then skip steps
//    (b), (d), (e), and (g).  (This is what happens if reflecting C++
//    instead of Java, or reflecting Java but using the -m32 or -m64
//    option.)  In this case, the methods called in steps (c) and (f)
//    are no-ops: They do not collect information, they do not report
//    errors, and they return "no information available" when step (f)
//    asks for 32-bit layout related information.
// 2) The class instance can be moved to the S_Bad state by class
//    ReflectionState::Tentative (see that class for more information)
//    when reflection itself aborts due to some error.  The only legal
//    thing to do with an instance in this state is invoke its
//    destructor.
//
// All exported entities except for Records have slot numbers assigned
// in reflection order.  These slot numbers must match up between
// 32-bit and 64-bit compilation.  Therefore, we (generally) require
// that entities be presented to ReflectionState (via begin*() or
// declare*()) in the same order during the Collecting and Using
// phases.  This presentation order is generally the same as lexical
// order in the user code, which makes it simple to emit meaningful
// diagnostics when the order is inconsistent (for example, 32-bit and
// 64-bit compilation disagree on the name of the kernel in a
// particular slot).  ReflectionState generally builds up an array of
// each sort of entity, in the presentation order.  There are two
// exceptions:
//
// a) Records, as mentioned above.  Exported Records have no slot
//    number, and therefore reflection order doesn't matter.  In
//    practice, Records aren't necessarily reflected in consistent
//    order, because they are determined to be exported as a
//    consequence of determining that other entities are to be
//    exported; and variations between 32-bit and 64-bit compilation
//    can therefore result in inconsistent Record reflection order.
//    Therefore, ReflectionState builds up a map of Records.
// b) ForEach kernels.  ForEach kernels are not necessarily reflected
//    in lexical order (there is some sorting to segregate root
//    kernel, old-style kernels, and new-style kernels).  In order to
//    give meaningful diagnostics for slot order mismatches, it's
//    enough to solve the simpler problem of giving meaningful
//    diagnostics for lexical order mismatches (although this is
//    stricter than necessary because of the sorting that occurs
//    before slot assignment).  Therefore, ReflectionState builds up
//    an array of ForEaches in lexical order rather than in
//    presentation (slot) order, and accesses the array randomly
//    rather than sequentially.
//
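// Illustrative usage sketch of the model above (not actual driver code: the
// file list and the runReflectionPass32()/runReflectionPass64() helpers are
// hypothetical):
//
//   ReflectionState state;
//
//   state.openJava32(files.size());                  // Collecting phase
//   for (const auto &f : files) {
//     state.nextFile(f.context, f.packageName, f.sourceFileName);
//     runReflectionPass32(f, &state);   // begin*()/declare*()/end*() calls
//   }
//   state.closeJava32();
//
//   state.openJava64();                              // Using phase
//   for (const auto &f : files) {
//     state.nextFile(f.context, f.packageName, f.sourceFileName);
//     runReflectionPass64(f, &state);   // same calls, plus 32-bit queries
//   }
//   state.closeJava64();
//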
class ReflectionState {
 private:
  // Set this to true to turn everything into a no-op, just as if none
  // of the open*() or close*() methods were ever called.
  static const bool kDisabled = false;

 public:
  ReflectionState() :
      mRSC(nullptr),
      mState(S_Initial),
      mForEachOpen(-1),
      mOutputClassOpen(false),
      mRecordsState(RS_Initial),
      mStringSet(nullptr) { }
  ~ReflectionState();

  ReflectionState(const ReflectionState &) = delete;
  void operator=(const ReflectionState &) = delete;

  // For use in the debugger.
  void dump();

  // A possibly-present value describing a property for a 32-bit target.
  // When .first is false, the value is absent, and .second is unspecified.
  typedef std::pair<bool, size_t> Val32;
  static Val32 NoVal32() { return Val32(false, ~size_t(0)); }
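  //
  // Illustrative sketch of the Val32 convention (the declareVariable() call
  // and the EV variable stand in for any producer of a Val32):
  //
  //   Val32 Size32 = state->declareVariable(EV);
  //   if (Size32.first) {
  //     size_t AllocSize32 = Size32.second;  // 32-bit information available
  //   }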

  void openJava32(size_t NumFiles);
  void closeJava32();
  void openJava64();
  void closeJava64();

  bool isCollecting() const { return mState==S_OpenJava32; }

  // ----------------------------------------------------------------------

  // Use these methods during the "Collecting" phase to track
  // information about a class being generated -- a script class or a
  // type class.  We call such a class "Divergent" if it needs to have
  // at least one runtime check to distinguish between 32-bit and
  // 64-bit targets.
  //
  //   Indicate that we are beginning to generate the class.
  //
  void beginOutputClass() {
    slangAssert(!mOutputClassOpen && !isClosed());
    mOutputClassOpen = true;
    mOutputClassDivergent = false;
  }
  //
  //   Record the fact that we've learned the class is divergent.
  //
  void setOutputClassDivergent() {
    slangAssert(mOutputClassOpen);
    mOutputClassDivergent = true;
  }
  //
  //   Indicate that we've finished generating the class.  Returns
  //   true IFF we've learned the class is divergent.
  //
  bool endOutputClass() {
    slangAssert(mOutputClassOpen);
    mOutputClassOpen = false;
    return mOutputClassDivergent;
  }
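  //
  // Illustrative sketch (hypothetical emitter code, not part of this API):
  //
  //   state->beginOutputClass();
  //   ... emit the class; whenever a 32-bit vs. 64-bit runtime check is
  //   ... emitted, call state->setOutputClassDivergent();
  //   const bool Divergent = state->endOutputClass();
  //   // Divergent tells the caller whether the generated class needs the
  //   // runtime 32-bit/64-bit distinction.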

  // ----------------------------------------------------------------------

  // --------------------------------
  // class ReflectionState::Tentative
  // --------------------------------
  //
  // This class aids in error handling.  The model is as follows:
  // a) Instantiate the class with a pointer to a ReflectionState
  //    instance.
  // b) Before destroying the class instance, if there have been no
  //    errors, call the ok() method on the instance.
  // c) When the instance is destroyed, if ok() has not been called on
  //    it, this class will put the ReflectionState into the S_Bad
  //    state.
  //
  // The idea is to "poison" the ReflectionState if we quit reflection
  // early because of some error -- we don't want to get in a
  // situation where we only have partial information from the
  // Collecting phase (because of quitting early) but try to use it
  // during the Using phase.
  //
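  // Illustrative sketch (the reflection entry point and emitEverything()
  // helper are hypothetical):
  //
  //   bool reflect(ReflectionState *state) {
  //     ReflectionState::Tentative guard(state);
  //     if (!emitEverything())   // any early return here leaves the state
  //       return false;          //   poisoned (S_Bad) via ~Tentative()
  //     guard.ok();              // success: do not poison the state
  //     return true;
  //   }
  //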
  friend class Tentative;
  class Tentative {
   public:
    Tentative(ReflectionState *state) : mState(state) { }
    ~Tentative() { if (mState) mState->mState = ReflectionState::S_Bad; }

    void ok() { mState = nullptr; }

    Tentative(const Tentative &) = delete;
    void operator=(const Tentative &) = delete;

   private:
    ReflectionState *mState;
  };

  // ----------------------------------------------------------------------

  // Model for ForEach kernels (per File):
  //
  // a) beginForEaches(number_of_non_dummy_root_kernels_in_file)
  // b) mixture of declareForEachDummyRoot() calls and
  //    beginForEach()..endForEach() calls
  // c) endForEaches()
  //
  // For a given ForEach kernel:
  //
  // b1) beginForEach()
  // b2) call any number of addForEachIn() (one per input)
  // b3) call any number of addForEachParam() (one per param)
  // b4) call addForEachSignatureMetadata() (if it's reflected)
  // b5) call endForEach()
  //
  // b2, b3, b4 can occur in any order
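  //
  // Illustrative sketch for one file (the ForEach list, the count, and the
  // RSExportForEach accessors shown here are hypothetical):
  //
  //   state->beginForEaches(NumNonDummyRootKernels);
  //   for (const RSExportForEach *EF : ForEaches) {
  //     if (EF->isDummyRoot()) {
  //       state->declareForEachDummyRoot(EF);
  //       continue;
  //     }
  //     state->beginForEach(EF);
  //     for (const RSExportType *In : EF->getInTypes())
  //       state->addForEachIn(EF, In);
  //     for (const RSExportType *Param : EF->getParamTypes())
  //       state->addForEachParam(EF, Param);
  //     state->addForEachSignatureMetadata(EF, EF->getSignatureMetadata());
  //     state->endForEach();
  //   }
  //   state->endForEaches();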

  void beginForEaches(size_t Count);

  void declareForEachDummyRoot(const RSExportForEach *) { /* we don't care */ };

  void beginForEach(const RSExportForEach *EF);

  void addForEachIn(const RSExportForEach *EF, const RSExportType *Type);

  void addForEachParam(const RSExportForEach *EF, const RSExportType *Type);

  void addForEachSignatureMetadata(const RSExportForEach *EF, unsigned Metadata);

  void endForEach();

  void endForEaches();

  // ----------------------------------------------------------------------

  // Model for Invokable functions (per File):
  //
  // a) beginInvokables(number_of_invokables_in_file)
  // b) declareInvokable() for each Invokable (order must be
  //    consistent between 32-bit and 64-bit compile)
  // c) endInvokables()
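  //
  // Illustrative sketch (the Funcs list of exported functions is
  // hypothetical):
  //
  //   state->beginInvokables(Funcs.size());
  //   for (const RSExportFunc *EF : Funcs)   // same order in both compiles
  //     state->declareInvokable(EF);
  //   state->endInvokables();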

  void beginInvokables(size_t Count) {
    mInvokablesOrderFatal = false;
    begin(&File::mInvokables, Count);
  }

  void declareInvokable(const RSExportFunc *EF);

  void endInvokables();

  // ----------------------------------------------------------------------

  // Model for reduction kernels (per File):
  //
  // a) beginReduces(number_of_reduction_kernels_in_file)
  // b) declareReduce() for each reduction kernel (order must be
  //    consistent between 32-bit and 64-bit compile)
  // c) endReduces()
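  //
  // Illustrative sketch (the Reduces list and the isExportable() test are
  // hypothetical):
  //
  //   state->beginReduces(Reduces.size());
  //   for (const RSExportReduce *ER : Reduces)   // same order in both compiles
  //     state->declareReduce(ER, isExportable(ER));
  //   state->endReduces();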

  void beginReduces(size_t Count) {
    mReducesOrderFatal = false;
    begin(&File::mReduces, Count);
  }

  void declareReduce(const RSExportReduce *ER, bool IsExportable);

  void endReduces();

  // ----------------------------------------------------------------------

  // Model for records (per File):
  //
  // a) beginRecords()
  // b) declareRecord() for each Record (order doesn't matter)
  // c) endRecords()
  //
  // And at any time during the Using phase, the caller can use getRecord32()
  // to get information from the 32-bit compile (Collecting phase).
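  //
  // Illustrative sketch (the Records list and the later type-class emission
  // are hypothetical):
  //
  //   state->beginRecords();
  //   for (const RSExportRecordType *ERT : Records)   // order doesn't matter
  //     state->declareRecord(ERT);
  //   state->endRecords();
  //
  //   // Using phase only -- query the 32-bit layout collected earlier:
  //   ReflectionState::Record32 R32 = state->getRecord32(ERT);
  //   Val32 AllocSize32 = R32.getRecordAllocSize();   // absent if there is
  //                                                   //   no 32-bit data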

  void beginRecords();

  // An "Ordinary" record is anything other than an
  // internally-synthesized helper record.  We do not emit diagnostics
  // for mismatched helper records -- we assume that the constructs
  // from which those helper records were derived are also mismatched,
  // and that we'll get diagnostics for those constructs.
  void declareRecord(const RSExportRecordType *ERT, bool Ordinary = true);

  void endRecords();

  class Record32;

  // During the Using phase, obtain information about a Record from
  // the Collecting phase.  ERT should be from the Using phase, not
  // the Collecting phase.  The value returned from this function is
  // valid for the lifetime of the ReflectionState instance.
  Record32 getRecord32(const RSExportRecordType *ERT);

  // ----------------------------------------------------------------------

  // Model for Variables (per File):
  //
  // a) beginVariables(number_of_exported_variables_in_file)
  // b) declareVariable() for each Variable (order must be consistent
  //    between 32-bit and 64-bit); in the Using phase, returns some
  //    information about the Variable from 32-bit compilation
  // c) endVariables()
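  //
  // Illustrative sketch (the Vars list of exported variables is
  // hypothetical):
  //
  //   state->beginVariables(Vars.size());
  //   for (const RSExportVar *EV : Vars) {     // same order in both compiles
  //     Val32 AllocSize32 = state->declareVariable(EV);
  //     // In the Using phase, AllocSize32.second is the variable's 32-bit
  //     // AllocSize (when AllocSize32.first is true).
  //   }
  //   state->endVariables();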

  void beginVariables(size_t Count) {
    mVariablesOrderFatal = false;
    begin(&File::mVariables, Count);
  }

  // If isUsing(), returns variable's 32-bit AllocSize; otherwise, returns NoVal32().
  Val32 declareVariable(const RSExportVar *EV);

  void endVariables();

  // ----------------------------------------------------------------------

  // ReflectionState has a notion of "current file".  After an
  // openJava*() or closeJava*() call, there is no current file.
  // Calling the nextFile() method when in the Collecting or Using
  // state "advances" to the next file in the list of files being
  // compiled, whose properties are specified by the arguments to
  // nextFile().  All of the various begin*(), declare*(), end*()
  // etc. calls implicitly refer to entities in the current file.
  //
  // RSC must remain valid until the next call to nextFile() or the
  // next S_* state change.
  void nextFile(const RSContext *RSC,
                const std::string &PackageName,
                const std::string &RSSourceFileName);

  // ----------------------------------------------------------------------

 private:
  enum State {
    S_Initial,          // No captured information
    S_OpenJava32,       // Capturing information for 32-bit Java
    S_ClosedJava32,     // Captured  information for 32-bit Java
    S_OpenJava64,       // Capturing information for 64-bit Java
    S_ClosedJava64,     // Captured  information for 64-bit Java
    S_Bad,              // Abnormal termination
  };

  // context associated with compilation of the current file
  const RSContext *mRSC;

  State mState;

  /*== ForEach ==================================================================*/

  // The data in this section is transient during ForEach processing
  // for each File.

  int mForEachOpen;    // if nonnegative, then ordinal of beginForEach() without matching endForEach()
  bool mForEachFatal;  // fatal mismatch in comparing ForEach; do no further comparisons for it

  // Tracks mismatches discovered during the Use phase.
  // There are two possibilities:
  // - if (ordinal + 1) is greater than the number of ForEaches from the Collecting phase,
  //   then this is an "extra" ForEach discovered during the Use phase
  // - otherwise the Collecting phase and the Use phase disagree on the name of the
  //   ForEach at this ordinal position (the Collecting phase's kernel name is
  //   available in mFiles.Current().mForEaches[ordinal].mName)
  llvm::SmallVector<const RSExportForEach *, 0> mForEachesBad;

  // During the Use phase, keep track of how many ForEach ordinals we
  // have seen that correspond to ordinals seen during the Collect
  // phase.  This helps determine whether we have to issue errors at
  // endForEaches().
  size_t mNumForEachesMatchedByOrdinal;

  /*== Invokable ================================================================*/

  // 32-bit and 64-bit compiles need to see invokables in the same
  // order, because of slot number assignment.  Once we see the first
  // name mismatch in the sequence of invokables for a given File, it
  // doesn't make sense to issue further diagnostics regarding
  // invokables for that File.
  bool mInvokablesOrderFatal;

  /*== OutputClass ==============================================================*/

  // This data tracks information about a class being generated -- a
  // script class or a type class.  We call such a class "Divergent"
  // if it needs to have at least one runtime check to distinguish
  // between 32-bit and 64-bit targets.

  bool mOutputClassOpen;  // beginOutputClass() without matching endOutputClass()
  bool mOutputClassDivergent;  // has class been marked divergent?

  /*== Record ===================================================================*/

  // This field enforces necessary discipline on the use of
  // beginRecords()/declareRecord()/endRecords().
  enum {
    RS_Initial,  // no beginRecords() yet for current File
    RS_Open,     // beginRecords() but no endRecords() for current File
    RS_Closed    // endRecords() for current File
  } mRecordsState;

  // During the Use phase, keep track of how many records we have seen
  // that have same-named counterparts seen during the Collect phase.
  // This helps determine whether we have to issue errors at
  // endRecords().
  size_t mNumRecordsMatchedByName;

  /*== Reduce ===================================================================*/

  // 32-bit and 64-bit compiles need to see reduction kernels in the
  // same order, because of slot number assignment.  Once we see the
  // first name mismatch in the sequence of reduction kernels for a
  // given File, it doesn't make sense to issue further diagnostics
  // regarding reduction kernels for that File.
  bool mReducesOrderFatal;

  /*== Variable =================================================================*/

  // 32-bit and 64-bit compiles need to see variables in the same
  // order, because of slot number assignment.  Once we see the first
  // name mismatch in the sequence of variables for a given File, it
  // doesn't make sense to issue further diagnostics regarding
  // variables for that File.
  bool mVariablesOrderFatal;

  /*=============================================================================*/

  bool isActive() const { return isCollecting() || isUsing(); }
  bool isClosed() const { return mState==S_ClosedJava32 || mState==S_ClosedJava64; }
  bool isUsing() const { return mState==S_OpenJava64; }

  // For anything with a type (such as a Variable or a Record field),
  // the type is represented via its name.  To save space, we don't
  // create multiple instances of the same name -- we have a canonical
  // instance in mStringSet, and use a StringRef to refer to it.  The
  // method canon() returns a StringRef to the canonical
  // instance, creating the instance if necessary.
  llvm::StringRef canon(const std::string &String);
  llvm::StringSet<> *mStringSet;

  // Synthesize a name for the specified type.  There should be a
  // one-to-one correspondence between the name and a C type (after
  // typedefs and integer expressions have been "flattened", and
  // considering a struct type to be identified solely by its name).
  static std::string getUniqueTypeName(const RSExportType *T);

  // ------------------------------
  // template class ArrayWithCursor
  // ------------------------------
  //
  // This class represents a fixed-length dynamically-allocated array
  // (length is specified by a method call after instantiation) along
  // with a cursor that traverses the array.  The behavior of the
  // class is very specific to the needs of ReflectionState.
  //
  // The model for using this class is as follows:
  // a) Instantiate a class instance.  The instance is in the
  //    S_Initial state.
  // b) Call BeginCollecting() with an array capacity.  This allocates
  //    the array members and moves the instance to the S_Collecting
  //    state.  The array size (contrast with capacity) is zero, and
  //    the cursor has not been placed.
  // c) Call CollectNext() a number of times equal to the capacity.
  //    Each time CollectNext() is called, it extends the array size
  //    by 1, and advances the cursor to the "new" member.  The idea
  //    is to set the value of the "new" member at this time.
  // d) Call BeginUsing().  This moves the instance to the S_Using
  //    state and "unplaces" the cursor.
  // e) Call UseNext() a number of times equal to the capacity.  Each
  //    time UseNext() is called, it advances the cursor to the next
  //    member (first member, the first time it is called).
  //    The cursor is stepping through the members that were "created"
  //    by CollectNext() during the S_Collecting state; the idea is to
  //    look at their values.
  // f) Destroy the instance.
  //
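  // Illustrative sketch (the element type and values are hypothetical):
  //
  //   ArrayWithCursor<std::string> Names;
  //   Names.BeginCollecting(2);
  //   Names.CollectNext() = "foo";  // size 1, cursor on [0]
  //   Names.CollectNext() = "bar";  // size 2, cursor on [1]
  //   Names.BeginUsing();           // cursor unplaced
  //   Names.UseNext();              // cursor on [0]; Names.Current() == "foo"
  //   Names.UseNext();              // cursor on [1]; Names.isFinished() is true
  //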
  template <typename Member> class ArrayWithCursor {
   public:
    ArrayWithCursor() :
        mState(S_Initial), mMembers(nullptr), mCapacity(0), mSize(0),
        mCursor(~size_t(0)) { }

    ~ArrayWithCursor() { delete [] mMembers; }

    ArrayWithCursor(const ArrayWithCursor &) = delete;
    void operator=(const ArrayWithCursor &) = delete;

    void BeginCollecting(size_t Size) {
      slangAssert(mState == S_Initial);
      mState = S_Collecting;
      mMembers = new Member[Size];
      mCapacity = Size;
    }
    // Increments the array size, advances the cursor to the new
    // member, and returns a reference to that member.
    Member &CollectNext() {
      slangAssert((mState == S_Collecting) && (mCursor + 1 == mSize) &&
                  (mSize < mCapacity));
      ++mSize;
      return mMembers[++mCursor];
    }

    void BeginUsing() {
      slangAssert((mState == S_Collecting) && (mCursor + 1 == mSize) &&
                  (mSize == mCapacity));
      mState = S_Using;
      mCursor = ~size_t(0);
    }
    // Advances the cursor to the next member, and returns a reference
    // to that member.
    Member &UseNext() {
      slangAssert((mState == S_Using) && (mCursor + 1 < mSize));
      return mMembers[++mCursor];
    }

    // Is the cursor on the last array member?
    bool isFinished() const {
      return mCursor + 1 == mSize;
    }

    size_t Size() const { return mSize; }

    // Return a reference to the member under the cursor.
    Member &Current() {
      slangAssert(mCursor < mSize);
      return mMembers[mCursor];
    }
    const Member &Current() const {
      slangAssert(mCursor < mSize);
      return mMembers[mCursor];
    }
    // Return the cursor position (zero-based).  Cursor must have been
    // placed (i.e., if we're Collecting, we must have called
    // CollectNext() at least once; and if we're Using, we must have
    // called UseNext() at least once).
    size_t CurrentIdx() const {
      slangAssert(mCursor < mSize);
      return mCursor;
    }

    // Return a reference to the specified member.  Must be within the
    // array size (not merely within its capacity).
    Member &operator[](size_t idx) {
      slangAssert(idx < mSize);
      return mMembers[idx];
    }
    const Member &operator[](size_t idx) const {
      slangAssert(idx < mSize);
      return mMembers[idx];
    }

   private:
    enum State { S_Initial, S_Collecting, S_Using };
    State mState;

    Member *mMembers;
    size_t mCapacity;
    size_t mSize;
    size_t mCursor;
  };


  struct File {
    File() : mForEaches(nullptr) { }
    ~File() { delete [] mForEaches; }

    File(const File &) = delete;
    void operator=(const File &) = delete;

    std::string mPackageName;
    std::string mRSSourceFileName;

    struct ForEach {
      ForEach() : mState(S_Initial) { }
      ForEach(const ForEach &) = delete;
      void operator=(const ForEach &) = delete;

      enum {
        S_Initial,    // ForEach has been instantiated
        S_Collected,  // beginForEach() has been called while Collecting
        S_UseMatched  // beginForEach() has been called while Using,
                      //   and found this ForEach
      } mState;

      std::string mName;

      // Types.  mIns[] and mOut can be null in case we have an
      // old-style kernel with a void* input or output.
      ArrayWithCursor<llvm::StringRef> mIns;
      ArrayWithCursor<llvm::StringRef> mParams;
      llvm::StringRef mOut;
      bool mHasOut;  // to distinguish between no output and void* output.

      unsigned mSignatureMetadata;
      bool mIsKernel;  // new-style (by-value) rather than old-style
    };
    ForEach *mForEaches;  // indexed by ordinal (lexical order)
    size_t mForEachCount;

    struct Invokable {
      Invokable() : mParams(nullptr) { }
      ~Invokable() { delete [] mParams; }

      Invokable(const Invokable &) = delete;
      void operator=(const Invokable &) = delete;

      std::string mName;
      llvm::StringRef *mParams;  // Types
      size_t mParamCount;
    };
    ArrayWithCursor<Invokable> mInvokables;

    // There are two things we need to do with a Record:
    // - Support structure sizes and layouts that differ between
    //   32-bit and 64-bit compilation.
    // - Do consistency checking between 32-bit and 64-bit compilation.
    //
    // TODO: Move this out of File to avoid duplication?  That is,
    //       instead of tracking Records on a per-File basis, track
    //       them globally?
    //
    //       (Because of ODR, we shouldn't have inconsistencies
    //       between Files.)
    //
    struct Record {
      Record() : mFields(nullptr) { }
      ~Record() { delete [] mFields; }

      Record(const Record &) = delete;
      void operator=(const Record &) = delete;

      struct Field {
        std::string mName;
        llvm::StringRef mType;
        size_t mPrePadding;   // this.OffsetInParent - (prev.OffsetInParent + prev.AllocSize)
        size_t mPostPadding;  // this.AllocSize - this.StoreSize
        size_t mOffset;       // this.OffsetInParent
        size_t mStoreSize;    // this.StoreSize
      };
      Field *mFields;
      size_t mFieldCount;
      size_t mPostPadding;    // padding after the end of the padded
                              //   last field
      size_t mAllocSize;
      bool mOrdinary;         // anything other than an
                              //   internally-synthesized helper
                              //   record.  We do not emit diagnostics
                              //   for inconsistent helper records.
      bool mMatchedByName;    // has declareRecord() been called on
                              //   this record during the Using phase?
    };
    llvm::StringMap<Record> mRecords;

    struct Reduce {
      Reduce() : mAccumIns(nullptr) { }
      ~Reduce() { delete [] mAccumIns; }

      Reduce(const Reduce &) = delete;
      void operator=(const Reduce &) = delete;

      std::string mName;

      // These fields only apply if the reduction kernel is exportable.
      llvm::StringRef *mAccumIns;  // Types
      size_t mAccumInCount;
      llvm::StringRef mResult;  // Type

      bool mIsExportable;
    };
    ArrayWithCursor<Reduce> mReduces;

    struct Variable {
      Variable() : mInitializers(nullptr) { }
      ~Variable() { delete [] mInitializers; }

      Variable(const Variable &) = delete;
      void operator=(const Variable &) = delete;

      std::string mName;
      llvm::StringRef mType;
      clang::APValue *mInitializers;
      size_t mInitializerCount;
      size_t mAllocSize;
      bool mIsConst;
    };
    ArrayWithCursor<Variable> mVariables;

  };
  ArrayWithCursor<File> mFiles;

  // Utility template -- common pattern used by many begin*() methods.
  template <typename Member>
  void begin(ArrayWithCursor<Member> File::*Array, size_t Count) {
    slangAssert(!isClosed());
    if (!isActive())
      return;

    auto &file = mFiles.Current();
    if (isCollecting())
      (file.*Array).BeginCollecting(Count);
    if (isUsing())
      (file.*Array).BeginUsing();
  }

 public:

  // This class represents 32-bit layout information built up during
  // the Collecting phase, for use during the Using phase.  It
  // provides an interface between class ReflectionState and client
  // code that actually performs reflection.
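  //
  // Illustrative sketch (the state pointer, ERT, and FieldIdx are
  // hypothetical; assumes the Using phase):
  //
  //   ReflectionState::Record32 R32 = state->getRecord32(ERT);
  //   std::pair<Val32, Val32> Padding = R32.getFieldPreAndPostPadding(FieldIdx);
  //   if (Padding.first.first) {
  //     size_t PrePadding32 = Padding.first.second;    // bytes before the field
  //     size_t PostPadding32 = Padding.second.second;  // bytes after the field
  //   }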
  class Record32 {
    friend class ReflectionState;

   public:
    Record32() : mRecord(nullptr) { }

    Val32 getRecordPostPadding() const {
      if (!mRecord)
        return NoVal32();
      return Val32(true, mRecord->mPostPadding);
    }

    Val32 getRecordAllocSize() const {
      if (!mRecord)
        return NoVal32();
      return Val32(true, mRecord->mAllocSize);
    }

    std::pair<Val32, Val32> getFieldPreAndPostPadding(unsigned idx) const {
      if (!mRecord || idx >= mRecord->mFieldCount)
        return std::make_pair(NoVal32(), NoVal32());
      const File::Record::Field &field = mRecord->mFields[idx];
      return std::make_pair(Val32(true, field.mPrePadding),
                            Val32(true, field.mPostPadding));
    }

    std::pair<Val32, Val32> getFieldOffsetAndStoreSize(unsigned idx) const {
      if (!mRecord || idx >= mRecord->mFieldCount)
        return std::make_pair(NoVal32(), NoVal32());
      const File::Record::Field &field = mRecord->mFields[idx];
      return std::make_pair(Val32(true, field.mOffset),
                            Val32(true, field.mStoreSize));
    }

   private:
    Record32(const File::Record *Record) : mRecord(Record) { }
    const File::Record *mRecord;
  };
};

}  // namespace slang

#endif  // _FRAMEWORKS_COMPILE_SLANG_REFLECTION_STATE_H_  NOLINT