// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/compiler/cpp/cpp_padding_optimizer.h>

#include <google/protobuf/compiler/cpp/cpp_helpers.h>

namespace google {
namespace protobuf {
namespace compiler {
namespace cpp {

namespace {

// FieldGroup is just a helper for PaddingOptimizer below. It holds a vector of
// fields that are grouped together because they have compatible alignment, and
// a preferred location in the final field ordering.
class FieldGroup {
 public:
  FieldGroup() : preferred_location_(0) {}

  // A group with a single field.
  FieldGroup(float preferred_location, const FieldDescriptor* field)
      : preferred_location_(preferred_location), fields_(1, field) {}

  // Append the fields in 'other' to this group.
  void Append(const FieldGroup& other) {
    if (other.fields_.empty()) {
      return;
    }
    // Preferred location is the average among all the fields, so we weight by
    // the number of fields on each FieldGroup object.
    preferred_location_ = (preferred_location_ * fields_.size() +
                           (other.preferred_location_ * other.fields_.size())) /
                          (fields_.size() + other.fields_.size());
    fields_.insert(fields_.end(), other.fields_.begin(), other.fields_.end());
  }

  void SetPreferredLocation(float location) { preferred_location_ = location; }
  const std::vector<const FieldDescriptor*>& fields() const { return fields_; }

  // FieldGroup objects sort by their preferred location.
  bool operator<(const FieldGroup& other) const {
    return preferred_location_ < other.preferred_location_;
  }

 private:
  // "preferred_location_" is an estimate of where this group should go in the
  // final list of fields. We compute this by taking the average index of each
  // field in this group in the original ordering of fields. This is very
  // approximate, but should put this group close to where its member fields
  // originally went.
  float preferred_location_;
  std::vector<const FieldDescriptor*> fields_;
  // We rely on the default copy constructor and operator= so this type can be
  // used in a vector.
};
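
// A rough usage sketch of FieldGroup (the field descriptors here are
// hypothetical, for illustration only): Append() merges two groups and
// averages their preferred locations, weighted by group size.
//
//   FieldGroup a(2.0f, field_a);  // one field, preferred location 2
//   FieldGroup b(8.0f, field_b);  // one field, preferred location 8
//   a.Append(b);                  // a now holds {field_a, field_b};
//                                 // location = (2*1 + 8*1) / (1 + 1) = 5
//   std::vector<FieldGroup> groups{a};
//   std::stable_sort(groups.begin(), groups.end());  // orders by location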

}  // namespace

// Reorder 'fields' so that if the fields are output into a C++ class in the
// new order, fields of similar family (see below) are together and, within
// each family, alignment padding is minimized.
//
// We try to do this while keeping each field as close as possible to its field
// number order, so that we don't reduce cache locality much for functions that
// access each field in order. Originally, OptimizePadding used declaration
// order for its decisions, but generated code minus the serializer/parsers uses
// the output of OptimizePadding as well (stored in
// MessageGenerator::optimized_order_). Since the serializers use field number
// order, we use that as a tie-breaker.
//
// We classify each field into a particular "family" of fields, on which we
// perform the same operation in our generated functions.
//
// REPEATED is placed first, as the C++ compiler automatically initializes
// these fields in layout order.
//
// STRING is grouped next, as our Clear/SharedCtor/SharedDtor walks it and
// calls ArenaStringPtr::Destroy on each.
//
// LAZY_MESSAGE is grouped next, as it interferes with the ability to memset
// non-repeated fields otherwise.
//
// MESSAGE is grouped next, as our Clear/SharedDtor code walks it and calls
// delete on each. We initialize these fields with a NULL pointer (see
// MessageFieldGenerator::GenerateConstructorCode), which allows them to be
// memset.
//
// ZERO_INITIALIZABLE fields are memset in Clear/SharedCtor.
//
// OTHER fields are initialized one-by-one.
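//
// As a rough illustration (a hypothetical message, not taken from any real
// .proto):
//
//   message M {
//     repeated int32  r = 1;                // REPEATED
//     optional string s = 2;                // STRING
//     optional SubMsg m = 3;                // MESSAGE (or LAZY_MESSAGE)
//     optional int32  x = 4;                // ZERO_INITIALIZABLE (default 0)
//     optional int32  y = 5 [default = 7];  // OTHER (non-zero default)
//   }
//
// The reordered layout would place r first, then s, then m, then x, then y,
// with the alignment grouping below applied inside each family.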
void PaddingOptimizer::OptimizeLayout(
    std::vector<const FieldDescriptor*>* fields, const Options& options) {
  // The sorted numeric order of Family determines the declaration order in the
  // memory layout.
  enum Family {
    REPEATED = 0,
    STRING = 1,
    // Laying out LAZY_MESSAGE before MESSAGE allows a single memset to zero
    // MESSAGE and ZERO_INITIALIZABLE fields together.
    LAZY_MESSAGE = 2,
    MESSAGE = 3,
    ZERO_INITIALIZABLE = 4,
    OTHER = 5,
    kMaxFamily
  };

  // First divide fields into those that align to 1 byte, 4 bytes or 8 bytes.
  std::vector<FieldGroup> aligned_to_1[kMaxFamily];
  std::vector<FieldGroup> aligned_to_4[kMaxFamily];
  std::vector<FieldGroup> aligned_to_8[kMaxFamily];
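
  // For orientation, a sketch of the usual mapping (EstimateAlignmentSize in
  // cpp_helpers.h is the authority): bool fields typically estimate to 1 byte;
  // int32, uint32, enum, and float fields to 4 bytes; and 64-bit scalars,
  // doubles, strings, and messages (stored as pointers) to 8 bytes.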
  for (int i = 0; i < fields->size(); ++i) {
    const FieldDescriptor* field = (*fields)[i];

    Family f = OTHER;
    if (field->is_repeated()) {
      f = REPEATED;
    } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_STRING) {
      f = STRING;
    } else if (field->cpp_type() == FieldDescriptor::CPPTYPE_MESSAGE) {
      f = MESSAGE;
      if (IsLazy(field, options)) {
        f = LAZY_MESSAGE;
      }
    } else if (CanInitializeByZeroing(field)) {
      f = ZERO_INITIALIZABLE;
    }

    const int j = field->number();
    switch (EstimateAlignmentSize(field)) {
      case 1:
        aligned_to_1[f].push_back(FieldGroup(j, field));
        break;
      case 4:
        aligned_to_4[f].push_back(FieldGroup(j, field));
        break;
      case 8:
        aligned_to_8[f].push_back(FieldGroup(j, field));
        break;
      default:
        GOOGLE_LOG(FATAL) << "Unknown alignment size " << EstimateAlignmentSize(field)
                          << " for field " << field->full_name() << ".";
    }
  }

  // For each family, group fields to optimize padding.
  for (int f = 0; f < kMaxFamily; f++) {
    // Now group fields aligned to 1 byte into sets of 4, and treat those like
    // a single field aligned to 4 bytes.
    for (int i = 0; i < aligned_to_1[f].size(); i += 4) {
      FieldGroup field_group;
      for (int j = i; j < aligned_to_1[f].size() && j < i + 4; ++j) {
        field_group.Append(aligned_to_1[f][j]);
      }
      aligned_to_4[f].push_back(field_group);
    }
    // Sort by preferred location to keep fields as close to their field number
    // order as possible. Using stable_sort ensures that the output is
    // consistent across runs.
    std::stable_sort(aligned_to_4[f].begin(), aligned_to_4[f].end());

    // Now group fields aligned to 4 bytes (or the 4-field groups created
    // above) into pairs, and treat those like a single field aligned to 8
    // bytes.
    for (int i = 0; i < aligned_to_4[f].size(); i += 2) {
      FieldGroup field_group;
      for (int j = i; j < aligned_to_4[f].size() && j < i + 2; ++j) {
        field_group.Append(aligned_to_4[f][j]);
      }
      if (i == aligned_to_4[f].size() - 1) {
        if (f == OTHER) {
          // Move incomplete 4-byte block to the beginning. This is done to
          // pair with the (possible) leftover blocks from the
          // ZERO_INITIALIZABLE family.
          field_group.SetPreferredLocation(-1);
        } else {
          // Move incomplete 4-byte block to the end.
          field_group.SetPreferredLocation(fields->size() + 1);
        }
      }
      aligned_to_8[f].push_back(field_group);
    }
    // Sort by preferred location.
    std::stable_sort(aligned_to_8[f].begin(), aligned_to_8[f].end());
  }
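
  // Worked example (hypothetical counts, for illustration only): a family with
  // six 1-byte fields and one 4-byte field first yields two 4-byte groups from
  // the 1-byte fields, {f1,f2,f3,f4} and {f5,f6}, for three 4-byte entries in
  // total. Pairing those into 8-byte blocks leaves one incomplete block, which
  // is pushed to the end of the family (or to the front, for OTHER, so it can
  // share an 8-byte slot with a ZERO_INITIALIZABLE leftover).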

  // Now pull out all the FieldDescriptors in order.
  fields->clear();
  for (int f = 0; f < kMaxFamily; ++f) {
    for (int i = 0; i < aligned_to_8[f].size(); ++i) {
      fields->insert(fields->end(), aligned_to_8[f][i].fields().begin(),
                     aligned_to_8[f][i].fields().end());
    }
  }
}
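
// A minimal caller sketch (hypothetical; in the real generator, OptimizeLayout
// is invoked by MessageGenerator, which keeps the result in optimized_order_):
//
//   std::vector<const FieldDescriptor*> ordered;
//   for (int i = 0; i < descriptor->field_count(); ++i) {
//     ordered.push_back(descriptor->field(i));
//   }
//   PaddingOptimizer().OptimizeLayout(&ordered, options);
//   // 'ordered' now lists fields family-by-family, packed into 8-byte blocks.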

}  // namespace cpp
}  // namespace compiler
}  // namespace protobuf
}  // namespace google