// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_

#include <atomic>
#include <cstdint>

#include "base/allocator/partition_allocator/dangling_raw_ptr_checks.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"

namespace partition_alloc::internal {

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// Special-purpose atomic reference count class used by RawPtrBackupRefImpl.
// The least significant bit of the count is reserved for tracking the liveness
// state of an allocation: it's set when the allocation is created and cleared
// on free(). So the count can be:
//
// 1 for an allocation that is just returned from Alloc()
// 2 * k + 1 for a "live" allocation with k references
// 2 * k for an allocation with k dangling references after Free()
//
// This protects against double-frees: |ReleaseFromAllocator()| checks that the
// reference count is odd, and if it isn't, a double-free has occurred.
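//
// For illustration, a sketch of a typical lifecycle under this scheme:
//
//   void* p = Alloc();     // count == 1      (allocated, no references)
//   raw_ptr<T> ptr = ...;  // count == 2*1+1  (one live reference)
//   Free(p);               // count == 2*1    (ptr is now dangling)
//   ptr = nullptr;         // count == 0      (slot can be reclaimed)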
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
 public:
  // This class holds an atomic bit field: `count_`. It holds up to 5 values:
  //
  // bits   name                   description
  // -----  ---------------------  ----------------------------------------
  // 0      is_allocated           Whether or not the memory is held by the
  //                               allocator.
  //                               - 1 at construction time.
  //                               - Decreased in ReleaseFromAllocator().
  //
  // 1-31   ptr_count              Number of raw_ptr<T>.
  //                               - Increased in Acquire()
  //                               - Decreased in Release()
  //
  // 32     dangling_detected      A dangling raw_ptr<> has been detected.
  // 33     needs_mac11_malloc_    Whether malloc_size() return value needs to
  //          size_hack            be adjusted for this allocation.
  //
  // 34-63  unprotected_ptr_count  Number of
  //                               raw_ptr<T, DisableDanglingPtrDetection>
  //                               - Increased in AcquireFromUnprotectedPtr().
  //                               - Decreased in ReleaseFromUnprotectedPtr().
  //
  // The allocation is reclaimed if all of:
  // - |is_allocated|
  // - |ptr_count|
  // - |unprotected_ptr_count|
  // are zero.
  //
  // During ReleaseFromAllocator(), if |ptr_count| is not zero,
  // |dangling_detected| is set and the error is reported via
  // DanglingRawPtrDetected(id). The matching DanglingRawPtrReleased(id) will be
  // called when the last raw_ptr<> is released.
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
  using CountType = uint64_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0000'0000'0001;
  static constexpr CountType kPtrCountMask = 0x0000'0000'FFFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0xFFFF'FFFC'0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0001'0000'0000;
  static constexpr CountType kNeedsMac11MallocSizeHackBit =
      0x0000'0002'0000'0000;

  static constexpr CountType kPtrInc = 0x0000'0000'0000'0002;
  static constexpr CountType kUnprotectedPtrInc = 0x0000'0004'0000'0000;
#else
  using CountType = uint32_t;
  static constexpr CountType kMemoryHeldByAllocatorBit = 0x0000'0001;

  static constexpr CountType kPtrCountMask = 0x7FFF'FFFE;
  static constexpr CountType kUnprotectedPtrCountMask = 0x0000'0000;
  static constexpr CountType kDanglingRawPtrDetectedBit = 0x0000'0000;
  static constexpr CountType kNeedsMac11MallocSizeHackBit = 0x8000'0000;

  static constexpr CountType kPtrInc = 0x0000'0002;
#endif

  PA_ALWAYS_INLINE explicit PartitionRefCount(
      bool needs_mac11_malloc_size_hack);

  // Incrementing the counter doesn't imply any visibility about modified
  // memory, hence relaxed atomics. For decrement, visibility is required before
  // the memory gets freed, necessitating an acquire/release barrier before
  // freeing the memory.
  //
  // For details, see base::AtomicRefCount, which has the same constraints and
  // characteristics.
  //
  // FYI: the assembly produced by the compiler on every platform (in
  // particular, the uint64_t fetch_add on 32-bit CPUs) is collected in
  // https://docs.google.com/document/d/1cSTVDVEE-8l2dXLPcfyN75r6ihMbeiSp1ncL9ae3RZE
  PA_ALWAYS_INLINE void Acquire() {
    CheckCookieIfSupported();

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
    constexpr CountType kInc = kUnprotectedPtrInc;
    constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
    constexpr CountType kInc = kPtrInc;
    constexpr CountType kMask = kPtrCountMask;
#endif
    CountType old_count = count_.fetch_add(kInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kMask) != kMask);
  }

  // Similar to |Acquire()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE void AcquireFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();
    CountType old_count =
        count_.fetch_add(kUnprotectedPtrInc, std::memory_order_relaxed);
    // Check overflow.
    PA_CHECK((old_count & kUnprotectedPtrCountMask) !=
             kUnprotectedPtrCountMask);
#else
    Acquire();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  PA_ALWAYS_INLINE bool Release() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT)
    constexpr CountType kInc = kUnprotectedPtrInc;
    constexpr CountType kMask = kUnprotectedPtrCountMask;
#else
    constexpr CountType kInc = kPtrInc;
    constexpr CountType kMask = kPtrCountMask;
#endif
    CheckCookieIfSupported();

    CountType old_count = count_.fetch_sub(kInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kMask);

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // If a dangling raw_ptr<> was detected, report it.
    if (PA_UNLIKELY((old_count & kDanglingRawPtrDetectedBit) ==
                    kDanglingRawPtrDetectedBit)) {
      partition_alloc::internal::DanglingRawPtrReleased(
          reinterpret_cast<uintptr_t>(this));
    }
#endif

    return ReleaseCommon(old_count - kInc);
  }

  // Similar to |Release()|, but for raw_ptr<T, DisableDanglingPtrDetection>
  // instead of raw_ptr<T>.
  PA_ALWAYS_INLINE bool ReleaseFromUnprotectedPtr() {
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    CheckCookieIfSupported();

    CountType old_count =
        count_.fetch_sub(kUnprotectedPtrInc, std::memory_order_release);
    // Check underflow.
    PA_DCHECK(old_count & kUnprotectedPtrCountMask);

    return ReleaseCommon(old_count - kUnprotectedPtrInc);
#else
    return Release();
#endif
  }

  // Returns true if the allocation should be reclaimed.
  // This function should be called by the allocator during Free().
  PA_ALWAYS_INLINE bool ReleaseFromAllocator() {
    CheckCookieIfSupported();

    // TODO(bartekn): Make the double-free check more effective. Once freed, the
    // ref-count is overwritten by an encoded freelist-next pointer.
    CountType old_count =
        count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);

    if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
      DoubleFreeOrCorruptionDetected(old_count);
    }

    if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) ==
                  kMemoryHeldByAllocatorBit)) {
      std::atomic_thread_fence(std::memory_order_acquire);
      // The allocation is about to get freed, so clear the cookie.
      ClearCookieIfSupported();
      return true;
    }

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    // Check if any raw_ptr<> still exists. It is now dangling.
    if (PA_UNLIKELY(old_count & kPtrCountMask)) {
      count_.fetch_or(kDanglingRawPtrDetectedBit, std::memory_order_relaxed);
      partition_alloc::internal::DanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
    return false;
  }

  // "IsAlive" means is allocated and not freed. "KnownRefs" refers to
  // raw_ptr<T> references. There may be other references from raw pointers or
  // unique_ptr, but we have no way of tracking them, so we hope for the best.
  // To summarize, the function returns whether we believe the allocation can be
  // safely freed.
  PA_ALWAYS_INLINE bool IsAliveWithNoKnownRefs() {
    CheckCookieIfSupported();
    return (count_.load(std::memory_order_acquire) &
            ~kNeedsMac11MallocSizeHackBit) == kMemoryHeldByAllocatorBit;
  }

  PA_ALWAYS_INLINE bool IsAlive() {
    bool alive =
        count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
    if (alive) {
      CheckCookieIfSupported();
    }
    return alive;
  }

  // Called when a raw_ptr is not banning dangling ptrs, but the user still
  // wants to ensure the pointer is not currently dangling. This is currently
  // used in UnretainedWrapper to make sure callbacks are not invoked with
  // dangling pointers. If such a raw_ptr exists but the allocation is no longer
  // alive, then we have a dangling pointer to a dead object.
  PA_ALWAYS_INLINE void ReportIfDangling() {
    if (!IsAlive()) {
      partition_alloc::internal::UnretainedDanglingRawPtrDetected(
          reinterpret_cast<uintptr_t>(this));
    }
  }

  // GWP-ASan slots are assigned an extra reference (note `kPtrInc` below) to
  // make sure the `raw_ptr<T>` release operation will never attempt to call the
  // PA `free` on such a slot. GWP-ASan takes the extra reference into account
  // when determining whether the slot can be reused.
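  //
  // In other words (a sketch of the intended flow): InitalizeForGwpAsan()
  // below stores `kPtrInc | kMemoryHeldByAllocatorBit`, so even after the last
  // user raw_ptr<T> calls Release(), the count never reaches zero and the
  // raw_ptr<T> side never frees the slot; CanBeReusedByGwpAsan() then reports
  // true once only that extra reference (plus the allocator bit) remains.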
  PA_ALWAYS_INLINE void InitalizeForGwpAsan() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    brp_cookie_ = CalculateCookie();
#endif
    count_.store(kPtrInc | kMemoryHeldByAllocatorBit,
                 std::memory_order_release);
  }

  PA_ALWAYS_INLINE bool CanBeReusedByGwpAsan() {
    return (count_.load(std::memory_order_acquire) &
            ~kNeedsMac11MallocSizeHackBit) ==
           (kPtrInc | kMemoryHeldByAllocatorBit);
  }

  bool NeedsMac11MallocSizeHack() {
    return count_.load(std::memory_order_relaxed) &
           kNeedsMac11MallocSizeHackBit;
  }

#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
  PA_ALWAYS_INLINE void SetRequestedSize(size_t size) {
    requested_size_ = static_cast<uint32_t>(size);
  }
  PA_ALWAYS_INLINE uint32_t requested_size() const { return requested_size_; }
#endif  // PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)

 private:
  // The common part shared by Release() and ReleaseFromUnprotectedPtr().
  // Called after updating the ref counts; |count| is the new value of |count_|
  // set by fetch_sub. Returns true if memory can be reclaimed.
  PA_ALWAYS_INLINE bool ReleaseCommon(CountType count) {
    // Do not release memory if it is still held by any of:
    // - The allocator
    // - A raw_ptr<T>
    // - A raw_ptr<T, DisableDanglingPtrDetection>
    //
    // Assuming this raw_ptr is not dangling, the memory must still be held at
    // least by the allocator, so this is PA_LIKELY true.
    if (PA_LIKELY((count & (kMemoryHeldByAllocatorBit | kPtrCountMask |
                            kUnprotectedPtrCountMask)))) {
      return false;  // Do not release the memory.
    }

    // In most thread-safe reference count implementations, an acquire
    // barrier is required so that all changes made to an object from other
    // threads are visible to its destructor. In our case, the destructor
    // finishes before the final `Release` call, so it shouldn't be a problem.
    // However, we will keep it as a precautionary measure.
    std::atomic_thread_fence(std::memory_order_acquire);

    // The allocation is about to get freed, so clear the cookie.
    ClearCookieIfSupported();
    return true;
  }

  // The cookie helps us ensure that:
  // 1) The reference count pointer calculation is correct.
  // 2) The returned allocation slot is not freed.
  PA_ALWAYS_INLINE void CheckCookieIfSupported() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    PA_CHECK(brp_cookie_ == CalculateCookie());
#endif
  }

  PA_ALWAYS_INLINE void ClearCookieIfSupported() {
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
    brp_cookie_ = 0;
#endif
  }

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
  PA_ALWAYS_INLINE uint32_t CalculateCookie() {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)) ^
           kCookieSalt;
  }
#endif  // PA_CONFIG(REF_COUNT_CHECK_COOKIE)

  [[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void
  DoubleFreeOrCorruptionDetected(CountType count) {
    PA_DEBUG_DATA_ON_STACK("refcount", count);
    PA_NO_CODE_FOLDING();
    PA_IMMEDIATE_CRASH();
  }

  // Note that in free slots, this is overwritten by encoded freelist
  // pointer(s). The way the pointers are encoded on 64-bit little-endian
  // architectures, count_ happens to stay even, which works well with the
  // double-free detection in ReleaseFromAllocator(). Don't change the layout of
  // this class, to preserve this functionality.
  std::atomic<CountType> count_;

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
  static constexpr uint32_t kCookieSalt = 0xc01dbeef;
  volatile uint32_t brp_cookie_;
#endif

#if PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
  uint32_t requested_size_;
#endif
};

PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
    bool needs_mac11_malloc_size_hack)
    : count_(kMemoryHeldByAllocatorBit |
             (needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
      ,
      brp_cookie_(CalculateCookie())
#endif
{
}
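
// A minimal usage sketch (illustrative only; the real call sites live in
// PartitionAlloc's free path and in RawPtrBackupRefImpl):
//
//   PartitionRefCount* ref_count = PartitionRefCountPointer(slot_start);
//   ref_count->Acquire();                     // a raw_ptr<T> starts pointing
//                                             // into the slot.
//   ...
//   if (ref_count->ReleaseFromAllocator()) {  // the user called Free().
//     // Reclaim the slot: no raw_ptr<T> references it anymore.
//   }
//   ...
//   if (ref_count->Release()) {               // the last raw_ptr<T> is gone.
//     // Reclaim the slot now: it was freed while still referenced.
//   }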

#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(kAlignment % alignof(PartitionRefCount) == 0,
              "kAlignment must be a multiple of alignof(PartitionRefCount).");

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = sizeof(PartitionRefCount);
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;
constexpr size_t kPartitionPastAllocationAdjustment = 0;

#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#else
static constexpr size_t kPartitionRefCountSizeShift = 3;
#endif

#else  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) && \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 4;
#elif PA_CONFIG(REF_COUNT_CHECK_COOKIE) || \
    PA_CONFIG(REF_COUNT_STORE_REQUESTED_SIZE)
static constexpr size_t kPartitionRefCountSizeShift = 3;
#else
static constexpr size_t kPartitionRefCountSizeShift = 2;
#endif

#endif  // BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)
static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));

// We need one PartitionRefCount for each system page in a super page. They take
// `x = sizeof(PartitionRefCount) * (kSuperPageSize / SystemPageSize())` space.
// They need to fit into a system page of metadata as sparsely as possible to
// minimize cache line sharing, hence we calculate a multiplier as
// `SystemPageSize() / x`.
//
// The multiplier is expressed as a bitshift to optimize the code generation.
// SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit().
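//
// A short derivation of the shift returned below (using the quantities above):
//   multiplier = SystemPageSize() / x
//              = SystemPageSize() * SystemPageSize() /
//                (sizeof(PartitionRefCount) * kSuperPageSize)
//   => shift   = 2 * SystemPageShift() - kSuperPageShift
//                - kPartitionRefCountSizeShift
// For example, with 4 KiB system pages (shift 12), 2 MiB super pages (shift
// 21) and an 8-byte PartitionRefCount (size shift 3), the multiplier shift is
// 2 * 12 - 21 - 3 = 0, i.e. a multiplier of 1.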
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
GetPartitionRefCountIndexMultiplierShift() {
  return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
}

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
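  // Two cases, depending on where the slot starts (a descriptive summary of
  // the branches below): if the slot does not begin on a system page boundary,
  // the ref-count sits at the end of the previous slot, just before
  // |slot_start|. Otherwise it lives in a dedicated ref-count table in the
  // super page's metadata area, indexed by the slot's system page number.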
  if (PA_LIKELY(slot_start & SystemPageOffsetMask())) {
    uintptr_t refcount_address = slot_start - sizeof(PartitionRefCount);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(refcount_address % alignof(PartitionRefCount) == 0);
#endif
    // Have to MTE-tag, because the address is untagged, but lies within a slot
    // area, which is protected by MTE.
    //
    // There could be a race condition though if the previous slot is
    // freed/retagged concurrently, so ideally the ref count should occupy its
    // own MTE granule.
    // TODO(richard.townsend@arm.com): improve this.
    return static_cast<PartitionRefCount*>(TagAddr(refcount_address));
  } else {
    // No need to tag, as the metadata region isn't protected by MTE.
    PartitionRefCount* bitmap_base = reinterpret_cast<PartitionRefCount*>(
        (slot_start & kSuperPageBaseMask) + SystemPageSize() * 2);
    size_t index = ((slot_start & kSuperPageOffsetMask) >> SystemPageShift())
                   << GetPartitionRefCountIndexMultiplierShift();
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    PA_CHECK(sizeof(PartitionRefCount) * index <= SystemPageSize());
#endif
    return bitmap_base + index;
  }
}

#else  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
constexpr size_t kPartitionRefCountOffsetAdjustment = kInSlotRefCountBufferSize;

// This is for adjustment of pointers right past the allocation, which may point
// to the next slot. First subtract 1 to bring them into the intended slot;
// only then can the ref-count in that slot be found.
constexpr size_t kPartitionPastAllocationAdjustment = 1;

PA_ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(
    uintptr_t slot_start) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
  CheckThatSlotOffsetIsZero(slot_start);
#endif
  // Have to MTE-tag, because the address is untagged, but lies within a slot
  // area, which is protected by MTE.
  return static_cast<PartitionRefCount*>(TagAddr(slot_start));
}

#endif  // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)

static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
              "PartitionRefCount should fit into the in-slot buffer.");

#else  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

static constexpr size_t kInSlotRefCountBufferSize = 0;
constexpr size_t kPartitionRefCountOffsetAdjustment = 0;

#endif  // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

constexpr size_t kPartitionRefCountSizeAdjustment = kInSlotRefCountBufferSize;

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_