/*
 *
 * (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * SPDX-License-Identifier: GPL-2.0
 *
 */

#ifndef KBASE_H_
#define KBASE_H_

#include <mali_malisw.h>

#include <mali_kbase_debug.h>

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
#include <linux/sched/mm.h>
#endif
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

#include "mali_base_kernel.h"
#include <mali_kbase_linux.h>

/*
 * Include mali_kbase_defs.h first as this provides types needed by other local
 * header files.
 */
#include "mali_kbase_defs.h"

#include "debug/mali_kbase_debug_ktrace.h"
#include "context/mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
#include "mali_kbase_utility.h"
#include "mali_kbase_mem.h"
#include "mmu/mali_kbase_mmu.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_gpuprops.h"
#include "mali_kbase_ioctl.h"
#if !MALI_USE_CSF
#include "mali_kbase_debug_job_fault.h"
#include "mali_kbase_jd_debugfs.h"
#include "mali_kbase_jm.h"
#include "mali_kbase_js.h"
#endif /* !MALI_USE_CSF */

#include "ipa/mali_kbase_ipa.h"

#ifdef CONFIG_GPU_TRACEPOINTS
#include <trace/events/gpu.h>
#endif

#include "mali_linux_trace.h"

#if MALI_USE_CSF
#include "csf/mali_kbase_csf.h"
#endif

#ifndef u64_to_user_ptr
/* Introduced in Linux v4.6 */
#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))
#endif
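
/*
 * Illustrative sketch of u64_to_user_ptr() at an ioctl boundary (the
 * function and parameter names here are hypothetical, not part of this API):
 *
 *     static int example_copy_in(u64 user_addr, void *dst, size_t len)
 *     {
 *         void __user *uptr = u64_to_user_ptr(user_addr);
 *
 *         if (copy_from_user(dst, uptr, len))
 *             return -EFAULT;
 *         return 0;
 *     }
 *
 * Userspace passes pointers as u64 so the ioctl ABI is identical for
 * 32-bit and 64-bit processes; the macro recovers a __user pointer.
 */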

#if MALI_USE_CSF
/* Physical memory group ID for command stream frontend user I/O.
 */
#define KBASE_MEM_GROUP_CSF_IO BASE_MEM_GROUP_DEFAULT

/* Physical memory group ID for command stream frontend firmware.
 */
#define KBASE_MEM_GROUP_CSF_FW BASE_MEM_GROUP_DEFAULT
#endif

/* Physical memory group ID for a special page which can alias several regions.
 */
#define KBASE_MEM_GROUP_SINK BASE_MEM_GROUP_DEFAULT

/*
 * Kernel-side Base (KBase) APIs
 */

struct kbase_device *kbase_device_alloc(void);
/*
 * Note: the configuration attributes member of kbdev must have been set up
 * before kbase_device_init() is called.
 */

int kbase_device_misc_init(struct kbase_device *kbdev);
void kbase_device_misc_term(struct kbase_device *kbdev);
void kbase_device_free(struct kbase_device *kbdev);
int kbase_device_has_feature(struct kbase_device *kbdev, u32 feature);
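
/*
 * Illustrative probe-time ordering sketch (hypothetical caller; real
 * error handling and the platform-specific configuration step elided):
 *
 *     static int example_probe(void)
 *     {
 *         struct kbase_device *kbdev = kbase_device_alloc();
 *
 *         if (!kbdev)
 *             return -ENOMEM;
 *
 *         // configuration attributes must be set up here (see note above)
 *         if (kbase_device_misc_init(kbdev)) {
 *             kbase_device_free(kbdev);
 *             return -EINVAL;
 *         }
 *         return 0;
 *     }
 *
 * Teardown reverses the order: kbase_device_misc_term(), then
 * kbase_device_free().
 */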

/* Needed for gator integration and for reporting vsync information */
struct kbase_device *kbase_find_device(int minor);
void kbase_release_device(struct kbase_device *kbdev);

/**
 * kbase_context_get_unmapped_area() - get an address range which is currently
 *                                     unmapped.
 * @kctx: A kernel base context (which has its own GPU address space).
 * @addr: CPU mapped address (set to 0 since MAP_FIXED mapping is not allowed
 *        as the Mali GPU driver decides about the mapping).
 * @len: Length of the address range.
 * @pgoff: Page offset within the GPU address space of the kbase context.
 * @flags: Flags for the allocation.
 *
 * Finds the unmapped address range which satisfies requirements specific to
 * the GPU and those provided by the call parameters.
 *
 * 1) Requirement for allocations greater than 2MB:
 * - the alignment offset is set to 2MB and the alignment mask to 2MB
 * decremented by 1.
 *
 * 2) Requirements imposed for the shader memory alignment:
 * - the alignment is decided by the number of GPU pc bits, which can be read
 * from the GPU properties of the device associated with this kbase context;
 * the alignment offset is set to this value in bytes and the alignment mask
 * to the offset decremented by 1.
 * - allocations must not be at 4GB boundaries. Such cases are indicated by
 * the flag KBASE_REG_GPU_NX not being set (check the flags of the kbase
 * region). 4GB boundaries can be checked against @ref BASE_MEM_MASK_4GB.
 *
 * 3) Requirements imposed for tiler memory alignment, cases indicated by
 * the flag @ref KBASE_REG_TILER_ALIGN_TOP (check the flags of the kbase
 * region):
 * - the alignment offset is set to the difference between the kbase region
 * extent (converted from the original value in pages to bytes) and the kbase
 * region initial_commit (also converted from the original value in pages to
 * bytes); the alignment mask is set to the kbase region extent in bytes,
 * decremented by 1.
 *
 * Return: if successful, address of the unmapped area aligned as required;
 *         a negative error code in case of failure.
 */
unsigned long kbase_context_get_unmapped_area(struct kbase_context *kctx, const unsigned long addr,
                                              const unsigned long len, const unsigned long pgoff,
                                              const unsigned long flags);
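
/*
 * Illustrative sketch of rule 1 above, assuming SZ_2M from
 * <linux/sizes.h> and a struct vm_unmapped_area_info style of search:
 *
 *     if (len > SZ_2M) {
 *         info.align_offset = SZ_2M;
 *         info.align_mask = SZ_2M - 1;
 *     }
 *
 * so the address returned for a large allocation is 2MB aligned.
 */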

int assign_irqs(struct kbase_device *kbdev);

int kbase_sysfs_init(struct kbase_device *kbdev);
void kbase_sysfs_term(struct kbase_device *kbdev);

int kbase_protected_mode_init(struct kbase_device *kbdev);
void kbase_protected_mode_term(struct kbase_device *kbdev);

/**
 * kbase_device_pm_init() - Performs power management initialization and
 * verifies device tree configuration.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Return: 0 if successful, otherwise a standard Linux error code
 */
int kbase_device_pm_init(struct kbase_device *kbdev);

/**
 * kbase_device_pm_term() - Performs power management deinitialization and
 * frees resources.
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Cleans up all the power management resources.
 */
void kbase_device_pm_term(struct kbase_device *kbdev);

int power_control_init(struct kbase_device *kbdev);
void power_control_term(struct kbase_device *kbdev);

#ifdef CONFIG_DEBUG_FS
void kbase_device_debugfs_term(struct kbase_device *kbdev);
int kbase_device_debugfs_init(struct kbase_device *kbdev);
#else  /* CONFIG_DEBUG_FS */
static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
{
    return 0;
}

static inline void kbase_device_debugfs_term(struct kbase_device *kbdev)
{
}
#endif /* CONFIG_DEBUG_FS */

int registers_map(struct kbase_device *const kbdev);
void registers_unmap(struct kbase_device *kbdev);

int kbase_device_coherency_init(struct kbase_device *kbdev);

#ifdef CONFIG_MALI_BUSLOG
int buslog_init(struct kbase_device *kbdev);
void buslog_term(struct kbase_device *kbdev);
#endif

#if !MALI_USE_CSF
int kbase_jd_init(struct kbase_context *kctx);
void kbase_jd_exit(struct kbase_context *kctx);

/**
 * kbase_jd_submit - Submit atoms to the job dispatcher
 *
 * @kctx: The kbase context to submit to
 * @user_addr: The address in user space of the struct base_jd_atom array
 * @nr_atoms: The number of atoms in the array
 * @stride: sizeof(struct base_jd_atom)
 * @uk6_atom: true if the atoms are legacy atoms (struct base_jd_atom_v2_uk6)
 *
 * Return: 0 on success or a negative error code on failure
 */
int kbase_jd_submit(struct kbase_context *kctx, void __user *user_addr, u32 nr_atoms, u32 stride, bool uk6_atom);
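
/*
 * Illustrative call from the ioctl layer, assuming a job-submit argument
 * struct carrying addr/nr_atoms/stride fields as in mali_kbase_ioctl.h:
 *
 *     static int example_submit(struct kbase_context *kctx,
 *                               struct kbase_ioctl_job_submit *job)
 *     {
 *         return kbase_jd_submit(kctx, u64_to_user_ptr(job->addr),
 *                                job->nr_atoms, job->stride, false);
 *     }
 *
 * @stride allows older userspace to pass smaller base_jd_atom layouts:
 * the dispatcher walks the user array in steps of @stride bytes.
 */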

/**
 * kbase_jd_done_worker - Handle a job completion
 * @data: a &struct work_struct
 *
 * This function requeues the job from the runpool (if it was soft-stopped or
 * removed from NEXT registers).
 *
 * Removes it from the system if it finished/failed/was cancelled.
 *
 * Resolves dependencies to add dependent jobs to the context, potentially
 * starting them if necessary (which may add more references to the context).
 *
 * Releases the reference to the context from the no-longer-running job.
 *
 * Handles retrying submission outside of IRQ context if it failed from within
 * IRQ context.
 */
void kbase_jd_done_worker(struct work_struct *data);

void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr, ktime_t *end_timestamp,
                   kbasep_js_atom_done_code done_code);
void kbase_jd_cancel(struct kbase_device *kbdev, struct kbase_jd_atom *katom);
void kbase_jd_zap_context(struct kbase_context *kctx);
bool jd_done_nolock(struct kbase_jd_atom *katom, struct list_head *completed_jobs_ctx);
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom);
void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom);

/**
 * kbase_job_done - Process completed jobs from job interrupt
 * @kbdev: Pointer to the kbase device.
 * @done: Bitmask of done or failed jobs, from the JOB_IRQ_STAT register
 *
 * This function processes the completed, or failed, jobs from the GPU job
 * slots, for the bits set in the @done bitmask.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_done(struct kbase_device *kbdev, u32 done);
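
/*
 * Illustrative sketch of the calling convention (in a real IRQ handler
 * the @done bitmask would be read from the JOB_IRQ_STAT register):
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *     kbase_job_done(kbdev, done);
 *     spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 */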

/**
 * kbase_job_slot_ctx_priority_check_locked() - Check for lower priority atoms
 *                                              and soft stop them
 * @kctx: Pointer to context to check.
 * @katom: Pointer to priority atom.
 *
 * Atoms from @kctx on the same job slot as @katom, which have lower priority
 * than @katom, will be soft stopped and put back in the queue, so that atoms
 * with higher priority can run.
 *
 * The hwaccess_lock must be held when calling this function.
 */
void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx, struct kbase_jd_atom *katom);

/**
 * kbase_job_slot_softstop_start_rp() - Soft-stop the atom at the start
 *                                      of a renderpass.
 * @kctx: Pointer to a kernel base context.
 * @reg:  Reference of a growable GPU memory region in the same context.
 *        Takes ownership of the reference if successful.
 *
 * Used to switch to incremental rendering if we have nearly run out of
 * virtual address space in a growable memory region and the atom currently
 * executing on a job slot is the tiler job chain at the start of a renderpass.
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
int kbase_job_slot_softstop_start_rp(struct kbase_context *kctx, struct kbase_va_region *reg);

void kbase_job_slot_softstop(struct kbase_device *kbdev, int js, struct kbase_jd_atom *target_katom);
void kbase_job_slot_softstop_swflags(struct kbase_device *kbdev, int js, struct kbase_jd_atom *target_katom,
                                     u32 sw_flags);
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js, struct kbase_jd_atom *target_katom);
void kbase_job_check_enter_disjoint(struct kbase_device *kbdev, u32 action, base_jd_core_req core_reqs,
                                    struct kbase_jd_atom *target_katom);
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev, struct kbase_jd_atom *target_katom);

#endif /* !MALI_USE_CSF */

void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom);
#if !MALI_USE_CSF
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent);
#endif /* !MALI_USE_CSF */
int kbase_event_pending(struct kbase_context *ctx);
int kbase_event_init(struct kbase_context *kctx);
void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);

/**
 * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
 *
 * @kctx: Pointer to the kbase context within which the JIT
 *        allocation is to be validated.
 * @info: Pointer to the struct base_jit_alloc_info
 *        which is to be validated.
 *
 * Return: 0 if the JIT allocation is valid; negative error code otherwise.
 */
int kbasep_jit_alloc_validate(struct kbase_context *kctx, struct base_jit_alloc_info *info);

/**
 * kbase_jit_retry_pending_alloc() - Retry blocked just-in-time memory
 *                                   allocations.
 *
 * @kctx: Pointer to the kbase context within which the just-in-time
 *        memory allocations are to be retried.
 */
void kbase_jit_retry_pending_alloc(struct kbase_context *kctx);

/**
 * kbase_free_user_buffer() - Free memory allocated for struct
 *        kbase_debug_copy_buffer.
 *
 * @buffer: Pointer to the memory location allocated for the object
 *        of the type struct kbase_debug_copy_buffer.
 */
static inline void kbase_free_user_buffer(struct kbase_debug_copy_buffer *buffer)
{
    struct page **pages = buffer->extres_pages;
    int nr_pages = buffer->nr_extres_pages;

    if (pages) {
        int i;

        /* Drop the reference taken on each page when the buffer was
         * pinned, then free the page-pointer array itself.
         */
        for (i = 0; i < nr_pages; i++) {
            struct page *pg = pages[i];

            if (pg) {
                put_page(pg);
            }
        }
        kfree(pages);
    }
}

/**
 * kbase_mem_copy_from_extres() - Copy from external resources.
 *
 * @kctx: kbase context within which the copying is to take place.
 * @buf_data: Pointer to the information about the external resource:
 *        the pages pertaining to it and the number of pages to copy.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int kbase_mem_copy_from_extres(struct kbase_context *kctx, struct kbase_debug_copy_buffer *buf_data);
#if !MALI_USE_CSF
int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
void kbase_cancel_soft_job(struct kbase_jd_atom *katom);
void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev);
void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom);
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom);
#endif
int kbase_soft_event_update(struct kbase_context *kctx, u64 event, unsigned char new_status);

void kbasep_soft_job_timeout_worker(struct timer_list *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
#endif /* !MALI_USE_CSF */

void kbasep_as_do_poke(struct work_struct *work);

/**
 * kbase_pm_is_suspending - Check whether a system suspend is in progress
 * or has already completed
 * @kbdev: The kbase device structure for the device
 *
 * The caller should ensure that either kbdev->pm.active_count_lock is held,
 * or a dmb was executed recently (to ensure the value read is up to date).
 * However, without a lock the value could change afterwards.
 *
 * Return: false if a suspend is not in progress, a non-false value otherwise
 */
static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
{
    return kbdev->pm.suspending;
}
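
/*
 * Illustrative usage sketch; the -EAGAIN policy is hypothetical, and the
 * result may be stale unless the caller follows the locking rule above:
 *
 *     if (kbase_pm_is_suspending(kbdev))
 *         return -EAGAIN;  // do not start new GPU work
 */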

#ifdef CONFIG_MALI_ARBITER_SUPPORT
/*
 * Check whether a GPU-lost event is in progress
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * Indicates whether a GPU-lost notification has been received and jobs are
 * no longer being scheduled.
 *
 * Return: true if the GPU has been lost, false otherwise.
 */
static inline bool kbase_pm_is_gpu_lost(struct kbase_device *kbdev)
{
    return (atomic_read(&kbdev->pm.gpu_lost) == 0 ? false : true);
}

/*
 * Set or clear GPU-lost state
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 * @gpu_lost: true to activate the GPU-lost state, false to deactivate it
 *
 * Puts the power management code into the GPU-lost state or takes it out of
 * the state. Once in the GPU-lost state new GPU jobs will no longer be
 * scheduled.
 */
static inline void kbase_pm_set_gpu_lost(struct kbase_device *kbdev, bool gpu_lost)
{
    atomic_set(&kbdev->pm.gpu_lost, (gpu_lost ? 1 : 0));
}
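
/*
 * Illustrative arbiter-flow sketch (the surrounding event handling is
 * assumed, not defined by this header):
 *
 *     kbase_pm_set_gpu_lost(kbdev, true);   // on a GPU-lost notification
 *
 *     if (kbase_pm_is_gpu_lost(kbdev))
 *         return;                           // skip scheduling new jobs
 *
 *     kbase_pm_set_gpu_lost(kbdev, false);  // once the GPU is granted back
 */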
#endif

/**
 * kbase_pm_is_active - Determine whether the GPU is active
 *
 * @kbdev: The kbase device structure for the device (must be a valid pointer)
 *
 * This takes into account whether there is an active context reference.
 *
 * Return: true if the GPU is active, false otherwise
 */
static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
{
    return kbdev->pm.active_count > 0;
}

/**
 * kbase_pm_metrics_start - Start the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to start the utilization
 *         metrics calculation thread.
 *
 * Start the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_start(struct kbase_device *kbdev);

/**
 * kbase_pm_metrics_stop - Stop the utilization metrics timer
 * @kbdev: Pointer to the kbase device for which to stop the utilization
 *         metrics calculation thread.
 *
 * Stop the timer that drives the metrics calculation and runs the custom
 * DVFS.
 */
void kbase_pm_metrics_stop(struct kbase_device *kbdev);
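
/*
 * Illustrative pairing sketch: the two calls bracket the period during
 * which utilization metrics (and the custom DVFS) should run:
 *
 *     kbase_pm_metrics_start(kbdev);  // e.g. when DVFS is enabled
 *     ...
 *     kbase_pm_metrics_stop(kbdev);   // e.g. when DVFS is disabled
 */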

#if !MALI_USE_CSF
/**
 * kbase_jd_atom_id - Return the atom's ID, as originally supplied by
 * userspace in base_jd_atom::atom_number
 * @kctx:  Context pointer
 * @katom: Atom pointer
 *
 * Return: the atom's ID, i.e. its index in the context's atom array
 */
static inline int kbase_jd_atom_id(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
    int result;

    KBASE_DEBUG_ASSERT(kctx);
    KBASE_DEBUG_ASSERT(katom);
    KBASE_DEBUG_ASSERT(katom->kctx == kctx);

    /* The ID is the atom's index within the context's atom array. */
    result = katom - &kctx->jctx.atoms[0];
    KBASE_DEBUG_ASSERT(result >= 0 && result < BASE_JD_ATOM_COUNT);
    return result;
}

/**
 * kbase_jd_atom_from_id - Return the atom structure for the given atom ID
 * @kctx: Context pointer
 * @id:   ID of atom to retrieve
 *
 * Return: Pointer to struct kbase_jd_atom associated with the supplied ID
 */
static inline struct kbase_jd_atom *kbase_jd_atom_from_id(struct kbase_context *kctx, int id)
{
    return &kctx->jctx.atoms[id];
}
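
/*
 * The two helpers above are inverses; an illustrative round trip:
 *
 *     int id = kbase_jd_atom_id(kctx, katom);
 *     struct kbase_jd_atom *same = kbase_jd_atom_from_id(kctx, id);
 *
 *     WARN_ON(same != katom);
 *
 * Note that kbase_jd_atom_from_id() performs no bounds checking, so @id
 * must be a value previously returned by kbase_jd_atom_id() for the same
 * context.
 */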
#endif /* !MALI_USE_CSF */

/**
 * Initialize the disjoint state
 *
 * The disjoint event count and state are both set to zero.
 *
 * Disjoint functions usage:
 * The disjoint event count should be incremented whenever a disjoint event occurs.
 *
 * There are several cases which are regarded as disjoint behavior. Rather than just incrementing
 * the counter during disjoint events, we also increment the counter when jobs may be affected
 * by what the GPU is currently doing. To facilitate this we have the concept of disjoint state.
 *
 * Disjoint state is entered during GPU reset. Increasing the disjoint state also increases
 * the count of disjoint events.
 *
 * The disjoint state is then used to increase the count of disjoint events during job submission
 * and job completion. Any atom submitted or completed while the disjoint state is greater than
 * zero is regarded as a disjoint event.
 *
 * The disjoint event counter is also incremented immediately whenever a job is soft stopped
 * and during context creation.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_init(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events.
 * Called when a disjoint event has happened.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event(struct kbase_device *kbdev);

/**
 * Increase the count of disjoint events only if the GPU is in a disjoint state
 *
 * This should be called when something happens which could be disjoint if the GPU
 * is in a disjoint state. The state refcount keeps track of this.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_event_potential(struct kbase_device *kbdev);

/**
 * Returns the count of disjoint events
 *
 * @param kbdev The kbase device
 * @return the count of disjoint events
 */
u32 kbase_disjoint_event_get(struct kbase_device *kbdev);

/**
 * Increment the refcount state indicating that the GPU is in a disjoint state.
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 * Eventually, after the disjoint state has completed, @ref kbase_disjoint_state_down
 * should be called.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_up(struct kbase_device *kbdev);

/**
 * Decrement the refcount state
 *
 * Also increments the disjoint event count (calls @ref kbase_disjoint_event).
 *
 * Called after @ref kbase_disjoint_state_up once the disjoint state is over.
 *
 * @param kbdev The kbase device
 */
void kbase_disjoint_state_down(struct kbase_device *kbdev);
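
/*
 * Illustrative sketch of the call pattern described above:
 *
 *     kbase_disjoint_state_up(kbdev);    // entering GPU reset
 *     ...
 *     kbase_disjoint_state_down(kbdev);  // reset complete
 *
 *     // elsewhere, on job submission or completion:
 *     kbase_disjoint_event_potential(kbdev);  // counts only if state > 0
 */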

/**
 * If a job is soft stopped and the number of contexts is >= this value
 * it is reported as a disjoint event
 */
#define KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD 2

#if !defined(UINT64_MAX)
#define UINT64_MAX ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#endif

#endif /* KBASE_H_ */