/*
 * Copyright (c) 2017 Lima Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#include <string.h>

#include "util/ralloc.h"

#include "gpir.h"

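/* Allocate a new, empty instruction appended to the block's instruction
 * list and initialize its free-slot bookkeeping (6 ALU slots in total, 5
 * of them outside the complex unit).
 */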
gpir_instr *gpir_instr_create(gpir_block *block)
{
   gpir_instr *instr = rzalloc(block, gpir_instr);
   if (unlikely(!instr))
      return NULL;

   block->comp->num_instr++;
   if (block->comp->num_instr > 512) {
      gpir_error("shader exceeds limit of 512 instructions\n");
      return NULL;
   }

   instr->index = block->sched.instr_index++;
   instr->alu_num_slot_free = 6;
   instr->alu_non_cplx_slot_free = 5;
   instr->alu_max_allowed_next_max = 5;

   list_add(&instr->list, &block->instr_list);
   return instr;
}

static gpir_node *gpir_instr_get_the_other_acc_node(gpir_instr *instr, int slot)
{
   if (slot == GPIR_INSTR_SLOT_ADD0)
      return instr->slots[GPIR_INSTR_SLOT_ADD1];
   else if (slot == GPIR_INSTR_SLOT_ADD1)
      return instr->slots[GPIR_INSTR_SLOT_ADD0];

   return NULL;
}

static bool gpir_instr_check_acc_same_op(gpir_instr *instr, gpir_node *node, int slot)
{
   /* two ACC slots must share the same op code */
   gpir_node *acc_node = gpir_instr_get_the_other_acc_node(instr, slot);

   /* spill move case may get acc_node == node */
   if (acc_node && acc_node != node &&
       !gpir_codegen_acc_same_op(node->op, acc_node->op))
      return false;

   return true;
}

static int gpir_instr_get_consume_slot(gpir_instr *instr, gpir_node *node)
{
   if (gpir_op_infos[node->op].may_consume_two_slots) {
      gpir_node *acc_node = gpir_instr_get_the_other_acc_node(instr, node->sched.pos);
      if (acc_node)
         /* at this point node must have the same acc op as acc_node,
          * so it just reuses the extra slot acc_node already consumed */
         return 0;
      else
         return 2;
   }
   else
      return 1;
}

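/* Check whether placing the node in its chosen ALU slot keeps the scheduler
 * invariants: enough ALU slots must stay free for the children of the
 * already-scheduled store nodes and for the pending max/next-max nodes (see
 * the invariants documented in gpir.h). On success the per-instruction
 * counters are updated; on failure slot_difference/non_cplx_slot_difference
 * record the shortfall for the scheduler.
 */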
static bool gpir_instr_insert_alu_check(gpir_instr *instr, gpir_node *node)
{
   if (!gpir_instr_check_acc_same_op(instr, node, node->sched.pos))
      return false;

   if (node->sched.next_max_node && !node->sched.complex_allowed &&
       node->sched.pos == GPIR_INSTR_SLOT_COMPLEX)
      return false;

   int consume_slot = gpir_instr_get_consume_slot(instr, node);
   int non_cplx_consume_slot =
      node->sched.pos == GPIR_INSTR_SLOT_COMPLEX ? 0 : consume_slot;
   int store_reduce_slot = 0;
   int non_cplx_store_reduce_slot = 0;
   int max_reduce_slot = node->sched.max_node ? 1 : 0;
   int next_max_reduce_slot = node->sched.next_max_node ? 1 : 0;
   int alu_new_max_allowed_next_max =
      node->op == gpir_op_complex1 ? 4 : instr->alu_max_allowed_next_max;

   /* Check if this node is the child of one of this instr's store nodes.
    * complex1 can never be such a child, because it has a two-instruction
    * latency before a store can use it.
    */
   for (int i = GPIR_INSTR_SLOT_STORE0; i <= GPIR_INSTR_SLOT_STORE3; i++) {
      gpir_store_node *s = gpir_node_to_store(instr->slots[i]);
      if (s && s->child == node) {
         store_reduce_slot = 1;
         if (node->sched.next_max_node && !node->sched.complex_allowed)
            non_cplx_store_reduce_slot = 1;
         break;
      }
   }

   /* Check that the invariants will be maintained after we adjust everything
    */

   int slot_difference =
      instr->alu_num_slot_needed_by_store - store_reduce_slot +
      instr->alu_num_slot_needed_by_max - max_reduce_slot +
      MAX2(instr->alu_num_unscheduled_next_max - next_max_reduce_slot -
           alu_new_max_allowed_next_max, 0) -
      (instr->alu_num_slot_free - consume_slot);
   if (slot_difference > 0) {
      gpir_debug("failed %d because of alu slot\n", node->index);
      instr->slot_difference = slot_difference;
   }

   int non_cplx_slot_difference =
      instr->alu_num_slot_needed_by_max - max_reduce_slot +
      instr->alu_num_slot_needed_by_non_cplx_store - non_cplx_store_reduce_slot -
      (instr->alu_non_cplx_slot_free - non_cplx_consume_slot);
   if (non_cplx_slot_difference > 0) {
      gpir_debug("failed %d because of alu non-cplx slot\n", node->index);
      instr->non_cplx_slot_difference = non_cplx_slot_difference;
   }

   if (slot_difference > 0 || non_cplx_slot_difference > 0)
      return false;

   instr->alu_num_slot_free -= consume_slot;
   instr->alu_non_cplx_slot_free -= non_cplx_consume_slot;
   instr->alu_num_slot_needed_by_store -= store_reduce_slot;
   instr->alu_num_slot_needed_by_non_cplx_store -= non_cplx_store_reduce_slot;
   instr->alu_num_slot_needed_by_max -= max_reduce_slot;
   instr->alu_num_unscheduled_next_max -= next_max_reduce_slot;
   instr->alu_max_allowed_next_max = alu_new_max_allowed_next_max;
   return true;
}

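/* Undo the bookkeeping of gpir_instr_insert_alu_check() when an ALU node is
 * removed from the instruction again.
 */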
static void gpir_instr_remove_alu(gpir_instr *instr, gpir_node *node)
{
   int consume_slot = gpir_instr_get_consume_slot(instr, node);

   for (int i = GPIR_INSTR_SLOT_STORE0; i <= GPIR_INSTR_SLOT_STORE3; i++) {
      gpir_store_node *s = gpir_node_to_store(instr->slots[i]);
      if (s && s->child == node) {
         instr->alu_num_slot_needed_by_store++;
         if (node->sched.next_max_node && !node->sched.complex_allowed)
            instr->alu_num_slot_needed_by_non_cplx_store++;
         break;
      }
   }

   instr->alu_num_slot_free += consume_slot;
   if (node->sched.pos != GPIR_INSTR_SLOT_COMPLEX)
      instr->alu_non_cplx_slot_free += consume_slot;
   if (node->sched.max_node)
      instr->alu_num_slot_needed_by_max++;
   if (node->sched.next_max_node)
      instr->alu_num_unscheduled_next_max++;
   if (node->op == gpir_op_complex1)
      instr->alu_max_allowed_next_max = 5;
}

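/* A load can only go into the reg0 slots if its component matches the slot,
 * every load already in reg0 reads the same index, and attribute loads are
 * not mixed with register loads.
 */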
static bool gpir_instr_insert_reg0_check(gpir_instr *instr, gpir_node *node)
{
   gpir_load_node *load = gpir_node_to_load(node);
   int i = node->sched.pos - GPIR_INSTR_SLOT_REG0_LOAD0;

   if (load->component != i)
      return false;

   if (instr->reg0_is_attr && node->op != gpir_op_load_attribute)
      return false;

   if (instr->reg0_use_count) {
      if (instr->reg0_index != load->index)
         return false;
   }
   else {
      instr->reg0_is_attr = node->op == gpir_op_load_attribute;
      instr->reg0_index = load->index;
   }

   instr->reg0_use_count++;
   return true;
}

static void gpir_instr_remove_reg0(gpir_instr *instr, gpir_node *node)
{
   instr->reg0_use_count--;
   if (!instr->reg0_use_count)
      instr->reg0_is_attr = false;
}

static bool gpir_instr_insert_reg1_check(gpir_instr *instr, gpir_node *node)
{
   gpir_load_node *load = gpir_node_to_load(node);
   int i = node->sched.pos - GPIR_INSTR_SLOT_REG1_LOAD0;

   if (load->component != i)
      return false;

   if (instr->reg1_use_count) {
      if (instr->reg1_index != load->index)
         return false;
   }
   else
      instr->reg1_index = load->index;

   instr->reg1_use_count++;
   return true;
}

static void gpir_instr_remove_reg1(gpir_instr *instr, gpir_node *node)
{
   instr->reg1_use_count--;
}

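/* Same idea as the reg0 check: every load placed in the mem slots must read
 * the same index, with the component matching its slot, and temp loads
 * cannot be mixed with other kinds of loads.
 */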
static bool gpir_instr_insert_mem_check(gpir_instr *instr, gpir_node *node)
{
   gpir_load_node *load = gpir_node_to_load(node);
   int i = node->sched.pos - GPIR_INSTR_SLOT_MEM_LOAD0;

   if (load->component != i)
      return false;

   if (instr->mem_is_temp && node->op != gpir_op_load_temp)
      return false;

   if (instr->mem_use_count) {
      if (instr->mem_index != load->index)
         return false;
   }
   else {
      instr->mem_is_temp = node->op == gpir_op_load_temp;
      instr->mem_index = load->index;
   }

   instr->mem_use_count++;
   return true;
}

static void gpir_instr_remove_mem(gpir_instr *instr, gpir_node *node)
{
   instr->mem_use_count--;
   if (!instr->mem_use_count)
      instr->mem_is_temp = false;
}

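/* Check whether a store node can be placed in its store slot: the component
 * must match the slot, the two stores sharing one store unit must target the
 * same kind of destination and the same index, and if the stored value is
 * not already scheduled in this instruction, an ALU slot must be reserved
 * for it (same invariants as in the ALU check above).
 */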
static bool gpir_instr_insert_store_check(gpir_instr *instr, gpir_node *node)
{
   gpir_store_node *store = gpir_node_to_store(node);
   int i = node->sched.pos - GPIR_INSTR_SLOT_STORE0;

   if (store->component != i)
      return false;

   i >>= 1;
   switch (instr->store_content[i]) {
   case GPIR_INSTR_STORE_NONE:
      /* store temp has only one address reg for the two store units */
      if (node->op == gpir_op_store_temp &&
          instr->store_content[!i] == GPIR_INSTR_STORE_TEMP &&
          instr->store_index[!i] != store->index)
         return false;
      break;

   case GPIR_INSTR_STORE_VARYING:
      if (node->op != gpir_op_store_varying ||
          instr->store_index[i] != store->index)
         return false;
      break;

   case GPIR_INSTR_STORE_REG:
      if (node->op != gpir_op_store_reg ||
          instr->store_index[i] != store->index)
         return false;
      break;

   case GPIR_INSTR_STORE_TEMP:
      if (node->op != gpir_op_store_temp ||
          instr->store_index[i] != store->index)
         return false;
      break;
   }

   /* check if any already-scheduled store node has the same child as this node */
   for (int j = GPIR_INSTR_SLOT_STORE0; j <= GPIR_INSTR_SLOT_STORE3; j++) {
      gpir_store_node *s = gpir_node_to_store(instr->slots[j]);
      if (s && s->child == store->child)
         goto out;
   }

   /* check if the child is already in one of this instr's alu slots;
    * this may happen when storing an already-scheduled alu node to a reg
    */
   for (int j = GPIR_INSTR_SLOT_ALU_BEGIN; j <= GPIR_INSTR_SLOT_ALU_END; j++) {
      if (store->child == instr->slots[j])
         goto out;
   }

   /* Check the invariants documented in gpir.h, similar to the ALU case.
    * When the only thing that changes is alu_num_slot_needed_by_store, we
    * can get away with just checking the first one.
    */
   int slot_difference = instr->alu_num_slot_needed_by_store + 1
      + instr->alu_num_slot_needed_by_max +
      MAX2(instr->alu_num_unscheduled_next_max - instr->alu_max_allowed_next_max, 0) -
      instr->alu_num_slot_free;
   if (slot_difference > 0) {
      instr->slot_difference = slot_difference;
      return false;
   }

   if (store->child->sched.next_max_node &&
       !store->child->sched.complex_allowed) {
      /* The child of the store is already partially ready, and has a use one
       * cycle ago that disqualifies it (or a move replacing it) from being
       * put in the complex slot. Therefore we have to check the non-complex
       * invariant.
       */
      int non_cplx_slot_difference =
         instr->alu_num_slot_needed_by_max +
         instr->alu_num_slot_needed_by_non_cplx_store + 1 -
         instr->alu_non_cplx_slot_free;
      if (non_cplx_slot_difference > 0) {
         instr->non_cplx_slot_difference = non_cplx_slot_difference;
         return false;
      }

      instr->alu_num_slot_needed_by_non_cplx_store++;
   }

   instr->alu_num_slot_needed_by_store++;

out:
   if (instr->store_content[i] == GPIR_INSTR_STORE_NONE) {
      if (node->op == gpir_op_store_varying)
         instr->store_content[i] = GPIR_INSTR_STORE_VARYING;
      else if (node->op == gpir_op_store_reg)
         instr->store_content[i] = GPIR_INSTR_STORE_REG;
      else
         instr->store_content[i] = GPIR_INSTR_STORE_TEMP;

      instr->store_index[i] = store->index;
   }
   return true;
}

static void gpir_instr_remove_store(gpir_instr *instr, gpir_node *node)
{
   gpir_store_node *store = gpir_node_to_store(node);
   int component = node->sched.pos - GPIR_INSTR_SLOT_STORE0;
   int other_slot = GPIR_INSTR_SLOT_STORE0 + (component ^ 1);

   for (int j = GPIR_INSTR_SLOT_STORE0; j <= GPIR_INSTR_SLOT_STORE3; j++) {
      if (j == node->sched.pos)
         continue;

      gpir_store_node *s = gpir_node_to_store(instr->slots[j]);
      if (s && s->child == store->child)
         goto out;
   }

   for (int j = GPIR_INSTR_SLOT_ALU_BEGIN; j <= GPIR_INSTR_SLOT_ALU_END; j++) {
      if (store->child == instr->slots[j])
         goto out;
   }

   instr->alu_num_slot_needed_by_store--;

   if (store->child->sched.next_max_node &&
       !store->child->sched.complex_allowed) {
      instr->alu_num_slot_needed_by_non_cplx_store--;
   }

out:
   if (!instr->slots[other_slot])
      instr->store_content[component >> 1] = GPIR_INSTR_STORE_NONE;
}

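/* If the given slot is occupied by a move node, try to migrate that move to
 * another free dist-two slot (starting at spill_to_start) so the slot can be
 * reused. Returns true if the slot is, or has become, free.
 */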
static bool gpir_instr_spill_move(gpir_instr *instr, int slot, int spill_to_start)
{
   gpir_node *node = instr->slots[slot];
   if (!node)
      return true;

   if (node->op != gpir_op_mov)
      return false;

   for (int i = spill_to_start; i <= GPIR_INSTR_SLOT_DIST_TWO_END; i++) {
      if (i != slot && !instr->slots[i] &&
          gpir_instr_check_acc_same_op(instr, node, i)) {
         instr->slots[i] = node;
         instr->slots[slot] = NULL;
         node->sched.pos = i;

         gpir_debug("instr %d spill move %d from slot %d to %d\n",
                    instr->index, node->index, slot, i);
         return true;
      }
   }

   return false;
}

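/* Check whether the slot(s) the node wants are free, spilling blocking moves
 * out of dist-two slots where possible. complex1 and select additionally
 * need the MUL1 slot to be free.
 */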
static bool gpir_instr_slot_free(gpir_instr *instr, gpir_node *node)
{
   if (node->op == gpir_op_mov ||
       node->sched.pos > GPIR_INSTR_SLOT_DIST_TWO_END) {
      if (instr->slots[node->sched.pos])
         return false;
   }
   else {
      /* for a node that needs a dist-two slot: if the slot holds a move,
       * we can spill it to another dist-two slot without any side effect */
      int spill_to_start = GPIR_INSTR_SLOT_MUL0;
      if (node->op == gpir_op_complex1 || node->op == gpir_op_select)
         spill_to_start = GPIR_INSTR_SLOT_ADD0;

      if (!gpir_instr_spill_move(instr, node->sched.pos, spill_to_start))
         return false;

      if (node->op == gpir_op_complex1 || node->op == gpir_op_select) {
         if (!gpir_instr_spill_move(instr, GPIR_INSTR_SLOT_MUL1, spill_to_start))
            return false;
      }
   }

   return true;
}

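/* Try to insert a node into the instruction at the slot chosen by the
 * scheduler (node->sched.pos), checking the slot-specific constraints first.
 * When the insert fails because of ALU slot pressure, slot_difference /
 * non_cplx_slot_difference record the shortfall for the scheduler. complex1
 * and select occupy the MUL1 slot in addition to their own slot.
 */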
bool gpir_instr_try_insert_node(gpir_instr *instr, gpir_node *node)
{
   instr->slot_difference = 0;
   instr->non_cplx_slot_difference = 0;

   if (!gpir_instr_slot_free(instr, node))
      return false;

   if (node->sched.pos >= GPIR_INSTR_SLOT_ALU_BEGIN &&
       node->sched.pos <= GPIR_INSTR_SLOT_ALU_END) {
      if (!gpir_instr_insert_alu_check(instr, node))
         return false;
   }
   else if (node->sched.pos >= GPIR_INSTR_SLOT_REG0_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_REG0_LOAD3) {
      if (!gpir_instr_insert_reg0_check(instr, node))
         return false;
   }
   else if (node->sched.pos >= GPIR_INSTR_SLOT_REG1_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_REG1_LOAD3) {
      if (!gpir_instr_insert_reg1_check(instr, node))
         return false;
   }
   else if (node->sched.pos >= GPIR_INSTR_SLOT_MEM_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_MEM_LOAD3) {
      if (!gpir_instr_insert_mem_check(instr, node))
         return false;
   }
   else if (node->sched.pos >= GPIR_INSTR_SLOT_STORE0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_STORE3) {
      if (!gpir_instr_insert_store_check(instr, node))
         return false;
   }

   instr->slots[node->sched.pos] = node;

   if (node->op == gpir_op_complex1 || node->op == gpir_op_select)
      instr->slots[GPIR_INSTR_SLOT_MUL1] = node;

   return true;
}

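/* Remove a previously inserted node from the instruction and roll back the
 * slot bookkeeping done at insert time.
 */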
void gpir_instr_remove_node(gpir_instr *instr, gpir_node *node)
{
   assert(node->sched.pos >= 0);

   /* This can happen if we merge duplicate loads in the scheduler. */
   if (instr->slots[node->sched.pos] != node) {
      node->sched.pos = -1;
      node->sched.instr = NULL;
      return;
   }

   if (node->sched.pos >= GPIR_INSTR_SLOT_ALU_BEGIN &&
       node->sched.pos <= GPIR_INSTR_SLOT_ALU_END)
      gpir_instr_remove_alu(instr, node);
   else if (node->sched.pos >= GPIR_INSTR_SLOT_REG0_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_REG0_LOAD3)
      gpir_instr_remove_reg0(instr, node);
   else if (node->sched.pos >= GPIR_INSTR_SLOT_REG1_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_REG1_LOAD3)
      gpir_instr_remove_reg1(instr, node);
   else if (node->sched.pos >= GPIR_INSTR_SLOT_MEM_LOAD0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_MEM_LOAD3)
      gpir_instr_remove_mem(instr, node);
   else if (node->sched.pos >= GPIR_INSTR_SLOT_STORE0 &&
            node->sched.pos <= GPIR_INSTR_SLOT_STORE3)
      gpir_instr_remove_store(instr, node);

   instr->slots[node->sched.pos] = NULL;

   if (node->op == gpir_op_complex1 || node->op == gpir_op_select)
      instr->slots[GPIR_INSTR_SLOT_MUL1] = NULL;

   node->sched.pos = -1;
   node->sched.instr = NULL;
}

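/* Debug helper: print the scheduled program as a table, one row per
 * instruction and one column per slot (or group of load/store slots).
 */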
void gpir_instr_print_prog(gpir_compiler *comp)
{
   struct {
      int len;
      char *name;
   } fields[] = {
      [GPIR_INSTR_SLOT_MUL0] = { 4, "mul0" },
      [GPIR_INSTR_SLOT_MUL1] = { 4, "mul1" },
      [GPIR_INSTR_SLOT_ADD0] = { 4, "add0" },
      [GPIR_INSTR_SLOT_ADD1] = { 4, "add1" },
      [GPIR_INSTR_SLOT_REG0_LOAD3] = { 15, "load0" },
      [GPIR_INSTR_SLOT_REG1_LOAD3] = { 15, "load1" },
      [GPIR_INSTR_SLOT_MEM_LOAD3] = { 15, "load2" },
      [GPIR_INSTR_SLOT_STORE3] = { 15, "store" },
      [GPIR_INSTR_SLOT_COMPLEX] = { 4, "cmpl" },
      [GPIR_INSTR_SLOT_PASS] = { 4, "pass" },
   };

   printf("========prog instr========\n");
   printf("     ");
   for (int i = 0; i < GPIR_INSTR_SLOT_NUM; i++) {
      if (fields[i].len)
         printf("%-*s ", fields[i].len, fields[i].name);
   }
   printf("\n");

   int index = 0;
   list_for_each_entry(gpir_block, block, &comp->block_list, list) {
      list_for_each_entry(gpir_instr, instr, &block->instr_list, list) {
         printf("%03d: ", index++);

         char buff[16] = "null";
         int start = 0;
         for (int j = 0; j < GPIR_INSTR_SLOT_NUM; j++) {
            gpir_node *node = instr->slots[j];
            if (fields[j].len) {
               if (node)
                  snprintf(buff + start, sizeof(buff) - start, "%d", node->index);
               printf("%-*s ", fields[j].len, buff);

               strcpy(buff, "null");
               start = 0;
            }
            else {
               if (node)
                  start += snprintf(buff + start, sizeof(buff) - start, "%d", node->index);
               start += snprintf(buff + start, sizeof(buff) - start, "|");
            }
         }
         printf("\n");
      }
      printf("-----------------------\n");
   }
   printf("==========================\n");
}