• Home
  • Raw
  • Download

Lines Matching refs:cr_block_size

1411     const size_t cr_block_size = min(c - cr_block_start, cr);  in xnn_pack_f32_dwconv_ghw_w()  local
1413 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_dwconv_ghw_w()
1417 size_t n = cr_block_size; in xnn_pack_f32_dwconv_ghw_w()
1422 packed_w += cr - cr_block_size; in xnn_pack_f32_dwconv_ghw_w()
1425 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_dwconv_ghw_w()
1429 packed_w += cr - cr_block_size; in xnn_pack_f32_dwconv_ghw_w()
1432 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f32_dwconv_ghw_w()
1450 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f16_dwconv_ghw_w() local
1452 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_dwconv_ghw_w()
1456 size_t n = cr_block_size; in xnn_pack_f16_dwconv_ghw_w()
1461 packed_w += cr - cr_block_size; in xnn_pack_f16_dwconv_ghw_w()
1464 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_dwconv_ghw_w()
1468 packed_w += cr - cr_block_size; in xnn_pack_f16_dwconv_ghw_w()
1471 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f16_dwconv_ghw_w()
1489 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f32_to_f16_dwconv_ghw_w() local
1491 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_dwconv_ghw_w()
1495 size_t n = cr_block_size; in xnn_pack_f32_to_f16_dwconv_ghw_w()
1500 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_dwconv_ghw_w()
1503 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_dwconv_ghw_w()
1507 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_dwconv_ghw_w()
1510 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f32_to_f16_dwconv_ghw_w()
1530 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_qu8_dwconv_ghw_w() local
1533 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qu8_dwconv_ghw_w()
1538 size_t n = cr_block_size; in xnn_pack_qu8_dwconv_ghw_w()
1544 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int32_t)); in xnn_pack_qu8_dwconv_ghw_w()
1547 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qu8_dwconv_ghw_w()
1553 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(uint8_t)); in xnn_pack_qu8_dwconv_ghw_w()
1556 packed_w = (void*) ((uintptr_t) packed_w + (primary_tile - (h * w)) * cr_block_size * sizeof(uint8_t)); in xnn_pack_qu8_dwconv_ghw_w()
1575 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_qs8_dwconv_ghw_w() local
1578 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qs8_dwconv_ghw_w()
1583 size_t n = cr_block_size; in xnn_pack_qs8_dwconv_ghw_w()
1589 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int32_t)); in xnn_pack_qs8_dwconv_ghw_w()
1592 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qs8_dwconv_ghw_w()
1598 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int8_t)); in xnn_pack_qs8_dwconv_ghw_w()
1601 packed_w = (void*) ((uintptr_t) packed_w + (primary_tile - (h * w)) * cr_block_size * sizeof(int8_t)); in xnn_pack_qs8_dwconv_ghw_w()
1619 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f32_dwconv_hwg_w() local
1621 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_dwconv_hwg_w()
1625 size_t n = cr_block_size; in xnn_pack_f32_dwconv_hwg_w()
1630 packed_w += cr - cr_block_size; in xnn_pack_f32_dwconv_hwg_w()
1633 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_dwconv_hwg_w()
1637 packed_w += cr - cr_block_size; in xnn_pack_f32_dwconv_hwg_w()
1640 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f32_dwconv_hwg_w()
1658 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f16_dwconv_hwg_w() local
1660 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_dwconv_hwg_w()
1664 size_t n = cr_block_size; in xnn_pack_f16_dwconv_hwg_w()
1669 packed_w += cr - cr_block_size; in xnn_pack_f16_dwconv_hwg_w()
1672 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_dwconv_hwg_w()
1676 packed_w += cr - cr_block_size; in xnn_pack_f16_dwconv_hwg_w()
1679 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f16_dwconv_hwg_w()
1697 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f32_to_f16_dwconv_hwg_w() local
1699 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_dwconv_hwg_w()
1703 size_t n = cr_block_size; in xnn_pack_f32_to_f16_dwconv_hwg_w()
1708 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_dwconv_hwg_w()
1711 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_dwconv_hwg_w()
1715 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_dwconv_hwg_w()
1718 packed_w += (primary_tile - (h * w)) * cr_block_size; in xnn_pack_f32_to_f16_dwconv_hwg_w()
1738 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_qu8_dwconv_hwg_w() local
1741 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qu8_dwconv_hwg_w()
1746 size_t n = cr_block_size; in xnn_pack_qu8_dwconv_hwg_w()
1752 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int32_t)); in xnn_pack_qu8_dwconv_hwg_w()
1755 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qu8_dwconv_hwg_w()
1761 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(uint8_t)); in xnn_pack_qu8_dwconv_hwg_w()
1764 packed_w = (void*) ((uintptr_t) packed_w + (primary_tile - (h * w)) * cr_block_size * sizeof(uint8_t)); in xnn_pack_qu8_dwconv_hwg_w()
1783 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_qs8_dwconv_hwg_w() local
1786 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qs8_dwconv_hwg_w()
1791 size_t n = cr_block_size; in xnn_pack_qs8_dwconv_hwg_w()
1797 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int32_t)); in xnn_pack_qs8_dwconv_hwg_w()
1800 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_qs8_dwconv_hwg_w()
1806 packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int8_t)); in xnn_pack_qs8_dwconv_hwg_w()
1809 packed_w = (void*) ((uintptr_t) packed_w + (primary_tile - (h * w)) * cr_block_size * sizeof(int8_t)); in xnn_pack_qs8_dwconv_hwg_w()
2030 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f32_vmulcaddc_w() local
2031 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_vmulcaddc_w()
2034 packed_w += cr - cr_block_size; in xnn_pack_f32_vmulcaddc_w()
2036 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_vmulcaddc_w()
2040 size_t n = cr_block_size; in xnn_pack_f32_vmulcaddc_w()
2045 packed_w += cr - cr_block_size; in xnn_pack_f32_vmulcaddc_w()
2058 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f16_vmulcaddc_w() local
2059 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_vmulcaddc_w()
2062 packed_w += cr - cr_block_size; in xnn_pack_f16_vmulcaddc_w()
2064 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f16_vmulcaddc_w()
2068 size_t n = cr_block_size; in xnn_pack_f16_vmulcaddc_w()
2073 packed_w += cr - cr_block_size; in xnn_pack_f16_vmulcaddc_w()
2086 const size_t cr_block_size = min(c - cr_block_start, cr); in xnn_pack_f32_to_f16_vmulcaddc_w() local
2087 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_vmulcaddc_w()
2090 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_vmulcaddc_w()
2092 for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) { in xnn_pack_f32_to_f16_vmulcaddc_w()
2096 size_t n = cr_block_size; in xnn_pack_f32_to_f16_vmulcaddc_w()
2101 packed_w += cr - cr_block_size; in xnn_pack_f32_to_f16_vmulcaddc_w()