Lines matching refs:ib — every reference to the local indirect-buffer pointer `ib` in the r600 command-stream checker (drivers/gpu/drm/radeon/r600_cs.c)
356 volatile u32 *ib = p->ib.ptr; in r600_cs_track_validate_cb() local
466 ib[track->cb_color_size_idx[i]] = tmp; in r600_cs_track_validate_cb()
525 volatile u32 *ib = p->ib.ptr; in r600_cs_track_validate_db() local
563 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); in r600_cs_track_validate_db()
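
The two validate helpers above patch size dwords in place when the values supplied by userspace would overflow the bound buffer object: the checker recomputes a SLICE_TILE_MAX that fits and rewrites the dword at the index it recorded earlier (track->cb_color_size_idx[i] / track->db_depth_size_idx). Below is a minimal sketch of that in-place fixup, assuming kernel-style u32 types and that S_028000_SLICE_TILE_MAX() packs its argument into bits [29:10] while the low 10 bits of the register are preserved; the helper name and parameters are illustrative, not the kernel's.

    #include <stdint.h>

    typedef uint32_t u32;

    /* Assumed field layout: SLICE_TILE_MAX in bits [29:10] of
     * DB_DEPTH_SIZE, the pitch field in the low 10 bits. */
    #define S_028000_SLICE_TILE_MAX(x)  (((x) & 0xFFFFF) << 10)

    /* Clamp the depth-size dword recorded at ib[size_idx] so the slice
     * covers only 'max_tiles' tiles, keeping the original low 10 bits. */
    static void patch_db_depth_size(volatile u32 *ib, unsigned size_idx,
                                    u32 max_tiles, u32 original_value)
    {
        ib[size_idx] = S_028000_SLICE_TILE_MAX(max_tiles - 1) |
                       (original_value & 0x3FF);
    }
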
832 volatile uint32_t *ib; in r600_cs_common_vline_parse() local
834 ib = p->ib.ptr; in r600_cs_common_vline_parse()
897 ib[h_idx + 2] = PACKET2(0); in r600_cs_common_vline_parse()
898 ib[h_idx + 3] = PACKET2(0); in r600_cs_common_vline_parse()
899 ib[h_idx + 4] = PACKET2(0); in r600_cs_common_vline_parse()
900 ib[h_idx + 5] = PACKET2(0); in r600_cs_common_vline_parse()
901 ib[h_idx + 6] = PACKET2(0); in r600_cs_common_vline_parse()
902 ib[h_idx + 7] = PACKET2(0); in r600_cs_common_vline_parse()
903 ib[h_idx + 8] = PACKET2(0); in r600_cs_common_vline_parse()
907 ib[h_idx] = header; in r600_cs_common_vline_parse()
908 ib[h_idx + 4] = vline_status[crtc_id] >> 2; in r600_cs_common_vline_parse()
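
r600_cs_common_vline_parse rewrites the user's vline-wait sequence in the IB. If the target CRTC turns out to be disabled, the dwords at h_idx+2 .. h_idx+8 are simply overwritten with PACKET2(0) filler NOPs; otherwise the packet header is re-emitted and the register operand at h_idx+4 is redirected to the vline status register of the requested CRTC (the >> 2 converts the byte offset to a dword offset). A sketch of the NOP-out path, assuming PACKET2(v) builds a type-2 filler packet (bits [31:30] = 2, payload in the low 30 bits); the helper name is illustrative.

    #include <stdint.h>

    typedef uint32_t u32;

    /* Assumed encoding of a type-2 (filler/NOP) CP packet. */
    #define PACKET2(v)  ((2u << 30) | ((v) & 0x3fffffffu))

    /* Overwrite 'count' dwords of the IB, starting at 'first', with filler
     * packets so the CP skips the original vline-wait sequence. */
    static void nop_out_packets(volatile u32 *ib, unsigned first, unsigned count)
    {
        for (unsigned i = 0; i < count; i++)
            ib[first + i] = PACKET2(0);
    }
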
970 u32 m, i, tmp, *ib; in r600_cs_check_reg() local
981 ib = p->ib.ptr; in r600_cs_check_reg()
1020 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1039 ib[idx] &= C_028010_ARRAY_MODE; in r600_cs_check_reg()
1042 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); in r600_cs_check_reg()
1045 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); in r600_cs_check_reg()
1082 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1103 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1141 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); in r600_cs_check_reg()
1144 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); in r600_cs_check_reg()
1203 ib[idx] = track->cb_color_base_last[tmp]; in r600_cs_check_reg()
1211 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; in r600_cs_check_reg()
1212 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1234 ib[idx] = track->cb_color_base_last[tmp]; in r600_cs_check_reg()
1242 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; in r600_cs_check_reg()
1243 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1279 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1280 track->cb_color_base_last[tmp] = ib[idx]; in r600_cs_check_reg()
1293 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1306 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1313 ib[idx] |= 3; in r600_cs_check_reg()
1375 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
1384 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_cs_check_reg()
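
Every `ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff)` line in r600_cs_check_reg is the same relocation idiom: base-address registers hold 256-byte-aligned addresses, so the checker adds the bound buffer object's GPU offset, shifted right by 8, on top of whatever intra-buffer offset userspace already wrote into the dword. The neighbouring `|= S_..._ARRAY_MODE(...)` lines fold the kernel's view of the BO's tiling mode into the same register value. A sketch of the relocation pattern, with a hypothetical helper name and kernel-style types assumed:

    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Relocate a base-address register value in place: the register stores
     * the address in 256-byte units, so add the BO's GPU offset shifted
     * right by 8 to the intra-buffer offset userspace placed in the dword. */
    static void relocate_base_reg(volatile u32 *ib, unsigned idx, u64 gpu_offset)
    {
        ib[idx] += (u32)((gpu_offset >> 8) & 0xffffffff);
    }
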
1631 volatile u32 *ib; in r600_packet3_check() local
1639 ib = p->ib.ptr; in r600_packet3_check()
1677 ib[idx + 0] = offset; in r600_packet3_check()
1678 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); in r600_packet3_check()
1718 ib[idx+0] = offset; in r600_packet3_check()
1719 ib[idx+1] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
1770 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); in r600_packet3_check()
1771 ib[idx+2] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
1814 ib[idx] = offset; in r600_packet3_check()
1815 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); in r600_packet3_check()
1844 ib[idx+2] = offset; in r600_packet3_check()
1845 ib[idx+3] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
1862 ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_packet3_check()
1882 ib[idx+1] = offset & 0xfffffff8; in r600_packet3_check()
1883 ib[idx+2] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
1904 ib[idx+1] = offset & 0xfffffffc; in r600_packet3_check()
1905 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); in r600_packet3_check()
1968 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); in r600_packet3_check()
1970 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); in r600_packet3_check()
1988 ib[idx+1+(i*7)+2] += base_offset; in r600_packet3_check()
1989 ib[idx+1+(i*7)+3] += mip_offset; in r600_packet3_check()
2006 ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset; in r600_packet3_check()
2010 ib[idx+1+(i*8)+0] = offset64; in r600_packet3_check()
2011 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) | in r600_packet3_check()
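
In the SET_RESOURCE handling above, each resource descriptor occupies a fixed stride of dwords after the packet header, so word w of slot i sits at ib[idx + 1 + i*stride + w]; the tile-mode, base/mip-offset and size fixups all use that addressing (the tail of the last assignment continues on the following source line, which does not itself reference `ib` and so is not listed). A hypothetical helper that makes the addressing explicit:

    #include <stdint.h>

    typedef uint32_t u32;

    /* Address word 'word' of resource descriptor 'slot' in a SET_RESOURCE
     * packet whose header sits at ib[idx]; 'stride' is the descriptor size
     * in dwords (7 or 8 in the lines above). */
    static inline volatile u32 *resource_word(volatile u32 *ib, unsigned idx,
                                              unsigned slot, unsigned stride,
                                              unsigned word)
    {
        return &ib[idx + 1 + slot * stride + word];
    }
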
2119 ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); in r600_packet3_check()
2153 ib[idx+1] = offset; in r600_packet3_check()
2154 ib[idx+2] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
2172 ib[idx+3] = offset; in r600_packet3_check()
2173 ib[idx+4] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
2201 ib[idx+0] = offset; in r600_packet3_check()
2202 ib[idx+1] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
2226 ib[idx+1] = offset; in r600_packet3_check()
2227 ib[idx+2] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
2250 ib[idx+3] = offset; in r600_packet3_check()
2251 ib[idx+4] = upper_32_bits(offset) & 0xff; in r600_packet3_check()
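
The paired writes in r600_packet3_check (idx+1/idx+2, idx+3/idx+4, and so on) are all the same 64-bit address split: the low 32 bits of the relocated GPU address go into one dword, sometimes masked to preserve alignment or flag bits that share it, and only the upper 8 bits of the 40-bit address go into the next dword, merged with whatever other fields live there. A sketch of that split, assuming upper_32_bits(n) behaves as (u32)((n) >> 32) as in the kernel; the helper name and mask parameters are illustrative.

    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    #define upper_32_bits(n)  ((u32)((u64)(n) >> 32))

    /* Write a relocated 64-bit GPU address into two consecutive packet
     * dwords: the low dword keeps the bits selected by 'lo_keep'
     * (alignment/flag bits), the high dword keeps everything except its
     * low 8 bits, which receive the upper address bits. */
    static void write_split_addr(volatile u32 *ib, unsigned lo_idx,
                                 unsigned hi_idx, u64 offset,
                                 u32 lo_keep, u32 hi_keep)
    {
        ib[lo_idx] = (ib[lo_idx] & lo_keep) | ((u32)offset & ~lo_keep);
        ib[hi_idx] = (ib[hi_idx] & hi_keep) | (upper_32_bits(offset) & 0xff);
    }
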
2321 for (r = 0; r < p->ib.length_dw; r++) { in r600_cs_parse()
2322 pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]); in r600_cs_parse()
2381 volatile u32 *ib = p->ib.ptr; in r600_dma_cs_parse() local
2409 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); in r600_dma_cs_parse()
2415 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2416 ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2443 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); in r600_dma_cs_parse()
2447 ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2448 ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2453 ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2454 ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2458 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); in r600_dma_cs_parse()
2468 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2469 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2470 ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2471 ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2479 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2480 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2481 ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff; in r600_dma_cs_parse()
2482 ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16; in r600_dma_cs_parse()
2514 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); in r600_dma_cs_parse()
2515 ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000; in r600_dma_cs_parse()
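
The DMA parser applies two relocation flavours, both visible above: tiled operands store their address in 256-byte units, so only `gpu_offset >> 8` is added to a single dword, while linear operands get the dword-aligned low 32 bits added to one dword and the upper 8 address bits to the next (or, in the last case listed, packed into bits [23:16] of a shared dword). A sketch of both cases under the same assumptions as the earlier sketches; the helper names are hypothetical.

    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Tiled source/destination: the packet dword holds the address in
     * 256-byte granules, so add the BO offset shifted right by 8. */
    static void dma_reloc_tiled(volatile u32 *ib, unsigned idx, u64 gpu_offset)
    {
        ib[idx] += (u32)(gpu_offset >> 8);
    }

    /* Linear source/destination: dword-aligned low 32 bits in one dword,
     * upper 8 bits of the 40-bit address in the following dword. */
    static void dma_reloc_linear(volatile u32 *ib, unsigned lo_idx,
                                 unsigned hi_idx, u64 gpu_offset)
    {
        ib[lo_idx] += (u32)(gpu_offset & 0xfffffffc);
        ib[hi_idx] += (u32)(gpu_offset >> 32) & 0xff;
    }
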
2527 for (r = 0; r < p->ib.length_dw; r++) { in r600_dma_cs_parse()
2528 pr_info("%05d 0x%08X\n", r, p->ib.ptr[r]); in r600_dma_cs_parse()