/*
 * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"

//#undef NDEBUG
#include <assert.h>


static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){
    int poc0 = h->ref_list[0][i].poc;
    int td = av_clip(poc1 - poc0, -128, 127);
    if(td == 0 || h->ref_list[0][i].long_ref){
        return 256;
    }else{
        int tb = av_clip(poc - poc0, -128, 127);
        int tx = (16384 + (FFABS(td) >> 1)) / td;
        return av_clip((tb*tx + 32) >> 6, -1024, 1023);
    }
}

void ff_h264_direct_dist_scale_factor(H264Context * const h){
    MpegEncContext * const s = &h->s;
    const int poc  = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
    const int poc1 = h->ref_list[1][0].poc;
    int i, field;

    for(field=0; field<2; field++){
        const int poc  = h->s.current_picture_ptr->field_poc[field];
        const int poc1 = h->ref_list[1][0].field_poc[field];
        for(i=0; i < 2*h->ref_count[0]; i++)
            h->dist_scale_factor_field[field][i^field] = get_scale_factor(h, poc, poc1, i+16);
    }

    for(i=0; i<h->ref_count[0]; i++){
        h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
    }
}

static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start = mbafi ? 16                   : 0;
    int end   = mbafi ? 16+2*h->ref_count[0] : h->ref_count[0];
    int interl= mbafi || s->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for(rfield=0; rfield<2; rfield++){
        for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;
            else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isnt needed
                poc= (poc&~3) + rfield + 1;

            for(j=start; j<end; j++){
                if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
                    int cur_ref= mbafi ? (j-16)^field : j;
                    map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
                    if(rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}

void ff_h264_direct_ref_list_init(H264Context * const h){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    Picture * const cur = s->current_picture_ptr;
    int list, j, field;
    int sidx     = (s->picture_structure&1)^1;
    int ref1sidx = (ref1->f.reference&1)^1;

    for(list=0; list<2; list++){
        cur->ref_count[sidx][list] = h->ref_count[list];
        for(j=0; j<h->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
    }

    if(s->picture_structure == PICT_FRAME){
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc [1], cur->ref_poc [0], sizeof(cur->ref_poc [0]));
    }

    cur->mbaff = FRAME_MBAFF;

    h->col_fieldoff = 0;
    if(s->picture_structure == PICT_FRAME){
        int cur_poc  = s->current_picture_ptr->poc;
        int *col_poc = h->ref_list[1]->field_poc;
        h->col_parity = (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
        ref1sidx = sidx = h->col_parity;
    } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
        h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
    }

    if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
        return;

    for(list=0; list<2; list++){
        fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
        if(FRAME_MBAFF)
            for(field=0; field<2; field++)
                fill_colmap(h, h->map_col_to_list0_field[field], list, field, field, 1);
    }
}

static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
    int ref_field = ref->f.reference - 1;
    int ref_field_picture = ref->field_picture;
    int ref_height = 16*h->s.mb_height >> ref_field_picture;

    if(!HAVE_THREADS || !(h->s.avctx->active_thread_type&FF_THREAD_FRAME))
        return;

    //FIXME it can be safe to access mb stuff
    //even if pixels aren't deblocked yet

    ff_thread_await_progress((AVFrame*)ref, FFMIN(16*mb_y >> ref_field_picture, ref_height-1),
                             ref_field_picture && ref_field);
}

static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type = MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)


    /* ref = min(neighbors) */
    for(list=0; list<2; list++){
        int left_ref = h->ref_cache[list][scan8[0] - 1];
        int top_ref  = h->ref_cache[list][scan8[0] - 8];
        int refc     = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C = h->mv_cache[list][ scan8[0] - 8 + 4 ];
        if(refc == PART_NOT_AVAILABLE){
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h->mv_cache [list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc);
        if(ref[list] >= 0){
            //this is just pred_motion() but with the cases removed that cannot happen for direct blocks
            const int16_t * const A = h->mv_cache[list][ scan8[0] - 1 ];
            const int16_t * const B = h->mv_cache[list][ scan8[0] - 8 ];

            int match_count = (left_ref==ref[list]) + (top_ref==ref[list]) + (refc==ref[list]);
            if(match_count > 1){ //most common
                mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]),
                                      mid_pred(A[1], B[1], C[1]) );
            }else {
                assert(match_count==1);
                if(left_ref==ref[list]){
                    mv[list] = AV_RN32A(A);
                }else if(top_ref==ref[list]){
                    mv[list] = AV_RN32A(B);
                }else{
                    mv[list] = AV_RN32A(C);
                }
            }
        }else{
            int mask = ~(MB_TYPE_L0 << (2*list));
            mv[list]  = 0;
            ref[list] = -1;
            if(!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if(ref[0] < 0 && ref[1] < 0){
        ref[0] = ref[1] = 0;
        if(!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if(!(is_b8x8|mv[0]|mv[1])){
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type = (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        return;
    }

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                      //     AFR/FR    -> AFL/FL
            mb_y  = (s->mb_y&~1) + h->col_parity;
            mb_xy = s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{                                                   // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){                         // AFL       /FL -> AFR/FR
            mb_y  = s->mb_y&~1;
            mb_xy = s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride  = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type |= MB_TYPE_16x8 |MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type |= MB_TYPE_8x8;
            }
        }else{                                               //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                     * so we know exactly what block size to use */
                    sub_mb_type += (MB_TYPE_8x8-MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2*b4_stride;
            l1mv1  += 2*b4_stride;
        }
    }


    if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
        int n=0;
        for(i8=0; i8<4; i8++){
            int x8  = i8&1;
            int y8  = i8>>1;
            int xy8 =   x8+y8*b8_stride;
            int xy4 = 3*x8+y8*b4_stride;
            int a,b;

            if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
            if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
               && (   (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
                   || (l1ref0[xy8]  < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
                a=b=0;
                if(ref[0] > 0)
                    a = mv[0];
                if(ref[1] > 0)
                    b = mv[1];
                n++;
            }else{
                a = mv[0];
                b = mv[1];
            }
            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
        }
        if(!is_b8x8 && !(n&3))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
    }else if(IS_16X16(*mb_type)){
        int a,b;

        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
           && (   (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
               || (l1ref0[0]  < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
                   && h->x264_build>33U))){
            a=b=0;
            if(ref[0] > 0)
                a = mv[0];
            if(ref[1] > 0)
                b = mv[1];
        }else{
            a = mv[0];
            b = mv[1];
        }
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
    }else{
        int n=0;
        for(i8=0; i8<4; i8++){
            const int x8 = i8&1;
            const int y8 = i8>>1;

            if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, mv[0], 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, mv[1], 4);
            fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);

            assert(b8_stride==2);
            /* col_zero_flag */
            if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref && (   l1ref0[i8] == 0
                                                                            || (l1ref0[i8] < 0 && l1ref1[i8] == 0
                                                                                && h->x264_build>33U))){
                const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                        if(ref[0] == 0)
                            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                        if(ref[1] == 0)
                            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                        n+=4;
                    }
                }else{
                    int m=0;
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                            if(ref[0] == 0)
                                AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
                            if(ref[1] == 0)
                                AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
                            m++;
                        }
                    }
                    if(!(m&3))
                        h->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8;
                    n+=m;
                }
            }
        }
        if(!is_b8x8 && !(n&15))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
    }
}

static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                      //     AFR/FR    -> AFL/FL
            mb_y  = (s->mb_y&~1) + h->col_parity;
            mb_xy = s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{                                                   // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){                         // AFL       /FL -> AFR/FR
            mb_y  = s->mb_y&~1;
            mb_xy = s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride  = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */

            if(    (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }else{                                               //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                     * so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }
    }

    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2*b4_stride;
            l1mv1  += 2*b4_stride;
        }
    }

    {
        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
            map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1];
            dist_scale_factor   = h->dist_scale_factor_field[s->mb_y&1];
        }
        ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0

        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            int y_shift = 2*!IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[y8])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8*b8_stride];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
                    int my_col = (mv_col[1]<<y_shift)/2;
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * my_col    + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
                }
            }
            return;
        }

        /* one-to-one mv scaling */

        if(IS_16X16(*mb_type)){
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if(IS_INTRA(mb_type_col[0])){
                ref=mv0=mv1=0;
            }else{
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref = ref0;
                mv0 = pack16to32(mv_l0[0],mv_l0[1]);
                mv1 = pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[0])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                }else
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
                                 pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
                    }
            }
        }
    }
}

void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
    if(h->direct_spatial_mv_pred){
        pred_spatial_direct_motion(h, mb_type);
    }else{
        pred_temp_direct_motion(h, mb_type);
    }
}