1/*
2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7 *
8 * This file is part of FFmpeg.
9 *
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25/**
26 * @file
27 * The simplest mpeg encoder (well, it was the simplest!).
28 */
29
30#include "libavutil/attributes.h"
31#include "libavutil/avassert.h"
32#include "libavutil/imgutils.h"
33#include "libavutil/internal.h"
34#include "libavutil/timer.h"
35#include "avcodec.h"
36#include "blockdsp.h"
37#include "h264chroma.h"
38#include "idctdsp.h"
39#include "internal.h"
40#include "mathops.h"
41#include "mpegutils.h"
42#include "mpegvideo.h"
43#include "mjpegenc.h"
44#include "msmpeg4.h"
45#include "qpeldsp.h"
46#include "thread.h"
47#include <limits.h>
48
/* Default luma-qscale -> chroma-qscale mapping: the identity. Codecs with a
 * non-linear chroma quantizer install their own table instead. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
54
/* MPEG-1 DC coefficient scale: a constant 8 for every index (presumably
 * indexed by qscale, matching the other 128-entry DC tables below). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
66
/* MPEG-2 DC scale table, entry 1 of ff_mpeg2_dc_scale_table[] below:
 * constant scale 4 for every index. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
78
/* MPEG-2 DC scale table, entry 2 of ff_mpeg2_dc_scale_table[] below:
 * constant scale 2 for every index. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
90
/* MPEG-2 DC scale table, entry 3 of ff_mpeg2_dc_scale_table[] below:
 * constant scale 1 for every index (no DC scaling). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
102
/* DC scale table selector: maps 0..3 (presumably the MPEG-2
 * intra_dc_precision — TODO confirm against callers) to the DC scale
 * tables above (scales 8, 4, 2, 1 respectively). */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
109
/* Alternate horizontal scan order: entry i is the raster index of the
 * coefficient read at scan position i. */
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
120
/* Alternate vertical scan order (used e.g. when alternate_scan is set,
 * see ff_dct_common_init below): entry i is the raster index of the
 * coefficient read at scan position i. */
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
131
132static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133                                   int16_t *block, int n, int qscale)
134{
135    int i, level, nCoeffs;
136    const uint16_t *quant_matrix;
137
138    nCoeffs= s->block_last_index[n];
139
140    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141    /* XXX: only mpeg1 */
142    quant_matrix = s->intra_matrix;
143    for(i=1;i<=nCoeffs;i++) {
144        int j= s->intra_scantable.permutated[i];
145        level = block[j];
146        if (level) {
147            if (level < 0) {
148                level = -level;
149                level = (int)(level * qscale * quant_matrix[j]) >> 3;
150                level = (level - 1) | 1;
151                level = -level;
152            } else {
153                level = (int)(level * qscale * quant_matrix[j]) >> 3;
154                level = (level - 1) | 1;
155            }
156            block[j] = level;
157        }
158    }
159}
160
161static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162                                   int16_t *block, int n, int qscale)
163{
164    int i, level, nCoeffs;
165    const uint16_t *quant_matrix;
166
167    nCoeffs= s->block_last_index[n];
168
169    quant_matrix = s->inter_matrix;
170    for(i=0; i<=nCoeffs; i++) {
171        int j= s->intra_scantable.permutated[i];
172        level = block[j];
173        if (level) {
174            if (level < 0) {
175                level = -level;
176                level = (((level << 1) + 1) * qscale *
177                         ((int) (quant_matrix[j]))) >> 4;
178                level = (level - 1) | 1;
179                level = -level;
180            } else {
181                level = (((level << 1) + 1) * qscale *
182                         ((int) (quant_matrix[j]))) >> 4;
183                level = (level - 1) | 1;
184            }
185            block[j] = level;
186        }
187    }
188}
189
190static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191                                   int16_t *block, int n, int qscale)
192{
193    int i, level, nCoeffs;
194    const uint16_t *quant_matrix;
195
196    if(s->alternate_scan) nCoeffs= 63;
197    else nCoeffs= s->block_last_index[n];
198
199    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200    quant_matrix = s->intra_matrix;
201    for(i=1;i<=nCoeffs;i++) {
202        int j= s->intra_scantable.permutated[i];
203        level = block[j];
204        if (level) {
205            if (level < 0) {
206                level = -level;
207                level = (int)(level * qscale * quant_matrix[j]) >> 3;
208                level = -level;
209            } else {
210                level = (int)(level * qscale * quant_matrix[j]) >> 3;
211            }
212            block[j] = level;
213        }
214    }
215}
216
/* Bitexact MPEG-2 intra unquantizer: same arithmetic as
 * dct_unquantize_mpeg2_intra_c, but additionally tracks the sum of all
 * coefficients and flips the LSB of block[63] so the total is odd
 * (MPEG-2 IDCT mismatch control). Selected when CODEC_FLAG_BITEXACT
 * is set (see ff_dct_common_init). */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    /* With alternate scan the whole 64-coefficient block is processed. */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* Blocks 0..3 are luma, the rest chroma. */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* sum starts at -1, so XOR-ing its parity into block[63] forces the
     * overall coefficient sum to be odd. */
    block[63]^=sum&1;
}
247
/* MPEG-2 inter unquantizer: (2*|level| + 1) * qscale * matrix / 16 with
 * the sign restored, plus MPEG-2 IDCT mismatch control on block[63]. */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    /* With alternate scan the whole 64-coefficient block is processed. */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* sum starts at -1, so XOR-ing its parity into block[63] forces the
     * overall coefficient sum to be odd (mismatch control). */
    block[63]^=sum&1;
}
278
279static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280                                  int16_t *block, int n, int qscale)
281{
282    int i, level, qmul, qadd;
283    int nCoeffs;
284
285    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
286
287    qmul = qscale << 1;
288
289    if (!s->h263_aic) {
290        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
291        qadd = (qscale - 1) | 1;
292    }else{
293        qadd = 0;
294    }
295    if(s->ac_pred)
296        nCoeffs=63;
297    else
298        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
299
300    for(i=1; i<=nCoeffs; i++) {
301        level = block[i];
302        if (level) {
303            if (level < 0) {
304                level = level * qmul - qadd;
305            } else {
306                level = level * qmul + qadd;
307            }
308            block[i] = level;
309        }
310    }
311}
312
313static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314                                  int16_t *block, int n, int qscale)
315{
316    int i, level, qmul, qadd;
317    int nCoeffs;
318
319    av_assert2(s->block_last_index[n]>=0);
320
321    qadd = (qscale - 1) | 1;
322    qmul = qscale << 1;
323
324    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
325
326    for(i=0; i<=nCoeffs; i++) {
327        level = block[i];
328        if (level) {
329            if (level < 0) {
330                level = level * qmul - qadd;
331            } else {
332                level = level * qmul + qadd;
333            }
334            block[i] = level;
335        }
336    }
337}
338
/* Error-concealment callback (opaque is the MpegEncContext): loads the
 * supplied motion parameters into the context and reconstructs one
 * macroblock via ff_MPV_decode_mb(). */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Install the macroblock parameters in the context. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->bdsp.clear_blocks(s->block[0]);

    /* Destination pointers for the current MB: 16x16 luma, chroma
     * scaled down by the subsampling shifts. */
    s->dest[0] = s->current_picture.f->data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
    s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
    ff_MPV_decode_mb(s, s->block);
}
366
367static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
368{
369    while(h--)
370        memset(dst + h*linesize, 128, 16);
371}
372
373static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
374{
375    while(h--)
376        memset(dst + h*linesize, 128, 8);
377}
378
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    /* Initialize the shared DSP helper contexts. */
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_idctdsp_init(&s->idsp, s->avctx);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    /* "No motion compensation" debug mode: replace all pixel
     * copy/average routines with stubs that paint mid-gray. */
    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    /* Install the C unquantizers; the bitexact MPEG-2 intra variant
     * (with mismatch control) is used under CODEC_FLAG_BITEXACT. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Architecture-specific initialization may override the C
     * implementations installed above. */
    if (ARCH_ALPHA)
        ff_MPV_common_init_axp(s);
    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
436
/**
 * Allocate the per-context scratch buffers (edge emulation and motion
 * estimation scratchpads) sized for the given line size.
 * Returns 0 on success (or when no buffers are needed), a negative
 * AVERROR code on failure.
 */
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    /* Room for overreads (64 extra bytes), rounded up to 32. */
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    /* Hardware-accelerated decoding does not use these CPU-side
     * scratch buffers. */
    if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
        return 0;

    if (linesize < 24) {
        av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
        return AVERROR_PATCHWELCOME;
    }

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for  halfpel / 21x21 for  h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
                      fail);

    /* The ME/RD/OBMC scratchpads all alias the same allocation;
     * obmc_scratchpad starts 16 bytes in. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
470
471/**
472 * Allocate a frame buffer
473 */
474static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
475{
476    int edges_needed = av_codec_is_encoder(s->avctx->codec);
477    int r, ret;
478
479    pic->tf.f = pic->f;
480    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
481        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
482        s->codec_id != AV_CODEC_ID_MSS2) {
483        if (edges_needed) {
484            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
485            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
486        }
487
488        r = ff_thread_get_buffer(s->avctx, &pic->tf,
489                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
490    } else {
491        pic->f->width  = s->avctx->width;
492        pic->f->height = s->avctx->height;
493        pic->f->format = s->avctx->pix_fmt;
494        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
495    }
496
497    if (r < 0 || !pic->f->buf[0]) {
498        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
499               r, pic->f->data[0]);
500        return -1;
501    }
502
503    if (edges_needed) {
504        int i;
505        for (i = 0; pic->f->data[i]; i++) {
506            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
507                         pic->f->linesize[i] +
508                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
509            pic->f->data[i] += offset;
510        }
511        pic->f->width  = s->avctx->width;
512        pic->f->height = s->avctx->height;
513    }
514
515    if (s->avctx->hwaccel) {
516        assert(!pic->hwaccel_picture_private);
517        if (s->avctx->hwaccel->frame_priv_data_size) {
518            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
519            if (!pic->hwaccel_priv_buf) {
520                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
521                return -1;
522            }
523            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
524        }
525    }
526
527    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
528                        s->uvlinesize != pic->f->linesize[1])) {
529        av_log(s->avctx, AV_LOG_ERROR,
530               "get_buffer() failed (stride changed)\n");
531        ff_mpeg_unref_picture(s, pic);
532        return -1;
533    }
534
535    if (pic->f->linesize[1] != pic->f->linesize[2]) {
536        av_log(s->avctx, AV_LOG_ERROR,
537               "get_buffer() failed (uv stride mismatch)\n");
538        ff_mpeg_unref_picture(s, pic);
539        return -1;
540    }
541
542    if (!s->edge_emu_buffer &&
543        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
544        av_log(s->avctx, AV_LOG_ERROR,
545               "get_buffer() failed to allocate context scratch buffers.\n");
546        ff_mpeg_unref_picture(s, pic);
547        return ret;
548    }
549
550    return 0;
551}
552
553void ff_free_picture_tables(Picture *pic)
554{
555    int i;
556
557    pic->alloc_mb_width  =
558    pic->alloc_mb_height = 0;
559
560    av_buffer_unref(&pic->mb_var_buf);
561    av_buffer_unref(&pic->mc_mb_var_buf);
562    av_buffer_unref(&pic->mb_mean_buf);
563    av_buffer_unref(&pic->mbskip_table_buf);
564    av_buffer_unref(&pic->qscale_table_buf);
565    av_buffer_unref(&pic->mb_type_buf);
566
567    for (i = 0; i < 2; i++) {
568        av_buffer_unref(&pic->motion_val_buf[i]);
569        av_buffer_unref(&pic->ref_index_buf[i]);
570    }
571}
572
/* Allocate the per-picture side-data tables (skip/qscale/mb_type always;
 * variance/mean only when encoding; motion vectors and reference indices
 * for H.263-family output, encoding, or MV visualization). Records the
 * geometry they were allocated for. Returns 0 or AVERROR(ENOMEM). */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    /* One extra row plus one entry for out-of-frame accesses. */
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    /* Rate-control / mode-decision statistics, encoder only. */
    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    /* Remember the geometry so ff_alloc_picture() can detect a size
     * change and reallocate. */
    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}
613
/* Make every allocated per-picture table buffer writable (copying it if
 * it is shared). Returns 0 on success or the first failing
 * av_buffer_make_writable() error. */
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
    return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
638
639/**
640 * Allocate a Picture.
641 * The pixels are allocated/set by calling get_buffer() if shared = 0
642 */
643int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
644{
645    int i, ret;
646
647    if (pic->qscale_table_buf)
648        if (   pic->alloc_mb_width  != s->mb_width
649            || pic->alloc_mb_height != s->mb_height)
650            ff_free_picture_tables(pic);
651
652    if (shared) {
653        av_assert0(pic->f->data[0]);
654        pic->shared = 1;
655    } else {
656        av_assert0(!pic->f->buf[0]);
657
658        if (alloc_frame_buffer(s, pic) < 0)
659            return -1;
660
661        s->linesize   = pic->f->linesize[0];
662        s->uvlinesize = pic->f->linesize[1];
663    }
664
665    if (!pic->qscale_table_buf)
666        ret = alloc_picture_tables(s, pic);
667    else
668        ret = make_tables_writable(pic);
669    if (ret < 0)
670        goto fail;
671
672    if (s->encoding) {
673        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
674        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
675        pic->mb_mean   = pic->mb_mean_buf->data;
676    }
677
678    pic->mbskip_table = pic->mbskip_table_buf->data;
679    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
680    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
681
682    if (pic->motion_val_buf[0]) {
683        for (i = 0; i < 2; i++) {
684            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
685            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
686        }
687    }
688
689    return 0;
690fail:
691    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
692    ff_mpeg_unref_picture(s, pic);
693    ff_free_picture_tables(pic);
694    return AVERROR(ENOMEM);
695}
696
697/**
698 * Deallocate a picture.
699 */
700void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
701{
702    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
703
704    pic->tf.f = pic->f;
705    /* WM Image / Screen codecs allocate internal buffers with different
706     * dimensions / colorspaces; ignore user-defined callbacks for these. */
707    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
708        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
709        s->codec_id != AV_CODEC_ID_MSS2)
710        ff_thread_release_buffer(s->avctx, &pic->tf);
711    else if (pic->f)
712        av_frame_unref(pic->f);
713
714    av_buffer_unref(&pic->hwaccel_priv_buf);
715
716    if (pic->needs_realloc)
717        ff_free_picture_tables(pic);
718
719    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
720}
721
/* Make dst's side-data table buffers reference src's (re-referencing
 * only when the underlying buffers differ) and copy the derived data
 * pointers. On reference failure dst's tables are freed and
 * AVERROR(ENOMEM) is returned. */
static int update_picture_tables(Picture *dst, Picture *src)
{
     int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* Copy the pointers derived from the buffers above. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}
766
767int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
768{
769    int ret;
770
771    av_assert0(!dst->f->buf[0]);
772    av_assert0(src->f->buf[0]);
773
774    src->tf.f = src->f;
775    dst->tf.f = dst->f;
776    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
777    if (ret < 0)
778        goto fail;
779
780    ret = update_picture_tables(dst, src);
781    if (ret < 0)
782        goto fail;
783
784    if (src->hwaccel_picture_private) {
785        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
786        if (!dst->hwaccel_priv_buf)
787            goto fail;
788        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
789    }
790
791    dst->field_picture           = src->field_picture;
792    dst->mb_var_sum              = src->mb_var_sum;
793    dst->mc_mb_var_sum           = src->mc_mb_var_sum;
794    dst->b_frame_score           = src->b_frame_score;
795    dst->needs_realloc           = src->needs_realloc;
796    dst->reference               = src->reference;
797    dst->shared                  = src->shared;
798
799    return 0;
800fail:
801    ff_mpeg_unref_picture(s, dst);
802    return ret;
803}
804
805static void exchange_uv(MpegEncContext *s)
806{
807    int16_t (*tmp)[64];
808
809    tmp           = s->pblocks[4];
810    s->pblocks[4] = s->pblocks[5];
811    s->pblocks[5] = tmp;
812}
813
/* Initialize the per-thread (duplicate) part of the context: motion
 * estimation maps (encoder only), optional noise-reduction error sums,
 * the DCT coefficient blocks, and the AC prediction values for
 * H.263-family codecs. Scratch buffers are left NULL and allocated
 * lazily by frame_size_alloc(). Returns 0 on success, -1 on allocation
 * failure (buffers are freed later via ff_MPV_common_end()). */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* Odd MB heights need one extra row in each plane. */
    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, times 2 (see s->blocks/s->block). */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    /* VCR2 stores U/V in the opposite order. */
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
863
864static void free_duplicate_context(MpegEncContext *s)
865{
866    if (s == NULL)
867        return;
868
869    av_freep(&s->edge_emu_buffer);
870    av_freep(&s->me.scratchpad);
871    s->me.temp =
872    s->rd_scratchpad =
873    s->b_scratchpad =
874    s->obmc_scratchpad = NULL;
875
876    av_freep(&s->dct_error_sum);
877    av_freep(&s->me.map);
878    av_freep(&s->me.score_map);
879    av_freep(&s->blocks);
880    av_freep(&s->ac_val_base);
881    s->block = NULL;
882}
883
/* Copy the per-thread fields (buffers, bitstream writer state, ME
 * state, AC prediction pointers) from src into bak; used by
 * ff_update_duplicate_context() to preserve them across a whole-struct
 * memcpy. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
910
/* Synchronize a per-thread context with the main one: copy the whole
 * struct from src to dst while preserving dst's own per-thread fields
 * (saved/restored via backup_duplicate_context), then rebuild the
 * pointers that must reference dst's own storage. Returns 0 on
 * success, negative AVERROR on scratch-buffer allocation failure. */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's block storage, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    /* VCR2 stores U/V in the opposite order. */
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
935
936int ff_mpeg_update_thread_context(AVCodecContext *dst,
937                                  const AVCodecContext *src)
938{
939    int i, ret;
940    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
941
942    if (dst == src)
943        return 0;
944
945    av_assert0(s != s1);
946
947    // FIXME can parameters change on I-frames?
948    // in that case dst may need a reinit
949    if (!s->context_initialized) {
950        memcpy(s, s1, sizeof(MpegEncContext));
951
952        s->avctx                 = dst;
953        s->bitstream_buffer      = NULL;
954        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
955
956        if (s1->context_initialized){
957//             s->picture_range_start  += MAX_PICTURE_COUNT;
958//             s->picture_range_end    += MAX_PICTURE_COUNT;
959            if((ret = ff_MPV_common_init(s)) < 0){
960                memset(s, 0, sizeof(MpegEncContext));
961                s->avctx = dst;
962                return ret;
963            }
964        }
965    }
966
967    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
968        s->context_reinit = 0;
969        s->height = s1->height;
970        s->width  = s1->width;
971        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
972            return ret;
973    }
974
975    s->avctx->coded_height  = s1->avctx->coded_height;
976    s->avctx->coded_width   = s1->avctx->coded_width;
977    s->avctx->width         = s1->avctx->width;
978    s->avctx->height        = s1->avctx->height;
979
980    s->coded_picture_number = s1->coded_picture_number;
981    s->picture_number       = s1->picture_number;
982
983    av_assert0(!s->picture || s->picture != s1->picture);
984    if(s->picture)
985    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
986        ff_mpeg_unref_picture(s, &s->picture[i]);
987        if (s1->picture[i].f->buf[0] &&
988            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
989            return ret;
990    }
991
992#define UPDATE_PICTURE(pic)\
993do {\
994    ff_mpeg_unref_picture(s, &s->pic);\
995    if (s1->pic.f && s1->pic.f->buf[0])\
996        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
997    else\
998        ret = update_picture_tables(&s->pic, &s1->pic);\
999    if (ret < 0)\
1000        return ret;\
1001} while (0)
1002
1003    UPDATE_PICTURE(current_picture);
1004    UPDATE_PICTURE(last_picture);
1005    UPDATE_PICTURE(next_picture);
1006
1007    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
1008    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1009    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
1010
1011    // Error/bug resilience
1012    s->next_p_frame_damaged = s1->next_p_frame_damaged;
1013    s->workaround_bugs      = s1->workaround_bugs;
1014    s->padding_bug_score    = s1->padding_bug_score;
1015
1016    // MPEG4 timing info
1017    memcpy(&s->last_time_base, &s1->last_time_base,
1018           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1019           (char *) &s1->last_time_base);
1020
1021    // B-frame info
1022    s->max_b_frames = s1->max_b_frames;
1023    s->low_delay    = s1->low_delay;
1024    s->droppable    = s1->droppable;
1025
1026    // DivX handling (doesn't work)
1027    s->divx_packed  = s1->divx_packed;
1028
1029    if (s1->bitstream_buffer) {
1030        if (s1->bitstream_buffer_size +
1031            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1032            av_fast_malloc(&s->bitstream_buffer,
1033                           &s->allocated_bitstream_buffer_size,
1034                           s1->allocated_bitstream_buffer_size);
1035            s->bitstream_buffer_size = s1->bitstream_buffer_size;
1036        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1037               s1->bitstream_buffer_size);
1038        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1039               FF_INPUT_BUFFER_PADDING_SIZE);
1040    }
1041
1042    // linesize dependend scratch buffer allocation
1043    if (!s->edge_emu_buffer)
1044        if (s1->linesize) {
1045            if (frame_size_alloc(s, s1->linesize) < 0) {
1046                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1047                       "scratch buffers.\n");
1048                return AVERROR(ENOMEM);
1049            }
1050        } else {
1051            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1052                   "be allocated due to unknown size.\n");
1053        }
1054
1055    // MPEG2/interlacing info
1056    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1057           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1058
1059    if (!s1->first_field) {
1060        s->last_pict_type = s1->pict_type;
1061        if (s1->current_picture_ptr)
1062            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1063    }
1064
1065    return 0;
1066}
1067
1068/**
1069 * Set the given MpegEncContext to common defaults
1070 * (same for encoding and decoding).
1071 * The changed fields will not depend upon the
1072 * prior state of the MpegEncContext.
1073 */
1074void ff_MPV_common_defaults(MpegEncContext *s)
1075{
1076    s->y_dc_scale_table      =
1077    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
1078    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
1079    s->progressive_frame     = 1;
1080    s->progressive_sequence  = 1;
1081    s->picture_structure     = PICT_FRAME;
1082
1083    s->coded_picture_number  = 0;
1084    s->picture_number        = 0;
1085
1086    s->f_code                = 1;
1087    s->b_code                = 1;
1088
1089    s->slice_context_count   = 1;
1090}
1091
1092/**
1093 * Set the given MpegEncContext to defaults for decoding.
1094 * the changed fields will not depend upon
1095 * the prior state of the MpegEncContext.
1096 */
1097void ff_MPV_decode_defaults(MpegEncContext *s)
1098{
1099    ff_MPV_common_defaults(s);
1100}
1101
1102static int init_er(MpegEncContext *s)
1103{
1104    ERContext *er = &s->er;
1105    int mb_array_size = s->mb_height * s->mb_stride;
1106    int i;
1107
1108    er->avctx       = s->avctx;
1109    er->dsp         = &s->dsp;
1110
1111    er->mb_index2xy = s->mb_index2xy;
1112    er->mb_num      = s->mb_num;
1113    er->mb_width    = s->mb_width;
1114    er->mb_height   = s->mb_height;
1115    er->mb_stride   = s->mb_stride;
1116    er->b8_stride   = s->b8_stride;
1117
1118    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
1119    er->error_status_table = av_mallocz(mb_array_size);
1120    if (!er->er_temp_buffer || !er->error_status_table)
1121        goto fail;
1122
1123    er->mbskip_table  = s->mbskip_table;
1124    er->mbintra_table = s->mbintra_table;
1125
1126    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1127        er->dc_val[i] = s->dc_val[i];
1128
1129    er->decode_mb = mpeg_er_decode_mb;
1130    er->opaque    = s;
1131
1132    return 0;
1133fail:
1134    av_freep(&er->er_temp_buffer);
1135    av_freep(&er->error_status_table);
1136    return AVERROR(ENOMEM);
1137}
1138
1139/**
1140 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1141 */
1142static int init_context_frame(MpegEncContext *s)
1143{
1144    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1145
1146    s->mb_width   = (s->width + 15) / 16;
1147    s->mb_stride  = s->mb_width + 1;
1148    s->b8_stride  = s->mb_width * 2 + 1;
1149    mb_array_size = s->mb_height * s->mb_stride;
1150    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1151
1152    /* set default edge pos, will be overridden
1153     * in decode_header if needed */
1154    s->h_edge_pos = s->mb_width * 16;
1155    s->v_edge_pos = s->mb_height * 16;
1156
1157    s->mb_num     = s->mb_width * s->mb_height;
1158
1159    s->block_wrap[0] =
1160    s->block_wrap[1] =
1161    s->block_wrap[2] =
1162    s->block_wrap[3] = s->b8_stride;
1163    s->block_wrap[4] =
1164    s->block_wrap[5] = s->mb_stride;
1165
1166    y_size  = s->b8_stride * (2 * s->mb_height + 1);
1167    c_size  = s->mb_stride * (s->mb_height + 1);
1168    yc_size = y_size + 2   * c_size;
1169
1170    if (s->mb_height & 1)
1171        yc_size += 2*s->b8_stride + 2*s->mb_stride;
1172
1173    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1174    for (y = 0; y < s->mb_height; y++)
1175        for (x = 0; x < s->mb_width; x++)
1176            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1177
1178    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1179
1180    if (s->encoding) {
1181        /* Allocate MV tables */
1182        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,                 mv_table_size * 2 * sizeof(int16_t), fail)
1183        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
1184        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
1185        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
1186        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,      mv_table_size * 2 * sizeof(int16_t), fail)
1187        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,          mv_table_size * 2 * sizeof(int16_t), fail)
1188        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
1189        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
1190        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
1191        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1192        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1193        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;
1194
1195        /* Allocate MB type table */
1196        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1197
1198        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1199
1200        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1201                         mb_array_size * sizeof(float), fail);
1202        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1203                         mb_array_size * sizeof(float), fail);
1204
1205    }
1206
1207    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1208        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1209        /* interlaced direct mode decoding tables */
1210        for (i = 0; i < 2; i++) {
1211            int j, k;
1212            for (j = 0; j < 2; j++) {
1213                for (k = 0; k < 2; k++) {
1214                    FF_ALLOCZ_OR_GOTO(s->avctx,
1215                                      s->b_field_mv_table_base[i][j][k],
1216                                      mv_table_size * 2 * sizeof(int16_t),
1217                                      fail);
1218                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1219                                                   s->mb_stride + 1;
1220                }
1221                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1222                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1223                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1224            }
1225            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1226        }
1227    }
1228    if (s->out_format == FMT_H263) {
1229        /* cbp values */
1230        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1231        s->coded_block = s->coded_block_base + s->b8_stride + 1;
1232
1233        /* cbp, ac_pred, pred_dir */
1234        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
1235        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1236    }
1237
1238    if (s->h263_pred || s->h263_plus || !s->encoding) {
1239        /* dc values */
1240        // MN: we need these for  error resilience of intra-frames
1241        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1242        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1243        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1244        s->dc_val[2] = s->dc_val[1] + c_size;
1245        for (i = 0; i < yc_size; i++)
1246            s->dc_val_base[i] = 1024;
1247    }
1248
1249    /* which mb is a intra block */
1250    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1251    memset(s->mbintra_table, 1, mb_array_size);
1252
1253    /* init macroblock skip table */
1254    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1255    // Note the + 1 is for  a quicker mpeg4 slice_end detection
1256
1257    return init_er(s);
1258fail:
1259    return AVERROR(ENOMEM);
1260}
1261
1262/**
1263 * init common structure for both encoder and decoder.
1264 * this assumes that some variables like width/height are already set
1265 */
1266av_cold int ff_MPV_common_init(MpegEncContext *s)
1267{
1268    int i;
1269    int nb_slices = (HAVE_THREADS &&
1270                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1271                    s->avctx->thread_count : 1;
1272
1273    if (s->encoding && s->avctx->slices)
1274        nb_slices = s->avctx->slices;
1275
1276    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1277        s->mb_height = (s->height + 31) / 32 * 2;
1278    else
1279        s->mb_height = (s->height + 15) / 16;
1280
1281    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1282        av_log(s->avctx, AV_LOG_ERROR,
1283               "decoding to AV_PIX_FMT_NONE is not supported.\n");
1284        return -1;
1285    }
1286
1287    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1288        int max_slices;
1289        if (s->mb_height)
1290            max_slices = FFMIN(MAX_THREADS, s->mb_height);
1291        else
1292            max_slices = MAX_THREADS;
1293        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1294               " reducing to %d\n", nb_slices, max_slices);
1295        nb_slices = max_slices;
1296    }
1297
1298    if ((s->width || s->height) &&
1299        av_image_check_size(s->width, s->height, 0, s->avctx))
1300        return -1;
1301
1302    ff_dct_common_init(s);
1303
1304    s->flags  = s->avctx->flags;
1305    s->flags2 = s->avctx->flags2;
1306
1307    /* set chroma shifts */
1308    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1309                                  &s->chroma_x_shift,
1310                                  &s->chroma_y_shift);
1311
1312    /* convert fourcc to upper case */
1313    s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
1314
1315    s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
1316
1317    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1318                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
1319    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1320        s->picture[i].f = av_frame_alloc();
1321        if (!s->picture[i].f)
1322            goto fail;
1323    }
1324    memset(&s->next_picture, 0, sizeof(s->next_picture));
1325    memset(&s->last_picture, 0, sizeof(s->last_picture));
1326    memset(&s->current_picture, 0, sizeof(s->current_picture));
1327    memset(&s->new_picture, 0, sizeof(s->new_picture));
1328    s->next_picture.f = av_frame_alloc();
1329    if (!s->next_picture.f)
1330        goto fail;
1331    s->last_picture.f = av_frame_alloc();
1332    if (!s->last_picture.f)
1333        goto fail;
1334    s->current_picture.f = av_frame_alloc();
1335    if (!s->current_picture.f)
1336        goto fail;
1337    s->new_picture.f = av_frame_alloc();
1338    if (!s->new_picture.f)
1339        goto fail;
1340
1341        if (init_context_frame(s))
1342            goto fail;
1343
1344        s->parse_context.state = -1;
1345
1346        s->context_initialized = 1;
1347        s->thread_context[0]   = s;
1348
1349//     if (s->width && s->height) {
1350        if (nb_slices > 1) {
1351            for (i = 1; i < nb_slices; i++) {
1352                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1353                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1354            }
1355
1356            for (i = 0; i < nb_slices; i++) {
1357                if (init_duplicate_context(s->thread_context[i]) < 0)
1358                    goto fail;
1359                    s->thread_context[i]->start_mb_y =
1360                        (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1361                    s->thread_context[i]->end_mb_y   =
1362                        (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1363            }
1364        } else {
1365            if (init_duplicate_context(s) < 0)
1366                goto fail;
1367            s->start_mb_y = 0;
1368            s->end_mb_y   = s->mb_height;
1369        }
1370        s->slice_context_count = nb_slices;
1371//     }
1372
1373    return 0;
1374 fail:
1375    ff_MPV_common_end(s);
1376    return -1;
1377}
1378
1379/**
1380 * Frees and resets MpegEncContext fields depending on the resolution.
1381 * Is used during resolution changes to avoid a full reinitialization of the
1382 * codec.
1383 */
1384static int free_context_frame(MpegEncContext *s)
1385{
1386    int i, j, k;
1387
1388    av_freep(&s->mb_type);
1389    av_freep(&s->p_mv_table_base);
1390    av_freep(&s->b_forw_mv_table_base);
1391    av_freep(&s->b_back_mv_table_base);
1392    av_freep(&s->b_bidir_forw_mv_table_base);
1393    av_freep(&s->b_bidir_back_mv_table_base);
1394    av_freep(&s->b_direct_mv_table_base);
1395    s->p_mv_table            = NULL;
1396    s->b_forw_mv_table       = NULL;
1397    s->b_back_mv_table       = NULL;
1398    s->b_bidir_forw_mv_table = NULL;
1399    s->b_bidir_back_mv_table = NULL;
1400    s->b_direct_mv_table     = NULL;
1401    for (i = 0; i < 2; i++) {
1402        for (j = 0; j < 2; j++) {
1403            for (k = 0; k < 2; k++) {
1404                av_freep(&s->b_field_mv_table_base[i][j][k]);
1405                s->b_field_mv_table[i][j][k] = NULL;
1406            }
1407            av_freep(&s->b_field_select_table[i][j]);
1408            av_freep(&s->p_field_mv_table_base[i][j]);
1409            s->p_field_mv_table[i][j] = NULL;
1410        }
1411        av_freep(&s->p_field_select_table[i]);
1412    }
1413
1414    av_freep(&s->dc_val_base);
1415    av_freep(&s->coded_block_base);
1416    av_freep(&s->mbintra_table);
1417    av_freep(&s->cbp_table);
1418    av_freep(&s->pred_dir_table);
1419
1420    av_freep(&s->mbskip_table);
1421
1422    av_freep(&s->er.error_status_table);
1423    av_freep(&s->er.er_temp_buffer);
1424    av_freep(&s->mb_index2xy);
1425    av_freep(&s->lambda_table);
1426
1427    av_freep(&s->cplx_tab);
1428    av_freep(&s->bits_tab);
1429
1430    s->linesize = s->uvlinesize = 0;
1431
1432    return 0;
1433}
1434
1435int ff_MPV_common_frame_size_change(MpegEncContext *s)
1436{
1437    int i, err = 0;
1438
1439    if (!s->context_initialized)
1440        return AVERROR(EINVAL);
1441
1442    if (s->slice_context_count > 1) {
1443        for (i = 0; i < s->slice_context_count; i++) {
1444            free_duplicate_context(s->thread_context[i]);
1445        }
1446        for (i = 1; i < s->slice_context_count; i++) {
1447            av_freep(&s->thread_context[i]);
1448        }
1449    } else
1450        free_duplicate_context(s);
1451
1452    if ((err = free_context_frame(s)) < 0)
1453        return err;
1454
1455    if (s->picture)
1456        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1457                s->picture[i].needs_realloc = 1;
1458        }
1459
1460    s->last_picture_ptr         =
1461    s->next_picture_ptr         =
1462    s->current_picture_ptr      = NULL;
1463
1464    // init
1465    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1466        s->mb_height = (s->height + 31) / 32 * 2;
1467    else
1468        s->mb_height = (s->height + 15) / 16;
1469
1470    if ((s->width || s->height) &&
1471        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1472        goto fail;
1473
1474    if ((err = init_context_frame(s)))
1475        goto fail;
1476
1477    s->thread_context[0]   = s;
1478
1479    if (s->width && s->height) {
1480        int nb_slices = s->slice_context_count;
1481        if (nb_slices > 1) {
1482            for (i = 1; i < nb_slices; i++) {
1483                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1484                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1485            }
1486
1487            for (i = 0; i < nb_slices; i++) {
1488                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1489                    goto fail;
1490                    s->thread_context[i]->start_mb_y =
1491                        (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1492                    s->thread_context[i]->end_mb_y   =
1493                        (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1494            }
1495        } else {
1496            err = init_duplicate_context(s);
1497            if (err < 0)
1498                goto fail;
1499            s->start_mb_y = 0;
1500            s->end_mb_y   = s->mb_height;
1501        }
1502        s->slice_context_count = nb_slices;
1503    }
1504
1505    return 0;
1506 fail:
1507    ff_MPV_common_end(s);
1508    return err;
1509}
1510
/* Free everything set up by ff_MPV_common_init() / init_context_frame();
 * common teardown for both encoder and decoder. Also used on the init
 * failure path, so it must tolerate partially initialized contexts. */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    /* free per-slice scratch state first, then the slice context copies
     * (index 0 is s itself and must not be freed) */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    /* release the picture pool and the dedicated picture slots */
    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    /* resolution-dependent tables */
    free_context_frame(s);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
1561
1562av_cold void ff_init_rl(RLTable *rl,
1563                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1564{
1565    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1566    uint8_t index_run[MAX_RUN + 1];
1567    int last, run, level, start, end, i;
1568
1569    /* If table is static, we can quit if rl->max_level[0] is not NULL */
1570    if (static_store && rl->max_level[0])
1571        return;
1572
1573    /* compute max_level[], max_run[] and index_run[] */
1574    for (last = 0; last < 2; last++) {
1575        if (last == 0) {
1576            start = 0;
1577            end = rl->last;
1578        } else {
1579            start = rl->last;
1580            end = rl->n;
1581        }
1582
1583        memset(max_level, 0, MAX_RUN + 1);
1584        memset(max_run, 0, MAX_LEVEL + 1);
1585        memset(index_run, rl->n, MAX_RUN + 1);
1586        for (i = start; i < end; i++) {
1587            run   = rl->table_run[i];
1588            level = rl->table_level[i];
1589            if (index_run[run] == rl->n)
1590                index_run[run] = i;
1591            if (level > max_level[run])
1592                max_level[run] = level;
1593            if (run > max_run[level])
1594                max_run[level] = run;
1595        }
1596        if (static_store)
1597            rl->max_level[last] = static_store[last];
1598        else
1599            rl->max_level[last] = av_malloc(MAX_RUN + 1);
1600        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1601        if (static_store)
1602            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1603        else
1604            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1605        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1606        if (static_store)
1607            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1608        else
1609            rl->index_run[last] = av_malloc(MAX_RUN + 1);
1610        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1611    }
1612}
1613
1614av_cold void ff_init_vlc_rl(RLTable *rl)
1615{
1616    int i, q;
1617
1618    for (q = 0; q < 32; q++) {
1619        int qmul = q * 2;
1620        int qadd = (q - 1) | 1;
1621
1622        if (q == 0) {
1623            qmul = 1;
1624            qadd = 0;
1625        }
1626        for (i = 0; i < rl->vlc.table_size; i++) {
1627            int code = rl->vlc.table[i][0];
1628            int len  = rl->vlc.table[i][1];
1629            int level, run;
1630
1631            if (len == 0) { // illegal code
1632                run   = 66;
1633                level = MAX_LEVEL;
1634            } else if (len < 0) { // more bits needed
1635                run   = 0;
1636                level = code;
1637            } else {
1638                if (code == rl->n) { // esc
1639                    run   = 66;
1640                    level =  0;
1641                } else {
1642                    run   = rl->table_run[code] + 1;
1643                    level = rl->table_level[code] * qmul + qadd;
1644                    if (code >= rl->last) run += 192;
1645                }
1646            }
1647            rl->rl_vlc[q][i].len   = len;
1648            rl->rl_vlc[q][i].level = level;
1649            rl->rl_vlc[q][i].run   = run;
1650        }
1651    }
1652}
1653
1654static void release_unused_pictures(MpegEncContext *s)
1655{
1656    int i;
1657
1658    /* release non reference frames */
1659    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1660        if (!s->picture[i].reference)
1661            ff_mpeg_unref_picture(s, &s->picture[i]);
1662    }
1663}
1664
1665static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1666{
1667    if (pic == s->last_picture_ptr)
1668        return 0;
1669    if (pic->f->buf[0] == NULL)
1670        return 1;
1671    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1672        return 1;
1673    return 0;
1674}
1675
1676static int find_unused_picture(MpegEncContext *s, int shared)
1677{
1678    int i;
1679
1680    if (shared) {
1681        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1682            if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1683                return i;
1684        }
1685    } else {
1686        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1687            if (pic_is_unused(s, &s->picture[i]))
1688                return i;
1689        }
1690    }
1691
1692    av_log(s->avctx, AV_LOG_FATAL,
1693           "Internal error, picture buffer overflow\n");
1694    /* We could return -1, but the codec would crash trying to draw into a
1695     * non-existing frame anyway. This is safer than waiting for a random crash.
1696     * Also the return of this is never useful, an encoder must only allocate
1697     * as much as allowed in the specification. This has no relationship to how
1698     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1699     * enough for such valid streams).
1700     * Plus, a decoder has to check stream validity and remove frames if too
1701     * many reference frames are around. Waiting for "OOM" is not correct at
1702     * all. Similarly, missing reference frames have to be replaced by
1703     * interpolated/MC frames, anything else is a bug in the codec ...
1704     */
1705    abort();
1706    return -1;
1707}
1708
1709int ff_find_unused_picture(MpegEncContext *s, int shared)
1710{
1711    int ret = find_unused_picture(s, shared);
1712
1713    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1714        if (s->picture[ret].needs_realloc) {
1715            s->picture[ret].needs_realloc = 0;
1716            ff_free_picture_tables(&s->picture[ret]);
1717            ff_mpeg_unref_picture(s, &s->picture[ret]);
1718        }
1719    }
1720    return ret;
1721}
1722
1723static void gray_frame(AVFrame *frame)
1724{
1725    int i, h_chroma_shift, v_chroma_shift;
1726
1727    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1728
1729    for(i=0; i<frame->height; i++)
1730        memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1731    for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1732        memset(frame->data[1] + frame->linesize[1]*i,
1733               0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1734        memset(frame->data[2] + frame->linesize[2]*i,
1735               0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1736    }
1737}
1738
1739/**
1740 * generic function called after decoding
1741 * the header and before a frame is decoded.
1742 */
1743int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1744{
1745    int i, ret;
1746    Picture *pic;
1747    s->mb_skipped = 0;
1748
1749    if (!ff_thread_can_start_frame(avctx)) {
1750        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1751        return -1;
1752    }
1753
1754    /* mark & release old frames */
1755    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1756        s->last_picture_ptr != s->next_picture_ptr &&
1757        s->last_picture_ptr->f->buf[0]) {
1758        ff_mpeg_unref_picture(s, s->last_picture_ptr);
1759    }
1760
1761    /* release forgotten pictures */
1762    /* if (mpeg124/h263) */
1763    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1764        if (&s->picture[i] != s->last_picture_ptr &&
1765            &s->picture[i] != s->next_picture_ptr &&
1766            s->picture[i].reference && !s->picture[i].needs_realloc) {
1767            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1768                av_log(avctx, AV_LOG_ERROR,
1769                       "releasing zombie picture\n");
1770            ff_mpeg_unref_picture(s, &s->picture[i]);
1771        }
1772    }
1773
1774    ff_mpeg_unref_picture(s, &s->current_picture);
1775
1776    release_unused_pictures(s);
1777
1778    if (s->current_picture_ptr &&
1779        s->current_picture_ptr->f->buf[0] == NULL) {
1780        // we already have a unused image
1781        // (maybe it was set before reading the header)
1782        pic = s->current_picture_ptr;
1783    } else {
1784        i   = ff_find_unused_picture(s, 0);
1785        if (i < 0) {
1786            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1787            return i;
1788        }
1789        pic = &s->picture[i];
1790    }
1791
1792    pic->reference = 0;
1793    if (!s->droppable) {
1794        if (s->pict_type != AV_PICTURE_TYPE_B)
1795            pic->reference = 3;
1796    }
1797
1798    pic->f->coded_picture_number = s->coded_picture_number++;
1799
1800    if (ff_alloc_picture(s, pic, 0) < 0)
1801        return -1;
1802
1803    s->current_picture_ptr = pic;
1804    // FIXME use only the vars from current_pic
1805    s->current_picture_ptr->f->top_field_first = s->top_field_first;
1806    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1807        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1808        if (s->picture_structure != PICT_FRAME)
1809            s->current_picture_ptr->f->top_field_first =
1810                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1811    }
1812    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1813                                                 !s->progressive_sequence;
1814    s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1815
1816    s->current_picture_ptr->f->pict_type = s->pict_type;
1817    // if (s->flags && CODEC_FLAG_QSCALE)
1818    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1819    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1820
1821    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1822                                   s->current_picture_ptr)) < 0)
1823        return ret;
1824
1825    if (s->pict_type != AV_PICTURE_TYPE_B) {
1826        s->last_picture_ptr = s->next_picture_ptr;
1827        if (!s->droppable)
1828            s->next_picture_ptr = s->current_picture_ptr;
1829    }
1830    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1831            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1832            s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
1833            s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
1834            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1835            s->pict_type, s->droppable);
1836
1837    if ((s->last_picture_ptr == NULL ||
1838         s->last_picture_ptr->f->buf[0] == NULL) &&
1839        (s->pict_type != AV_PICTURE_TYPE_I ||
1840         s->picture_structure != PICT_FRAME)) {
1841        int h_chroma_shift, v_chroma_shift;
1842        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1843                                         &h_chroma_shift, &v_chroma_shift);
1844        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1845            av_log(avctx, AV_LOG_DEBUG,
1846                   "allocating dummy last picture for B frame\n");
1847        else if (s->pict_type != AV_PICTURE_TYPE_I)
1848            av_log(avctx, AV_LOG_ERROR,
1849                   "warning: first frame is no keyframe\n");
1850        else if (s->picture_structure != PICT_FRAME)
1851            av_log(avctx, AV_LOG_DEBUG,
1852                   "allocate dummy last picture for field based first keyframe\n");
1853
1854        /* Allocate a dummy frame */
1855        i = ff_find_unused_picture(s, 0);
1856        if (i < 0) {
1857            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1858            return i;
1859        }
1860        s->last_picture_ptr = &s->picture[i];
1861
1862        s->last_picture_ptr->reference   = 3;
1863        s->last_picture_ptr->f->key_frame = 0;
1864        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1865
1866        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1867            s->last_picture_ptr = NULL;
1868            return -1;
1869        }
1870
1871        if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1872            for(i=0; i<avctx->height; i++)
1873                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1874                       0x80, avctx->width);
1875            for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1876                memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1877                       0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1878                memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1879                       0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1880            }
1881
1882            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1883                for(i=0; i<avctx->height; i++)
1884                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1885            }
1886        }
1887
1888        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1889        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1890    }
1891    if ((s->next_picture_ptr == NULL ||
1892         s->next_picture_ptr->f->buf[0] == NULL) &&
1893        s->pict_type == AV_PICTURE_TYPE_B) {
1894        /* Allocate a dummy frame */
1895        i = ff_find_unused_picture(s, 0);
1896        if (i < 0) {
1897            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1898            return i;
1899        }
1900        s->next_picture_ptr = &s->picture[i];
1901
1902        s->next_picture_ptr->reference   = 3;
1903        s->next_picture_ptr->f->key_frame = 0;
1904        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1905
1906        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1907            s->next_picture_ptr = NULL;
1908            return -1;
1909        }
1910        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1911        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1912    }
1913
1914#if 0 // BUFREF-FIXME
1915    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1916    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1917#endif
1918    if (s->last_picture_ptr) {
1919        ff_mpeg_unref_picture(s, &s->last_picture);
1920        if (s->last_picture_ptr->f->buf[0] &&
1921            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1922                                       s->last_picture_ptr)) < 0)
1923            return ret;
1924    }
1925    if (s->next_picture_ptr) {
1926        ff_mpeg_unref_picture(s, &s->next_picture);
1927        if (s->next_picture_ptr->f->buf[0] &&
1928            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1929                                       s->next_picture_ptr)) < 0)
1930            return ret;
1931    }
1932
1933    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1934                                                 s->last_picture_ptr->f->buf[0]));
1935
1936    if (s->picture_structure!= PICT_FRAME) {
1937        int i;
1938        for (i = 0; i < 4; i++) {
1939            if (s->picture_structure == PICT_BOTTOM_FIELD) {
1940                s->current_picture.f->data[i] +=
1941                    s->current_picture.f->linesize[i];
1942            }
1943            s->current_picture.f->linesize[i] *= 2;
1944            s->last_picture.f->linesize[i]    *= 2;
1945            s->next_picture.f->linesize[i]    *= 2;
1946        }
1947    }
1948
1949    s->err_recognition = avctx->err_recognition;
1950
1951    /* set dequantizer, we can't do it during init as
1952     * it might change for mpeg4 and we can't do it in the header
1953     * decode as init is not called for mpeg4 there yet */
1954    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1955        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1956        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1957    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1958        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1959        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1960    } else {
1961        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1962        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1963    }
1964
1965    if (s->avctx->debug & FF_DEBUG_NOMC) {
1966        gray_frame(s->current_picture_ptr->f);
1967    }
1968
1969    return 0;
1970}
1971
1972/* called after a frame has been decoded. */
1973void ff_MPV_frame_end(MpegEncContext *s)
1974{
1975    emms_c();
1976
1977    if (s->current_picture.reference)
1978        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1979}
1980
1981
/**
 * Clip a line segment against the horizontal range [0, xmax],
 * adjusting the y coordinates by linear interpolation.
 * @return 1 if the segment lies completely outside, 0 otherwise
 */
static int clip_line(int *x0, int *y0, int *x1, int *y1, int xmax)
{
    /* ensure (x0, y0) is the left endpoint; recurse with swapped roles if not */
    if (*x0 > *x1)
        return clip_line(x1, y1, x0, y0, xmax);

    if (*x0 < 0) {
        if (*x1 < 0)
            return 1;                       /* entirely left of the range */
        *y0 = *y1 + (*y0 - *y1) * (int64_t)*x1 / (*x1 - *x0);
        *x0 = 0;
    }

    if (*x1 > xmax) {
        if (*x0 > xmax)
            return 1;                       /* entirely right of the range */
        *y1 = *y0 + (*y1 - *y0) * (int64_t)(xmax - *x0) / (*x1 - *x0);
        *x1 = xmax;
    }

    return 0;
}
2002
2003
2004/**
2005 * Draw a line from (ex, ey) -> (sx, sy).
2006 * @param w width of the image
2007 * @param h height of the image
2008 * @param stride stride/linesize of the image
2009 * @param color color of the arrow
2010 */
2011static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2012                      int w, int h, int stride, int color)
2013{
2014    int x, y, fr, f;
2015
2016    if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2017        return;
2018    if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2019        return;
2020
2021    sx = av_clip(sx, 0, w - 1);
2022    sy = av_clip(sy, 0, h - 1);
2023    ex = av_clip(ex, 0, w - 1);
2024    ey = av_clip(ey, 0, h - 1);
2025
2026    buf[sy * stride + sx] += color;
2027
2028    if (FFABS(ex - sx) > FFABS(ey - sy)) {
2029        if (sx > ex) {
2030            FFSWAP(int, sx, ex);
2031            FFSWAP(int, sy, ey);
2032        }
2033        buf += sx + sy * stride;
2034        ex  -= sx;
2035        f    = ((ey - sy) << 16) / ex;
2036        for (x = 0; x <= ex; x++) {
2037            y  = (x * f) >> 16;
2038            fr = (x * f) & 0xFFFF;
2039            buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
2040            if(fr) buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
2041        }
2042    } else {
2043        if (sy > ey) {
2044            FFSWAP(int, sx, ex);
2045            FFSWAP(int, sy, ey);
2046        }
2047        buf += sx + sy * stride;
2048        ey  -= sy;
2049        if (ey)
2050            f = ((ex - sx) << 16) / ey;
2051        else
2052            f = 0;
2053        for(y= 0; y <= ey; y++){
2054            x  = (y*f) >> 16;
2055            fr = (y*f) & 0xFFFF;
2056            buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
2057            if(fr) buf[y * stride + x + 1] += (color *            fr ) >> 16;
2058        }
2059    }
2060}
2061
2062/**
2063 * Draw an arrow from (ex, ey) -> (sx, sy).
2064 * @param w width of the image
2065 * @param h height of the image
2066 * @param stride stride/linesize of the image
2067 * @param color color of the arrow
2068 */
2069static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2070                       int ey, int w, int h, int stride, int color, int tail, int direction)
2071{
2072    int dx,dy;
2073
2074    if (direction) {
2075        FFSWAP(int, sx, ex);
2076        FFSWAP(int, sy, ey);
2077    }
2078
2079    sx = av_clip(sx, -100, w + 100);
2080    sy = av_clip(sy, -100, h + 100);
2081    ex = av_clip(ex, -100, w + 100);
2082    ey = av_clip(ey, -100, h + 100);
2083
2084    dx = ex - sx;
2085    dy = ey - sy;
2086
2087    if (dx * dx + dy * dy > 3 * 3) {
2088        int rx =  dx + dy;
2089        int ry = -dx + dy;
2090        int length = ff_sqrt((rx * rx + ry * ry) << 8);
2091
2092        // FIXME subpixel accuracy
2093        rx = ROUNDED_DIV(rx * 3 << 4, length);
2094        ry = ROUNDED_DIV(ry * 3 << 4, length);
2095
2096        if (tail) {
2097            rx = -rx;
2098            ry = -ry;
2099        }
2100
2101        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2102        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2103    }
2104    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2105}
2106
2107/**
2108 * Print debugging info for the given picture.
2109 */
2110void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2111                         uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2112                         int *low_delay,
2113                         int mb_width, int mb_height, int mb_stride, int quarter_sample)
2114{
2115    if (avctx->hwaccel || !mbtype_table
2116        || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2117        return;
2118
2119
2120    if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2121        int x,y;
2122
2123        av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2124               av_get_picture_type_char(pict->pict_type));
2125        for (y = 0; y < mb_height; y++) {
2126            for (x = 0; x < mb_width; x++) {
2127                if (avctx->debug & FF_DEBUG_SKIP) {
2128                    int count = mbskip_table[x + y * mb_stride];
2129                    if (count > 9)
2130                        count = 9;
2131                    av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2132                }
2133                if (avctx->debug & FF_DEBUG_QP) {
2134                    av_log(avctx, AV_LOG_DEBUG, "%2d",
2135                           qscale_table[x + y * mb_stride]);
2136                }
2137                if (avctx->debug & FF_DEBUG_MB_TYPE) {
2138                    int mb_type = mbtype_table[x + y * mb_stride];
2139                    // Type & MV direction
2140                    if (IS_PCM(mb_type))
2141                        av_log(avctx, AV_LOG_DEBUG, "P");
2142                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2143                        av_log(avctx, AV_LOG_DEBUG, "A");
2144                    else if (IS_INTRA4x4(mb_type))
2145                        av_log(avctx, AV_LOG_DEBUG, "i");
2146                    else if (IS_INTRA16x16(mb_type))
2147                        av_log(avctx, AV_LOG_DEBUG, "I");
2148                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2149                        av_log(avctx, AV_LOG_DEBUG, "d");
2150                    else if (IS_DIRECT(mb_type))
2151                        av_log(avctx, AV_LOG_DEBUG, "D");
2152                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2153                        av_log(avctx, AV_LOG_DEBUG, "g");
2154                    else if (IS_GMC(mb_type))
2155                        av_log(avctx, AV_LOG_DEBUG, "G");
2156                    else if (IS_SKIP(mb_type))
2157                        av_log(avctx, AV_LOG_DEBUG, "S");
2158                    else if (!USES_LIST(mb_type, 1))
2159                        av_log(avctx, AV_LOG_DEBUG, ">");
2160                    else if (!USES_LIST(mb_type, 0))
2161                        av_log(avctx, AV_LOG_DEBUG, "<");
2162                    else {
2163                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2164                        av_log(avctx, AV_LOG_DEBUG, "X");
2165                    }
2166
2167                    // segmentation
2168                    if (IS_8X8(mb_type))
2169                        av_log(avctx, AV_LOG_DEBUG, "+");
2170                    else if (IS_16X8(mb_type))
2171                        av_log(avctx, AV_LOG_DEBUG, "-");
2172                    else if (IS_8X16(mb_type))
2173                        av_log(avctx, AV_LOG_DEBUG, "|");
2174                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2175                        av_log(avctx, AV_LOG_DEBUG, " ");
2176                    else
2177                        av_log(avctx, AV_LOG_DEBUG, "?");
2178
2179
2180                    if (IS_INTERLACED(mb_type))
2181                        av_log(avctx, AV_LOG_DEBUG, "=");
2182                    else
2183                        av_log(avctx, AV_LOG_DEBUG, " ");
2184                }
2185            }
2186            av_log(avctx, AV_LOG_DEBUG, "\n");
2187        }
2188    }
2189
2190    if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2191        (avctx->debug_mv)) {
2192        const int shift = 1 + quarter_sample;
2193        int mb_y;
2194        uint8_t *ptr;
2195        int i;
2196        int h_chroma_shift, v_chroma_shift, block_height;
2197        const int width          = avctx->width;
2198        const int height         = avctx->height;
2199        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2200        const int mv_stride      = (mb_width << mv_sample_log2) +
2201                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2202
2203        *low_delay = 0; // needed to see the vectors without trashing the buffers
2204
2205        avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2206
2207        av_frame_make_writable(pict);
2208
2209        pict->opaque = NULL;
2210        ptr          = pict->data[0];
2211        block_height = 16 >> v_chroma_shift;
2212
2213        for (mb_y = 0; mb_y < mb_height; mb_y++) {
2214            int mb_x;
2215            for (mb_x = 0; mb_x < mb_width; mb_x++) {
2216                const int mb_index = mb_x + mb_y * mb_stride;
2217                if ((avctx->debug_mv) && motion_val[0]) {
2218                    int type;
2219                    for (type = 0; type < 3; type++) {
2220                        int direction = 0;
2221                        switch (type) {
2222                        case 0:
2223                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2224                                (pict->pict_type!= AV_PICTURE_TYPE_P))
2225                                continue;
2226                            direction = 0;
2227                            break;
2228                        case 1:
2229                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2230                                (pict->pict_type!= AV_PICTURE_TYPE_B))
2231                                continue;
2232                            direction = 0;
2233                            break;
2234                        case 2:
2235                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2236                                (pict->pict_type!= AV_PICTURE_TYPE_B))
2237                                continue;
2238                            direction = 1;
2239                            break;
2240                        }
2241                        if (!USES_LIST(mbtype_table[mb_index], direction))
2242                            continue;
2243
2244                        if (IS_8X8(mbtype_table[mb_index])) {
2245                            int i;
2246                            for (i = 0; i < 4; i++) {
2247                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
2248                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2249                                int xy = (mb_x * 2 + (i & 1) +
2250                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2251                                int mx = (motion_val[direction][xy][0] >> shift) + sx;
2252                                int my = (motion_val[direction][xy][1] >> shift) + sy;
2253                                draw_arrow(ptr, sx, sy, mx, my, width,
2254                                           height, pict->linesize[0], 100, 0, direction);
2255                            }
2256                        } else if (IS_16X8(mbtype_table[mb_index])) {
2257                            int i;
2258                            for (i = 0; i < 2; i++) {
2259                                int sx = mb_x * 16 + 8;
2260                                int sy = mb_y * 16 + 4 + 8 * i;
2261                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2262                                int mx = (motion_val[direction][xy][0] >> shift);
2263                                int my = (motion_val[direction][xy][1] >> shift);
2264
2265                                if (IS_INTERLACED(mbtype_table[mb_index]))
2266                                    my *= 2;
2267
2268                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2269                                           height, pict->linesize[0], 100, 0, direction);
2270                            }
2271                        } else if (IS_8X16(mbtype_table[mb_index])) {
2272                            int i;
2273                            for (i = 0; i < 2; i++) {
2274                                int sx = mb_x * 16 + 4 + 8 * i;
2275                                int sy = mb_y * 16 + 8;
2276                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2277                                int mx = motion_val[direction][xy][0] >> shift;
2278                                int my = motion_val[direction][xy][1] >> shift;
2279
2280                                if (IS_INTERLACED(mbtype_table[mb_index]))
2281                                    my *= 2;
2282
2283                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2284                                           height, pict->linesize[0], 100, 0, direction);
2285                            }
2286                        } else {
2287                              int sx= mb_x * 16 + 8;
2288                              int sy= mb_y * 16 + 8;
2289                              int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2290                              int mx= (motion_val[direction][xy][0]>>shift) + sx;
2291                              int my= (motion_val[direction][xy][1]>>shift) + sy;
2292                              draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2293                        }
2294                    }
2295                }
2296                if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2297                    uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2298                                 0x0101010101010101ULL;
2299                    int y;
2300                    for (y = 0; y < block_height; y++) {
2301                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
2302                                      (block_height * mb_y + y) *
2303                                      pict->linesize[1]) = c;
2304                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
2305                                      (block_height * mb_y + y) *
2306                                      pict->linesize[2]) = c;
2307                    }
2308                }
2309                if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2310                    motion_val[0]) {
2311                    int mb_type = mbtype_table[mb_index];
2312                    uint64_t u,v;
2313                    int y;
2314#define COLOR(theta, r) \
2315    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2316    v = (int)(128 + r * sin(theta * 3.141592 / 180));
2317
2318
2319                    u = v = 128;
2320                    if (IS_PCM(mb_type)) {
2321                        COLOR(120, 48)
2322                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2323                               IS_INTRA16x16(mb_type)) {
2324                        COLOR(30, 48)
2325                    } else if (IS_INTRA4x4(mb_type)) {
2326                        COLOR(90, 48)
2327                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2328                        // COLOR(120, 48)
2329                    } else if (IS_DIRECT(mb_type)) {
2330                        COLOR(150, 48)
2331                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2332                        COLOR(170, 48)
2333                    } else if (IS_GMC(mb_type)) {
2334                        COLOR(190, 48)
2335                    } else if (IS_SKIP(mb_type)) {
2336                        // COLOR(180, 48)
2337                    } else if (!USES_LIST(mb_type, 1)) {
2338                        COLOR(240, 48)
2339                    } else if (!USES_LIST(mb_type, 0)) {
2340                        COLOR(0, 48)
2341                    } else {
2342                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2343                        COLOR(300,48)
2344                    }
2345
2346                    u *= 0x0101010101010101ULL;
2347                    v *= 0x0101010101010101ULL;
2348                    for (y = 0; y < block_height; y++) {
2349                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
2350                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
2351                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
2352                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
2353                    }
2354
2355                    // segmentation
2356                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2357                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2358                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2359                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2360                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2361                    }
2362                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2363                        for (y = 0; y < 16; y++)
2364                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2365                                          pict->linesize[0]] ^= 0x80;
2366                    }
2367                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2368                        int dm = 1 << (mv_sample_log2 - 2);
2369                        for (i = 0; i < 4; i++) {
2370                            int sx = mb_x * 16 + 8 * (i & 1);
2371                            int sy = mb_y * 16 + 8 * (i >> 1);
2372                            int xy = (mb_x * 2 + (i & 1) +
2373                                     (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2374                            // FIXME bidir
2375                            int32_t *mv = (int32_t *) &motion_val[0][xy];
2376                            if (mv[0] != mv[dm] ||
2377                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2378                                for (y = 0; y < 8; y++)
2379                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2380                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2381                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2382                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
2383                        }
2384                    }
2385
2386                    if (IS_INTERLACED(mb_type) &&
2387                        avctx->codec->id == AV_CODEC_ID_H264) {
2388                        // hmm
2389                    }
2390                }
2391                mbskip_table[mb_index] = 0;
2392            }
2393        }
2394    }
2395}
2396
/**
 * Convenience wrapper around ff_print_debug_info2() that pulls the
 * per-picture tables and geometry from the MpegEncContext.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
2403
2404int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2405{
2406    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2407    int offset = 2*s->mb_stride + 1;
2408    if(!ref)
2409        return AVERROR(ENOMEM);
2410    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2411    ref->size -= offset;
2412    ref->data += offset;
2413    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2414}
2415
/**
 * Half-pel motion compensation for a single block at reduced (lowres)
 * resolution, with edge emulation when the source area crosses the
 * picture border.
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, ptrdiff_t stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);       // index into the pix_op table
    const int s_mask   = (2 << lowres) - 1;      // mask for the subpel fraction
    int emu = 0;
    int sx, sy;

    /* quarter-pel vectors are handled with halved precision here */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    /* split the vector into subpel fraction and integer pixel offset;
     * note: '+' binds tighter than '>>', so this shifts by (lowres + 1) */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src   += src_y * stride + src_x;

    /* source overlaps the picture edge: copy through the emulation buffer */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y   << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->edge_emu_buffer;
        emu = 1;
    }

    /* rescale the subpel fraction to the range expected by pix_op */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
2461
/**
 * Apply one MPEG motion vector to the three components (lowres variant).
 *
 * @param field_based   1 when motion compensation operates on single fields
 * @param bottom_field  1 to write into the bottom field of the destination
 * @param field_select  1 to read from the second field of the reference
 * @param ref_picture   array[3] of pointers to the Y/Cb/Cr reference planes
 * @param pix_op        h264 chroma MC functions, indexed by block width
 * @param motion_x      horizontal MV (half-pel, or quarter-pel if s->quarter_sample)
 * @param motion_y      vertical MV (same units as motion_x)
 * @param h             height of the luma area to predict, in lowres pixels
 * @param mb_y          macroblock row
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    // block sizes and edges shrink by the lowres shift
    const int block_s    = 8>>lowres;
    // mask selecting the sub-pel fraction bits at this lowres level
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    // not s->linesize: that would be wrong for field pictures
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    // NB: ">> lowres + 1" parses as ">> (lowres + 1)" — '+' binds tighter than '>>'
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        // H.263: chroma MV keeps the luma sub-pel fraction's low bit
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            // Chroma420: both axes halved
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
            } else {
            //Chroma444
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    // if the MC area pokes outside the picture, copy it into the edge
    // emulation buffer with replicated borders and read from there instead
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
                                v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf,  ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf,  ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y   += s->linesize;
        ptr_cb  += s->uvlinesize;
        ptr_cr  += s->uvlinesize;
    }

    // rescale the sub-pel fractions into the 1/8-pel units the MC functions expect
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
2599
/**
 * Chroma motion compensation for 8x8 (4MV) macroblocks, lowres variant.
 * Builds one chroma MV from the sum of the four luma MVs (using the
 * special H.263 rounding) and applies it to both chroma planes.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    // mask selecting the sub-pel fraction bits at this lowres level
    const int s_mask     = (2 << lowres) - 1;
    // ">> lowres + 1" parses as ">> (lowres + 1)": extra shift for 4:2:0 chroma
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    // spill into the edge emulation buffer when the MC area leaves the picture
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    // rescale the sub-pel fractions into 1/8-pel units for the MC function
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    // Cr uses the same offset; if Cb needed edge emulation, Cr does too
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
2656
2657/**
2658 * motion compensation of a single macroblock
2659 * @param s context
2660 * @param dest_y luma destination pointer
2661 * @param dest_cb chroma cb/u destination pointer
2662 * @param dest_cr chroma cr/v destination pointer
2663 * @param dir direction (0->forward, 1->backward)
2664 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2665 * @param pix_op halfpel motion compensation function (average or put normally)
2666 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2667 */
2668static inline void MPV_motion_lowres(MpegEncContext *s,
2669                                     uint8_t *dest_y, uint8_t *dest_cb,
2670                                     uint8_t *dest_cr,
2671                                     int dir, uint8_t **ref_picture,
2672                                     h264_chroma_mc_func *pix_op)
2673{
2674    int mx, my;
2675    int mb_x, mb_y, i;
2676    const int lowres  = s->avctx->lowres;
2677    const int block_s = 8 >>lowres;
2678
2679    mb_x = s->mb_x;
2680    mb_y = s->mb_y;
2681
2682    switch (s->mv_type) {
2683    case MV_TYPE_16X16:
2684        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2685                           0, 0, 0,
2686                           ref_picture, pix_op,
2687                           s->mv[dir][0][0], s->mv[dir][0][1],
2688                           2 * block_s, mb_y);
2689        break;
2690    case MV_TYPE_8X8:
2691        mx = 0;
2692        my = 0;
2693        for (i = 0; i < 4; i++) {
2694            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2695                               s->linesize) * block_s,
2696                               ref_picture[0], 0, 0,
2697                               (2 * mb_x + (i & 1)) * block_s,
2698                               (2 * mb_y + (i >> 1)) * block_s,
2699                               s->width, s->height, s->linesize,
2700                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2701                               block_s, block_s, pix_op,
2702                               s->mv[dir][i][0], s->mv[dir][i][1]);
2703
2704            mx += s->mv[dir][i][0];
2705            my += s->mv[dir][i][1];
2706        }
2707
2708        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2709            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2710                                     pix_op, mx, my);
2711        break;
2712    case MV_TYPE_FIELD:
2713        if (s->picture_structure == PICT_FRAME) {
2714            /* top field */
2715            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2716                               1, 0, s->field_select[dir][0],
2717                               ref_picture, pix_op,
2718                               s->mv[dir][0][0], s->mv[dir][0][1],
2719                               block_s, mb_y);
2720            /* bottom field */
2721            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2722                               1, 1, s->field_select[dir][1],
2723                               ref_picture, pix_op,
2724                               s->mv[dir][1][0], s->mv[dir][1][1],
2725                               block_s, mb_y);
2726        } else {
2727            if (s->picture_structure != s->field_select[dir][0] + 1 &&
2728                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2729                ref_picture = s->current_picture_ptr->f->data;
2730
2731            }
2732            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2733                               0, 0, s->field_select[dir][0],
2734                               ref_picture, pix_op,
2735                               s->mv[dir][0][0],
2736                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2737            }
2738        break;
2739    case MV_TYPE_16X8:
2740        for (i = 0; i < 2; i++) {
2741            uint8_t **ref2picture;
2742
2743            if (s->picture_structure == s->field_select[dir][i] + 1 ||
2744                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2745                ref2picture = ref_picture;
2746            } else {
2747                ref2picture = s->current_picture_ptr->f->data;
2748            }
2749
2750            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2751                               0, 0, s->field_select[dir][i],
2752                               ref2picture, pix_op,
2753                               s->mv[dir][i][0], s->mv[dir][i][1] +
2754                               2 * block_s * i, block_s, mb_y >> 1);
2755
2756            dest_y  +=  2 * block_s *  s->linesize;
2757            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2758            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2759        }
2760        break;
2761    case MV_TYPE_DMV:
2762        if (s->picture_structure == PICT_FRAME) {
2763            for (i = 0; i < 2; i++) {
2764                int j;
2765                for (j = 0; j < 2; j++) {
2766                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2767                                       1, j, j ^ i,
2768                                       ref_picture, pix_op,
2769                                       s->mv[dir][2 * i + j][0],
2770                                       s->mv[dir][2 * i + j][1],
2771                                       block_s, mb_y);
2772                }
2773                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2774            }
2775        } else {
2776            for (i = 0; i < 2; i++) {
2777                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2778                                   0, 0, s->picture_structure != i + 1,
2779                                   ref_picture, pix_op,
2780                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2781                                   2 * block_s, mb_y >> 1);
2782
2783                // after put we make avg of the same block
2784                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2785
2786                // opposite parity is always in the same
2787                // frame if this is second field
2788                if (!s->first_field) {
2789                    ref_picture = s->current_picture_ptr->f->data;
2790                }
2791            }
2792        }
2793        break;
2794    default:
2795        av_assert2(0);
2796    }
2797}
2798
2799/**
2800 * find the lowest MB row referenced in the MVs
2801 */
2802int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2803{
2804    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2805    int my, off, i, mvs;
2806
2807    if (s->picture_structure != PICT_FRAME || s->mcsel)
2808        goto unhandled;
2809
2810    switch (s->mv_type) {
2811        case MV_TYPE_16X16:
2812            mvs = 1;
2813            break;
2814        case MV_TYPE_16X8:
2815            mvs = 2;
2816            break;
2817        case MV_TYPE_8X8:
2818            mvs = 4;
2819            break;
2820        default:
2821            goto unhandled;
2822    }
2823
2824    for (i = 0; i < mvs; i++) {
2825        my = s->mv[dir][i][1]<<qpel_shift;
2826        my_max = FFMAX(my_max, my);
2827        my_min = FFMIN(my_min, my);
2828    }
2829
2830    off = (FFMAX(-my_min, my_max) + 63) >> 6;
2831
2832    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2833unhandled:
2834    return s->mb_height-1;
2835}
2836
2837/* put block[] to dest[] */
2838static inline void put_dct(MpegEncContext *s,
2839                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2840{
2841    s->dct_unquantize_intra(s, block, i, qscale);
2842    s->idsp.idct_put(dest, line_size, block);
2843}
2844
2845/* add block[] to dest[] */
2846static inline void add_dct(MpegEncContext *s,
2847                           int16_t *block, int i, uint8_t *dest, int line_size)
2848{
2849    if (s->block_last_index[i] >= 0) {
2850        s->idsp.idct_add(dest, line_size, block);
2851    }
2852}
2853
2854static inline void add_dequant_dct(MpegEncContext *s,
2855                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2856{
2857    if (s->block_last_index[i] >= 0) {
2858        s->dct_unquantize_inter(s, block, i, qscale);
2859
2860        s->idsp.idct_add(dest, line_size, block);
2861    }
2862}
2863
2864/**
2865 * Clean dc, ac, coded_block for the current non-intra MB.
2866 */
2867void ff_clean_intra_table_entries(MpegEncContext *s)
2868{
2869    int wrap = s->b8_stride;
2870    int xy = s->block_index[0];
2871
2872    s->dc_val[0][xy           ] =
2873    s->dc_val[0][xy + 1       ] =
2874    s->dc_val[0][xy     + wrap] =
2875    s->dc_val[0][xy + 1 + wrap] = 1024;
2876    /* ac pred */
2877    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
2878    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2879    if (s->msmpeg4_version>=3) {
2880        s->coded_block[xy           ] =
2881        s->coded_block[xy + 1       ] =
2882        s->coded_block[xy     + wrap] =
2883        s->coded_block[xy + 1 + wrap] = 0;
2884    }
2885    /* chroma */
2886    wrap = s->mb_stride;
2887    xy = s->mb_x + s->mb_y * wrap;
2888    s->dc_val[1][xy] =
2889    s->dc_val[2][xy] = 1024;
2890    /* ac pred */
2891    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2892    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2893
2894    s->mbintra_table[xy]= 0;
2895}
2896
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir   : motion vector direction
   s->mv_type  : motion vector type
   s->mv       : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)

   lowres_flag/is_mpeg12 are compile-time constants at every call site so
   the av_always_inline body is specialized per codec family.
 */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    // hardware acceleration (XvMC) renders the MB itself from pblocks
    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                      block[i][s->idsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    // reconstruction can be skipped entirely while encoding unless the
    // pixels are needed (PSNR, frame skipping, non-RD B/intra decisions)
    if (   (s->flags&CODEC_FLAG_PSNR)
        || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
        || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        // interlaced DCT: blocks cover alternating lines of a double-stride area
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            // reconstruct into a scratchpad; copied to s->dest after skip_idct
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                // frame threading: wait until the reference rows are decoded
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 ff_MPV_lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        // after put of the forward prediction, avg the backward one
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                }else{
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->hdsp.put_pixels_tab;
                    }else{
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix = s->hdsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            // codecs whose bitstream carries quantized coefficients need
            // dequantization here; others store the dequantized residue
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                // MPEG-1/2 intra blocks arrive already dequantized
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        // copy scratchpad reconstruction into the real destination
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
3153
3154void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3155#if !CONFIG_SMALL
3156    if(s->out_format == FMT_MPEG1) {
3157        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3158        else                 MPV_decode_mb_internal(s, block, 0, 1);
3159    } else
3160#endif
3161    if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3162    else                  MPV_decode_mb_internal(s, block, 0, 0);
3163}
3164
3165void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3166{
3167    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3168                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3169                       s->first_field, s->low_delay);
3170}
3171
/**
 * Set up s->block_index[] and the s->dest[] pointers for the current MB row.
 * Indices/pointers start one MB to the left of mb_x; callers advance them
 * per macroblock. //FIXME maybe rename
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    // MB edge length in pixels is 1 << mb_size (16 at full resolution)
    const int mb_size= 4 - s->avctx->lowres;

    // indices 0-3: the four luma 8x8 blocks in the b8 grid
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    // indices 4-5: chroma blocks, stored after the luma b8 grid
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    // B-frames rendered via draw_horiz_band keep dest at the top of the plane
    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
        s->dest[0] += s->mb_y *   linesize << mb_size;
        s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            // field picture: only every other MB row belongs to this field
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
3203
3204/**
3205 * Permute an 8x8 block.
3206 * @param block the block which will be permuted according to the given permutation vector
3207 * @param permutation the permutation vector
3208 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3209 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3210 *                  (inverse) permutated to scantable order!
3211 */
3212void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3213{
3214    int i;
3215    int16_t temp[64];
3216
3217    if(last<=0) return;
3218    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3219
3220    for(i=0; i<=last; i++){
3221        const int j= scantable[i];
3222        temp[j]= block[j];
3223        block[j]=0;
3224    }
3225
3226    for(i=0; i<=last; i++){
3227        const int j= scantable[i];
3228        const int perm_j= permutation[j];
3229        block[perm_j]= temp[j];
3230    }
3231}
3232
3233void ff_mpeg_flush(AVCodecContext *avctx){
3234    int i;
3235    MpegEncContext *s = avctx->priv_data;
3236
3237    if(s==NULL || s->picture==NULL)
3238        return;
3239
3240    for (i = 0; i < MAX_PICTURE_COUNT; i++)
3241        ff_mpeg_unref_picture(s, &s->picture[i]);
3242    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3243
3244    ff_mpeg_unref_picture(s, &s->current_picture);
3245    ff_mpeg_unref_picture(s, &s->last_picture);
3246    ff_mpeg_unref_picture(s, &s->next_picture);
3247
3248    s->mb_x= s->mb_y= 0;
3249    s->closed_gop= 0;
3250
3251    s->parse_context.state= -1;
3252    s->parse_context.frame_start_found= 0;
3253    s->parse_context.overread= 0;
3254    s->parse_context.overread_index= 0;
3255    s->parse_context.index= 0;
3256    s->parse_context.last_index= 0;
3257    s->bitstream_buffer_size=0;
3258    s->pp_time=0;
3259}
3260
3261/**
3262 * set qscale and update qscale dependent variables.
3263 */
3264void ff_set_qscale(MpegEncContext * s, int qscale)
3265{
3266    if (qscale < 1)
3267        qscale = 1;
3268    else if (qscale > 31)
3269        qscale = 31;
3270
3271    s->qscale = qscale;
3272    s->chroma_qscale= s->chroma_qscale_table[qscale];
3273
3274    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3275    s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3276}
3277
3278void ff_MPV_report_decode_progress(MpegEncContext *s)
3279{
3280    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3281        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3282}
3283