/*
 * Copyright (c) 2003 The Libav Project
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 *  http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */
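
/*
 * A minimal sketch (not part of the decoder itself) of how a caller might
 * pass that atom along, assuming image_desc points at the ImageDescription
 * payload starting with 'SVQ3' and image_desc_size is its length (both
 * names are hypothetical):
 *
 *     avctx->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, image_desc, image_desc_size);
 *     avctx->extradata_size = image_desc_size;
 */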
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"

#include "h264data.h" //FIXME FIXME FIXME

#include "h264_mvpred.h"
#include "golomb.h"
#include "rectangle.h"
#include "vdpau_internal.h"

#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"

/**
 * @file
 * svq3 decoder.
 */

typedef struct {
    H264Context h;
    int halfpel_flag;
    int thirdpel_flag;
    int unknown_flag;
    int next_slice_index;
    uint32_t watermark_key;
} SVQ3Context;

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 o-->o-->o   o
         |  /|
 o   o   o / o
 | / |   |/  |
 o   o   o   o
   /
 o-->o-->o-->o
*/
static const uint8_t svq3_scan[16] = {
    0+0*4, 1+0*4, 2+0*4, 2+1*4,
    2+2*4, 3+0*4, 3+1*4, 3+2*4,
    0+1*4, 0+2*4, 1+1*4, 1+2*4,
    0+3*4, 1+3*4, 2+3*4, 3+3*4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2,-1,-1,-1,-1 }, { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 },
      { 2, 1,-1,-1,-1 }, { 1, 2,-1,-1,-1 }, { 1, 2,-1,-1,-1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0,-1,-1,-1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2,-1,-1,-1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct { uint8_t run; uint8_t level; } svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,  6154,  6914,  7761,  8718,
     9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
    24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
    61694, 68745, 77615, 89113,100253,109366,126635,141533
};

void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp){
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const uint8_t x_offset[4]={0, 1*stride, 4*stride, 5*stride};

    for(i=0; i<4; i++){
        const int z0 = 13*(input[4*i+0] +    input[4*i+2]);
        const int z1 = 13*(input[4*i+0] -    input[4*i+2]);
        const int z2 =  7* input[4*i+1] - 17*input[4*i+3];
        const int z3 = 17* input[4*i+1] +  7*input[4*i+3];

        temp[4*i+0] = z0+z3;
        temp[4*i+1] = z1+z2;
        temp[4*i+2] = z1-z2;
        temp[4*i+3] = z0-z3;
    }

    for(i=0; i<4; i++){
        const int offset= x_offset[i];
        const int z0= 13*(temp[4*0+i] +    temp[4*2+i]);
        const int z1= 13*(temp[4*0+i] -    temp[4*2+i]);
        const int z2=  7* temp[4*1+i] - 17*temp[4*3+i];
        const int z3= 17* temp[4*1+i] +  7*temp[4*3+i];

        output[stride* 0+offset] = ((z0 + z3)*qmul + 0x80000) >> 20;
        output[stride* 2+offset] = ((z1 + z2)*qmul + 0x80000) >> 20;
        output[stride* 8+offset] = ((z1 - z2)*qmul + 0x80000) >> 20;
        output[stride*10+offset] = ((z0 - z3)*qmul + 0x80000) >> 20;
    }
}
#undef stride

void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp,
                            int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[0 + 4*i] +    block[2 + 4*i]);
        const int z1 = 13*(block[0 + 4*i] -    block[2 + 4*i]);
        const int z2 =  7* block[1 + 4*i] - 17*block[3 + 4*i];
        const int z3 = 17* block[1 + 4*i] +  7*block[3 + 4*i];

        block[0 + 4*i] = z0 + z3;
        block[1 + 4*i] = z1 + z2;
        block[2 + 4*i] = z1 - z2;
        block[3 + 4*i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13*(block[i + 4*0] +    block[i + 4*2]);
        const int z1 = 13*(block[i + 4*0] -    block[i + 4*2]);
        const int z2 =  7* block[i + 4*1] - 17*block[i + 4*3];
        const int z3 = 17* block[i + 4*1] +  7*block[i + 4*3];
        const int rr = (dc + 0x80000);

        dst[i + stride*0] = av_clip_uint8( dst[i + stride*0] + (((z0 + z3)*qmul + rr) >> 20) );
        dst[i + stride*1] = av_clip_uint8( dst[i + stride*1] + (((z1 + z2)*qmul + rr) >> 20) );
        dst[i + stride*2] = av_clip_uint8( dst[i + stride*2] + (((z1 - z2)*qmul + rr) >> 20) );
        dst[i + stride*3] = av_clip_uint8( dst[i + stride*3] + (((z0 - z3)*qmul + rr) >> 20) );
    }
}

static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, vlc, limit;
    const int intra = (3 * type) >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {

          if (vlc == INVALID_VLC)
              return -1;

          sign = (vlc & 0x1) - 1;
          vlc  = (vlc + 1) >> 1;

          if (type == 3) {
              if (vlc < 3) {
                  run   = 0;
                  level = vlc;
              } else if (vlc < 4) {
                  run   = 1;
                  level = 1;
              } else {
                  run   = (vlc & 0x3);
                  level = ((vlc + 9) >> 2) - run;
              }
          } else {
              if (vlc < 16) {
                  run   = svq3_dct_tables[intra][vlc].run;
                  level = svq3_dct_tables[intra][vlc].level;
              } else if (intra) {
                  run   = (vlc & 0x7);
                  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
              } else {
                  run   = (vlc & 0xF);
                  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
              }
          }

          if ((index += run) >= limit)
              return -1;

          block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(MpegEncContext *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width>>3); //16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= (s->h_edge_pos - width  - 1) ||
        my < 0 || my >= (s->v_edge_pos - height - 1)) {

        if ((s->flags & CODEC_FLAG_EMU_EDGE)) {
            emu = 1;
        }

        mx = av_clip (mx, -16, (s->h_edge_pos - width  + 15));
        my = av_clip (my, -16, (s->v_edge_pos - height + 15));
    }

    /* form component predictions */
    dest = s->current_picture.f.data[0] + x + y*s->linesize;
    src  = pic->f.data[0] + mx + my*s->linesize;

    if (emu) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, (width + 1), (height + 1),
                            mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize, width, height);
    else
        (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize, height);

    if (!(s->flags & CODEC_FLAG_GRAY)) {
        mx     = (mx + (mx < (int) x)) >> 1;
        my     = (my + (my < (int) y)) >> 1;
        width  = (width  >> 1);
        height = (height >> 1);
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize;
            src  = pic->f.data[i] + mx + my * s->uvlinesize;

            if (emu) {
                s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize, (width + 1), (height + 1),
                                    mx, my, (s->h_edge_pos >> 1), (s->v_edge_pos >> 1));
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->dsp.avg_tpel_pixels_tab : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->uvlinesize, width, height);
            else
                (avg ? s->dsp.avg_pixels_tab : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->uvlinesize, height);
        }
    }
}

static inline int svq3_mc_dir(H264Context *h, int size, int mode, int dir,
                              int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned) (size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16*6 : 0;
    const int h_edge_pos  = 6*(s->h_edge_pos - part_width ) - extra_width;
    const int v_edge_pos  = 6*(s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height) {
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4*s->mb_x + (j >> 2)) + (4*s->mb_y + (i >> 2))*h->b_stride;
            int dxy;
            x = 16*s->mb_x + j;
            y = 16*s->mb_y + i;
            k = ((j >> 2) & 1) + ((i >> 1) & 2) + ((j >> 1) & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, (part_width >> 2), dir, 1, &mx, &my);
            } else {
                mx = s->next_picture.f.motion_val[0][b_xy][0] << 1;
                my = s->next_picture.f.motion_val[0][b_xy][1] << 1;

                if (dir == 0) {
                    mx = ((mx * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * h->frame_num_offset) / h->prev_frame_num_offset + 1) >> 1;
                } else {
                    mx = ((mx * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                    my = ((my * (h->frame_num_offset - h->prev_frame_num_offset)) / h->prev_frame_num_offset + 1) >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6*x, h_edge_pos - 6*x);
            my = av_clip(my, extra_width - 6*y, v_edge_pos - 6*y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&s->gb);
                dx = svq3_get_se_golomb(&s->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = ((mx + 1)>>1) + dx;
                my  = ((my + 1)>>1) + dy;
                fx  = ((unsigned)(mx + 0x3000))/3 - 0x1000;
                fy  = ((unsigned)(my + 0x3000))/3 - 0x1000;
                dxy = (mx - 3*fx) + 4*(my - 3*fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = ((unsigned)(mx + 1 + 0x3000))/3 + dx - 0x1000;
                my  = ((unsigned)(my + 1 + 0x3000))/3 + dy - 0x1000;
                dxy = (mx&1) + 2*(my&1);

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx>>1, my>>1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = ((unsigned)(mx + 3 + 0x6000))/6 + dx - 0x1000;
                my = ((unsigned)(my + 3 + 0x6000))/6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx,my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1*8], mv);

                    if (part_width == 8 && j < 8) {
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1*8], mv);
                    }
                }
                if (part_width == 8 && j < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                }
                if (part_width == 4 || part_height == 4) {
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
                }
            }

            /* write back motion vectors */
            fill_rectangle(s->current_picture.f.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
        }
    }

    return 0;
}

static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
{
    H264Context *h = &svq3->h;
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    MpegEncContext *const s = (MpegEncContext *) h;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4*s->mb_x + 4*s->mb_y*h->b_stride;

    h->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B) {
                svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
            }

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) {
            mode = THIRDPEL_MODE;
        } else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) {
            mode = HALFPEL_MODE;
        } else {
            mode = FULLPEL_MODE;
        }

        /* fill caches */
        /* note ref_cache should contain here:
            ????????
            ???11111
            N??11111
            N??11111
            N??11111
        */

        for (m = 0; m < 2; m++) {
            if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
                for (i = 0; i < 4; i++) {
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i*8], s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride]);
                }
            } else {
                for (i = 0; i < 4; i++) {
                    AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i*8]);
                }
            }
            if (s->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < (s->mb_width - 1)) {
                    AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]);
                    h->ref_cache[m][scan8[0] + 4 - 1*8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride    ]  ] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]);
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
                }else
                    h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
            }else
                memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);

            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++) {
                    memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
                }
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));

        if (mb_type == 8) {
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++) {
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i];
                }
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
                    h->left_samples_available = 0x5F5F;
                }
            }
            if (s->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0];
                h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1];
                h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2];
                h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3];

                if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
                    h->top_samples_available = 0x33FF;
                }
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i+=2) {
                vlc = svq3_get_ue_golomb(&s->gb);

                if (vlc >= 25){
                    av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left    = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top     = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1){
                    av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
            }
        }

        write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(h);

            h->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++) {
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
            }

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1){
            av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++) {
            memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
        }
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++) {
                memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
            }
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14*8*sizeof(uint8_t));
        s->dsp.clear_blocks(h->mb+  0);
        s->dsp.clear_blocks(h->mb+384);
    }

    if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
            av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += svq3_get_se_golomb(&s->gb);

        if (s->qscale > 31u){
            av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0]+0);
        AV_ZERO128(h->mb_luma_dc[0]+8);
        if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){
            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++) {
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
                    h->non_zero_count_cache[ scan8[k] ] = 1;

                    if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
                        av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
                        return -1;
                    }
                }
            }
        }

        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i) {
              if (svq3_decode_block(&s->gb, &h->mb[16*16*i], 0, 3)){
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
                return -1;
              }
            }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16*i + j;
                        h->non_zero_count_cache[ scan8[k] ] = 1;

                        if (svq3_decode_block(&s->gb, &h->mb[16*k], 1, 1)){
                            av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    h->cbp= cbp;
    s->current_picture.f.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type)) {
        h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
    }

    return 0;
}

static int svq3_decode_slice_header(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    const int mb_xy = h->mb_xy;
    int i, header;

    header = get_bits(&s->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = (header >> 5) & 3;

        svq3->next_slice_index = get_bits_count(&s->gb) + 8*show_bits(&s->gb, 8*length) + 8*length;

        if (svq3->next_slice_index > s->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        s->gb.size_in_bits = svq3->next_slice_index - 8*(length - 1);
        skip_bits(&s->gb, 8);

        if (svq3->watermark_key) {
            uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1]);
            AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb)>>3)+1], header ^ svq3->watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
                   &s->gb.buffer[s->gb.size_in_bits >> 3], (length - 1));
        }
        skip_bits_long(&s->gb, 0);
    }

    if ((i = svq3_get_ue_golomb(&s->gb)) == INVALID_VLC || i >= 3){
        av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", i);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[i];

    if ((header & 0x9F) == 2) {
        i = (s->mb_num < 64) ? 6 : (1 + av_log2 (s->mb_num - 1));
        s->mb_skip_run = get_bits(&s->gb, i) - (s->mb_x + (s->mb_y * s->mb_width));
    } else {
        skip_bits1(&s->gb);
        s->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&s->gb, 8);
    s->qscale = get_bits(&s->gb, 5);
    s->adaptive_quant = get_bits1(&s->gb);

    /* unknown fields */
    skip_bits1(&s->gb);

    if (svq3->unknown_flag) {
        skip_bits1(&s->gb);
    }

    skip_bits1(&s->gb);
    skip_bits(&s->gb, 2);

    while (get_bits1(&s->gb)) {
        skip_bits(&s->gb, 8);
    }

    /* reset intra predictors and invalidate motion vector references */
    if (s->mb_x > 0) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - 1      ]+3, -1, 4*sizeof(int8_t));
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_x]  , -1, 8*sizeof(int8_t)*s->mb_x);
    }
    if (s->mb_y > 0) {
        memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy - s->mb_stride], -1, 8*sizeof(int8_t)*(s->mb_width - s->mb_x));

        if (s->mb_x > 0) {
            h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] = -1;
        }
    }

    return 0;
}

static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    int m;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;

    if (ff_h264_decode_init(avctx) < 0)
        return -1;

    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;
    s->unrestricted_mv = 1;
    h->is_complex=1;
    avctx->pix_fmt = avctx->codec->pix_fmts[0];

    if (!s->context_initialized) {
        h->chroma_qp[0] = h->chroma_qp[1] = 4;

        svq3->halfpel_flag  = 1;
        svq3->thirdpel_flag = 1;
        svq3->unknown_flag  = 0;

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        extradata_end = avctx->extradata + avctx->extradata_size;
        if (extradata) {
            for (m = 0; m + 8 < avctx->extradata_size; m++) {
                if (!memcmp(extradata, "SEQH", 4)) {
                    marker_found = 1;
                    break;
                }
                extradata++;
            }
        }

        /* if a match was found, parse the extra data */
        if (marker_found) {

            GetBitContext gb;
            int frame_size_code;

            size = AV_RB32(&extradata[4]);
            if (size > extradata_end - extradata - 8)
                return AVERROR_INVALIDDATA;
            init_get_bits(&gb, extradata + 8, size*8);

            /* 'frame size code' and optional 'width, height' */
            frame_size_code = get_bits(&gb, 3);
            switch (frame_size_code) {
                case 0: avctx->width = 160; avctx->height = 120; break;
                case 1: avctx->width = 128; avctx->height =  96; break;
                case 2: avctx->width = 176; avctx->height = 144; break;
                case 3: avctx->width = 352; avctx->height = 288; break;
                case 4: avctx->width = 704; avctx->height = 576; break;
                case 5: avctx->width = 240; avctx->height = 180; break;
                case 6: avctx->width = 320; avctx->height = 240; break;
                case 7:
                    avctx->width  = get_bits(&gb, 12);
                    avctx->height = get_bits(&gb, 12);
                    break;
            }

            svq3->halfpel_flag  = get_bits1(&gb);
            svq3->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            s->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb)) {
                skip_bits(&gb, 8);
            }

            svq3->unknown_flag = get_bits1(&gb);
            avctx->has_b_frames = !s->low_delay;
            if (svq3->unknown_flag) {
#if CONFIG_ZLIB
                unsigned watermark_width  = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned long buf_len = watermark_width*watermark_height*4;
                int offset = (get_bits_count(&gb)+7)>>3;
                uint8_t *buf;

                if (!watermark_height ||
                    (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                if (!buf)
                    return AVERROR(ENOMEM);
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n", watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG, "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n", u1, u2, u3, u4, offset);
                if (uncompress(buf, &buf_len, extradata + 8 + offset, size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR, "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                svq3->watermark_key = svq3->watermark_key << 16 | svq3->watermark_key;
                av_log(avctx, AV_LOG_DEBUG, "watermark key %#x\n", svq3->watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR, "this SVQ3 file contains a watermark which requires zlib support compiled in\n");
                return -1;
#endif
            }
        }

        s->width  = avctx->width;
        s->height = avctx->height;

        if (MPV_common_init(s) < 0)
            return -1;

        h->b_stride = 4*s->mb_width;

        if (ff_h264_alloc_tables(h) < 0) {
            av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}

static int svq3_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;
    int buf_size = avpkt->size;
    int m, mb_type;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_picture_ptr && !s->low_delay) {
            *(AVFrame *) data = *(AVFrame *) &s->next_picture;
            s->next_picture_ptr = NULL;
            *data_size = sizeof(AVFrame);
        }
        return 0;
    }

    init_get_bits (&s->gb, buf, 8*buf_size);

    s->mb_x = s->mb_y = h->mb_xy = 0;

    if (svq3_decode_slice_header(avctx))
        return -1;

    s->pict_type = h->slice_type;
    s->picture_number = h->slice_num;

    if (avctx->debug&FF_DEBUG_PICT_INFO){
        av_log(h->s.avctx, AV_LOG_DEBUG, "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type), svq3->halfpel_flag, svq3->thirdpel_flag,
               s->adaptive_quant, s->qscale, h->slice_num);
    }

    /* for skipping the frame */
    s->current_picture.f.pict_type = s->pict_type;
    s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);

    /* Skip B-frames if we do not have reference frames. */
    if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
        return 0;
    if (  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
        ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
        || avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (ff_h264_frame_start(h) < 0)
        return -1;

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        h->frame_num_offset = (h->slice_num - h->prev_frame_num);

        if (h->frame_num_offset < 0) {
            h->frame_num_offset += 256;
        }
        if (h->frame_num_offset == 0 || h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num = h->frame_num;
        h->frame_num = h->slice_num;
        h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);

        if (h->prev_frame_num_offset < 0) {
            h->prev_frame_num_offset += 256;
        }
    }

    for (m = 0; m < 2; m++){
        int i;
        for (i = 0; i < 4; i++){
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8*i + j]= 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8*i + j]= PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

            if ( (get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
                ((get_bits_count(&s->gb) & 7) == 0 || show_bits(&s->gb, (-get_bits_count(&s->gb) & 7)) == 0)) {

                skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
                s->gb.size_in_bits = 8*buf_size;

                if (svq3_decode_slice_header(avctx))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&s->gb);

            if (s->pict_type == AV_PICTURE_TYPE_I) {
                mb_type += 8;
            } else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4) {
                mb_type += 4;
            }
            if ((unsigned)mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
                av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0) {
                ff_h264_hl_decode_mb (h);
            }

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay) {
                s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
            }
        }

        ff_draw_horiz_band(s, 16*s->mb_y, 16);
    }

    MPV_frame_end(s);

    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
        *(AVFrame *) data = *(AVFrame *) &s->current_picture;
    } else {
        *(AVFrame *) data = *(AVFrame *) &s->last_picture;
    }

    /* Do not output the last pic after seeking. */
    if (s->last_picture_ptr || s->low_delay) {
        *data_size = sizeof(AVFrame);
    }

    return buf_size;
}

static int svq3_decode_end(AVCodecContext *avctx)
{
    SVQ3Context *svq3 = avctx->priv_data;
    H264Context *h = &svq3->h;
    MpegEncContext *s = &h->s;

    ff_h264_free_context(h);

    MPV_common_end(s);

    return 0;
}

AVCodec ff_svq3_decoder = {
    .name           = "svq3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_SVQ3,
    .priv_data_size = sizeof(SVQ3Context),
    .init           = svq3_decode_init,
    .close          = svq3_decode_end,
    .decode         = svq3_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_DELAY,
    .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_NONE},
};