VirtualBox

source: vbox/trunk/src/libs/ffmpeg-20060710/ffplay.c@ 8998

Last change on this file since 8998 was 5776, checked in by vboxsync, 17 years ago

ffmpeg: exported to OSE

File size: 73.3 KB

1/*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#define HAVE_AV_CONFIG_H
20#include "avformat.h"
21
22#include "version.h"
23#include "cmdutils.h"
24
25#include <SDL.h>
26#include <SDL_thread.h>
27
28#ifdef CONFIG_WIN32
29#undef main /* We don't want SDL to override our main() */
30#endif
31
32#ifdef CONFIG_OS2
33#define INCL_DOS
34 #include <os2.h>
35 #include <stdio.h>
36
37 void MorphToPM()
38 {
39 PPIB pib;
40 PTIB tib;
41
42 DosGetInfoBlocks(&tib, &pib);
43
44 // Change flag from VIO to PM:
45 if (pib->pib_ultype==2) pib->pib_ultype = 3;
46 }
47#endif
48
49//#define DEBUG_SYNC
50
51#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
52#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
53#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
54
55/* SDL audio buffer size, in samples. Should be small to have precise
56 A/V sync as SDL does not have hardware buffer fullness info. */
57#define SDL_AUDIO_BUFFER_SIZE 1024
58
59/* no AV sync correction is done if below the AV sync threshold */
60#define AV_SYNC_THRESHOLD 0.01
61/* no AV correction is done if too big error */
62#define AV_NOSYNC_THRESHOLD 10.0
63
64/* maximum audio speed change to get correct sync */
65#define SAMPLE_CORRECTION_PERCENT_MAX 10
66
67/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
68#define AUDIO_DIFF_AVG_NB 20
69
70 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
71#define SAMPLE_ARRAY_SIZE (2*65536)
72
73typedef struct PacketQueue {
74 AVPacketList *first_pkt, *last_pkt;
75 int nb_packets;
76 int size;
77 int abort_request;
78 SDL_mutex *mutex;
79 SDL_cond *cond;
80} PacketQueue;
81
82#define VIDEO_PICTURE_QUEUE_SIZE 1
83#define SUBPICTURE_QUEUE_SIZE 4
84
85typedef struct VideoPicture {
86 double pts; ///<presentation time stamp for this picture
87 SDL_Overlay *bmp;
88 int width, height; /* source height & width */
89 int allocated;
90} VideoPicture;
91
92typedef struct SubPicture {
93 double pts; /* presentation time stamp for this picture */
94 AVSubtitle sub;
95} SubPicture;
96
97enum {
98 AV_SYNC_AUDIO_MASTER, /* default choice */
99 AV_SYNC_VIDEO_MASTER,
100 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
101};
102
103typedef struct VideoState {
104 SDL_Thread *parse_tid;
105 SDL_Thread *video_tid;
106 AVInputFormat *iformat;
107 int no_background;
108 int abort_request;
109 int paused;
110 int last_paused;
111 int seek_req;
112 int seek_flags;
113 int64_t seek_pos;
114 AVFormatContext *ic;
115 int dtg_active_format;
116
117 int audio_stream;
118
119 int av_sync_type;
120 double external_clock; /* external clock base */
121 int64_t external_clock_time;
122
123 double audio_clock;
124 double audio_diff_cum; /* used for AV difference average computation */
125 double audio_diff_avg_coef;
126 double audio_diff_threshold;
127 int audio_diff_avg_count;
128 AVStream *audio_st;
129 PacketQueue audioq;
130 int audio_hw_buf_size;
131 /* samples output by the codec. we reserve more space for avsync
132 compensation */
133 uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
134 unsigned int audio_buf_size; /* in bytes */
135 int audio_buf_index; /* in bytes */
136 AVPacket audio_pkt;
137 uint8_t *audio_pkt_data;
138 int audio_pkt_size;
139
140 int show_audio; /* if true, display audio samples */
141 int16_t sample_array[SAMPLE_ARRAY_SIZE];
142 int sample_array_index;
143 int last_i_start;
144
145 SDL_Thread *subtitle_tid;
146 int subtitle_stream;
147 int subtitle_stream_changed;
148 AVStream *subtitle_st;
149 PacketQueue subtitleq;
150 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
151 int subpq_size, subpq_rindex, subpq_windex;
152 SDL_mutex *subpq_mutex;
153 SDL_cond *subpq_cond;
154
155 double frame_timer;
156 double frame_last_pts;
157 double frame_last_delay;
158 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
159 int video_stream;
160 AVStream *video_st;
161 PacketQueue videoq;
162 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
163 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
164 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
165 int pictq_size, pictq_rindex, pictq_windex;
166 SDL_mutex *pictq_mutex;
167 SDL_cond *pictq_cond;
168
169 SDL_mutex *video_decoder_mutex;
170 SDL_mutex *audio_decoder_mutex;
171 SDL_mutex *subtitle_decoder_mutex;
172
173 // QETimer *video_timer;
174 char filename[1024];
175 int width, height, xleft, ytop;
176} VideoState;
177
178void show_help(void);
179static int audio_write_get_buf_size(VideoState *is);
180
181/* options specified by the user */
182static AVInputFormat *file_iformat;
183static AVImageFormat *image_format;
184static const char *input_filename;
185static int fs_screen_width;
186static int fs_screen_height;
187static int screen_width = 640;
188static int screen_height = 480;
189static int audio_disable;
190static int video_disable;
191static int display_disable;
192static int show_status;
193static int av_sync_type = AV_SYNC_AUDIO_MASTER;
194static int64_t start_time = AV_NOPTS_VALUE;
195static int debug = 0;
196static int debug_mv = 0;
197static int step = 0;
198static int thread_count = 1;
199static int workaround_bugs = 1;
200static int fast = 0;
201static int genpts = 0;
202static int lowres = 0;
203static int idct = FF_IDCT_AUTO;
204static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
205static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
206static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
207static int error_resilience = FF_ER_CAREFUL;
208static int error_concealment = 3;
209
210/* current context */
211static int is_full_screen;
212static VideoState *cur_stream;
213static int64_t audio_callback_time;
214
215#define FF_ALLOC_EVENT (SDL_USEREVENT)
216#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
217#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
218
219SDL_Surface *screen;
220
221/* packet queue handling */
222static void packet_queue_init(PacketQueue *q)
223{
224 memset(q, 0, sizeof(PacketQueue));
225 q->mutex = SDL_CreateMutex();
226 q->cond = SDL_CreateCond();
227}
228
229static void packet_queue_flush(PacketQueue *q)
230{
231 AVPacketList *pkt, *pkt1;
232
233 SDL_LockMutex(q->mutex);
234 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
235 pkt1 = pkt->next;
236 av_free_packet(&pkt->pkt);
237 av_freep(&pkt);
238 }
239 q->last_pkt = NULL;
240 q->first_pkt = NULL;
241 q->nb_packets = 0;
242 q->size = 0;
243 SDL_UnlockMutex(q->mutex);
244}
245
246static void packet_queue_end(PacketQueue *q)
247{
248 packet_queue_flush(q);
249 SDL_DestroyMutex(q->mutex);
250 SDL_DestroyCond(q->cond);
251}
252
253static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
254{
255 AVPacketList *pkt1;
256
257 /* duplicate the packet */
258 if (av_dup_packet(pkt) < 0)
259 return -1;
260
261 pkt1 = av_malloc(sizeof(AVPacketList));
262 if (!pkt1)
263 return -1;
264 pkt1->pkt = *pkt;
265 pkt1->next = NULL;
266
267
268 SDL_LockMutex(q->mutex);
269
270 if (!q->last_pkt)
272 q->first_pkt = pkt1;
273 else
274 q->last_pkt->next = pkt1;
275 q->last_pkt = pkt1;
276 q->nb_packets++;
277 q->size += pkt1->pkt.size;
278 /* XXX: should duplicate packet data in DV case */
279 SDL_CondSignal(q->cond);
280
281 SDL_UnlockMutex(q->mutex);
282 return 0;
283}
284
285static void packet_queue_abort(PacketQueue *q)
286{
287 SDL_LockMutex(q->mutex);
288
289 q->abort_request = 1;
290
291 SDL_CondSignal(q->cond);
292
293 SDL_UnlockMutex(q->mutex);
294}
295
296/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
297static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
298{
299 AVPacketList *pkt1;
300 int ret;
301
302 SDL_LockMutex(q->mutex);
303
304 for(;;) {
305 if (q->abort_request) {
306 ret = -1;
307 break;
308 }
309
310 pkt1 = q->first_pkt;
311 if (pkt1) {
312 q->first_pkt = pkt1->next;
313 if (!q->first_pkt)
314 q->last_pkt = NULL;
315 q->nb_packets--;
316 q->size -= pkt1->pkt.size;
317 *pkt = pkt1->pkt;
318 av_free(pkt1);
319 ret = 1;
320 break;
321 } else if (!block) {
322 ret = 0;
323 break;
324 } else {
325 SDL_CondWait(q->cond, q->mutex);
326 }
327 }
328 SDL_UnlockMutex(q->mutex);
329 return ret;
330}
331
332static inline void fill_rectangle(SDL_Surface *screen,
333 int x, int y, int w, int h, int color)
334{
335 SDL_Rect rect;
336 rect.x = x;
337 rect.y = y;
338 rect.w = w;
339 rect.h = h;
340 SDL_FillRect(screen, &rect, color);
341}
342
343#if 0
344/* draw only the border of a rectangle */
345void fill_border(VideoState *s, int x, int y, int w, int h, int color)
346{
347 int w1, w2, h1, h2;
348
349 /* fill the background */
350 w1 = x;
351 if (w1 < 0)
352 w1 = 0;
353 w2 = s->width - (x + w);
354 if (w2 < 0)
355 w2 = 0;
356 h1 = y;
357 if (h1 < 0)
358 h1 = 0;
359 h2 = s->height - (y + h);
360 if (h2 < 0)
361 h2 = 0;
362 fill_rectangle(screen,
363 s->xleft, s->ytop,
364 w1, s->height,
365 color);
366 fill_rectangle(screen,
367 s->xleft + s->width - w2, s->ytop,
368 w2, s->height,
369 color);
370 fill_rectangle(screen,
371 s->xleft + w1, s->ytop,
372 s->width - w1 - w2, h1,
373 color);
374 fill_rectangle(screen,
375 s->xleft + w1, s->ytop + s->height - h2,
376 s->width - w1 - w2, h2,
377 color);
378}
379#endif
380
381
382
383#define SCALEBITS 10
384#define ONE_HALF (1 << (SCALEBITS - 1))
385#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
386
387#define RGB_TO_Y_CCIR(r, g, b) \
388((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
389 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
390
391#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
392(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
393 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
394
395#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
396(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
397 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
398
399#define ALPHA_BLEND(a, oldp, newp, s)\
400((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
401
402#define RGBA_IN(r, g, b, a, s)\
403{\
404 unsigned int v = ((const uint32_t *)(s))[0];\
405 a = (v >> 24) & 0xff;\
406 r = (v >> 16) & 0xff;\
407 g = (v >> 8) & 0xff;\
408 b = v & 0xff;\
409}
410
411#define YUVA_IN(y, u, v, a, s, pal)\
412{\
413 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
414 a = (val >> 24) & 0xff;\
415 y = (val >> 16) & 0xff;\
416 u = (val >> 8) & 0xff;\
417 v = val & 0xff;\
418}
419
420#define YUVA_OUT(d, y, u, v, a)\
421{\
422 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
423}
424
425
426#define BPP 1
427
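/* Blend one palettized subtitle rectangle onto a YUV420P picture in place.
   The rectangle's palette has already been converted to YUVA (see
   subtitle_thread); luma is blended per pixel, chroma per 2x2 block using the
   accumulated colour and alpha of the covered pixels. */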
428static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
429{
430 int wrap, wrap3, width2, skip2;
431 int y, u, v, a, u1, v1, a1, w, h;
432 uint8_t *lum, *cb, *cr;
433 const uint8_t *p;
434 const uint32_t *pal;
435
436 lum = dst->data[0] + rect->y * dst->linesize[0];
437 cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
438 cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
439
440 width2 = (rect->w + 1) >> 1;
441 skip2 = rect->x >> 1;
442 wrap = dst->linesize[0];
443 wrap3 = rect->linesize;
444 p = rect->bitmap;
445 pal = rect->rgba_palette; /* Now in YCrCb! */
446
447 if (rect->y & 1) {
448 lum += rect->x;
449 cb += skip2;
450 cr += skip2;
451
452 if (rect->x & 1) {
453 YUVA_IN(y, u, v, a, p, pal);
454 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
455 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
456 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
457 cb++;
458 cr++;
459 lum++;
460 p += BPP;
461 }
462 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
463 YUVA_IN(y, u, v, a, p, pal);
464 u1 = u;
465 v1 = v;
466 a1 = a;
467 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468
469 YUVA_IN(y, u, v, a, p + BPP, pal);
470 u1 += u;
471 v1 += v;
472 a1 += a;
473 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
474 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
475 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
476 cb++;
477 cr++;
478 p += 2 * BPP;
479 lum += 2;
480 }
481 if (w) {
482 YUVA_IN(y, u, v, a, p, pal);
483 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
485 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
486 }
487 p += wrap3 + (wrap3 - rect->w * BPP);
488 lum += wrap + (wrap - rect->w - rect->x);
489 cb += dst->linesize[1] - width2 - skip2;
490 cr += dst->linesize[2] - width2 - skip2;
491 }
492 for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
493 lum += rect->x;
494 cb += skip2;
495 cr += skip2;
496
497 if (rect->x & 1) {
498 YUVA_IN(y, u, v, a, p, pal);
499 u1 = u;
500 v1 = v;
501 a1 = a;
502 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
503 p += wrap3;
504 lum += wrap;
505 YUVA_IN(y, u, v, a, p, pal);
506 u1 += u;
507 v1 += v;
508 a1 += a;
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
511 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
512 cb++;
513 cr++;
514 p += -wrap3 + BPP;
515 lum += -wrap + 1;
516 }
517 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
518 YUVA_IN(y, u, v, a, p, pal);
519 u1 = u;
520 v1 = v;
521 a1 = a;
522 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
523
524 YUVA_IN(y, u, v, a, p, pal);
525 u1 += u;
526 v1 += v;
527 a1 += a;
528 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
529 p += wrap3;
530 lum += wrap;
531
532 YUVA_IN(y, u, v, a, p, pal);
533 u1 += u;
534 v1 += v;
535 a1 += a;
536 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538 YUVA_IN(y, u, v, a, p, pal);
539 u1 += u;
540 v1 += v;
541 a1 += a;
542 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543
544 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
545 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
546
547 cb++;
548 cr++;
549 p += -wrap3 + 2 * BPP;
550 lum += -wrap + 2;
551 }
552 if (w) {
553 YUVA_IN(y, u, v, a, p, pal);
554 u1 = u;
555 v1 = v;
556 a1 = a;
557 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558 p += wrap3;
559 lum += wrap;
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 += u;
562 v1 += v;
563 a1 += a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567 cb++;
568 cr++;
569 p += -wrap3 + BPP;
570 lum += -wrap + 1;
571 }
572 p += wrap3 + (wrap3 - rect->w * BPP);
573 lum += wrap + (wrap - rect->w - rect->x);
574 cb += dst->linesize[1] - width2 - skip2;
575 cr += dst->linesize[2] - width2 - skip2;
576 }
577 /* handle odd height */
578 if (h) {
579 lum += rect->x;
580 cb += skip2;
581 cr += skip2;
582
583 if (rect->x & 1) {
584 YUVA_IN(y, u, v, a, p, pal);
585 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
586 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
587 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
588 cb++;
589 cr++;
590 lum++;
591 p += BPP;
592 }
593 for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
594 YUVA_IN(y, u, v, a, p, pal);
595 u1 = u;
596 v1 = v;
597 a1 = a;
598 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
599
600 YUVA_IN(y, u, v, a, p + BPP, pal);
601 u1 += u;
602 v1 += v;
603 a1 += a;
604 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
605            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
606            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
607 cb++;
608 cr++;
609 p += 2 * BPP;
610 lum += 2;
611 }
612 if (w) {
613 YUVA_IN(y, u, v, a, p, pal);
614 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
615 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
616 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
617 }
618 }
619}
620
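/* free the bitmaps and palettes of a decoded subtitle and reset it */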
621static void free_subpicture(SubPicture *sp)
622{
623 int i;
624
625 for (i = 0; i < sp->sub.num_rects; i++)
626 {
627 av_free(sp->sub.rects[i].bitmap);
628 av_free(sp->sub.rects[i].rgba_palette);
629 }
630
631 av_free(sp->sub.rects);
632
633 memset(&sp->sub, 0, sizeof(AVSubtitle));
634}
635
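/* display the current video picture: letterbox it to the stream's aspect
   ratio inside the window and blend any subtitle that is currently due */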
636static void video_image_display(VideoState *is)
637{
638 VideoPicture *vp;
639 SubPicture *sp;
640 AVPicture pict;
641 float aspect_ratio;
642 int width, height, x, y;
643 SDL_Rect rect;
644 int i;
645
646 vp = &is->pictq[is->pictq_rindex];
647 if (vp->bmp) {
648 /* XXX: use variable in the frame */
649 if (is->video_st->codec->sample_aspect_ratio.num == 0)
650 aspect_ratio = 0;
651 else
652 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
653                * is->video_st->codec->width / is->video_st->codec->height;
654 if (aspect_ratio <= 0.0)
655 aspect_ratio = (float)is->video_st->codec->width /
656 (float)is->video_st->codec->height;
657 /* if an active format is indicated, then it overrides the
658 mpeg format */
659#if 0
660 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
661 is->dtg_active_format = is->video_st->codec->dtg_active_format;
662 printf("dtg_active_format=%d\n", is->dtg_active_format);
663 }
664#endif
665#if 0
666 switch(is->video_st->codec->dtg_active_format) {
667 case FF_DTG_AFD_SAME:
668 default:
669 /* nothing to do */
670 break;
671 case FF_DTG_AFD_4_3:
672 aspect_ratio = 4.0 / 3.0;
673 break;
674 case FF_DTG_AFD_16_9:
675 aspect_ratio = 16.0 / 9.0;
676 break;
677 case FF_DTG_AFD_14_9:
678 aspect_ratio = 14.0 / 9.0;
679 break;
680 case FF_DTG_AFD_4_3_SP_14_9:
681 aspect_ratio = 14.0 / 9.0;
682 break;
683 case FF_DTG_AFD_16_9_SP_14_9:
684 aspect_ratio = 14.0 / 9.0;
685 break;
686 case FF_DTG_AFD_SP_4_3:
687 aspect_ratio = 4.0 / 3.0;
688 break;
689 }
690#endif
691
692 if (is->subtitle_st)
693 {
694 if (is->subpq_size > 0)
695 {
696 sp = &is->subpq[is->subpq_rindex];
697
698 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
699 {
700 SDL_LockYUVOverlay (vp->bmp);
701
702 pict.data[0] = vp->bmp->pixels[0];
703 pict.data[1] = vp->bmp->pixels[2];
704 pict.data[2] = vp->bmp->pixels[1];
705
706 pict.linesize[0] = vp->bmp->pitches[0];
707 pict.linesize[1] = vp->bmp->pitches[2];
708 pict.linesize[2] = vp->bmp->pitches[1];
709
710 for (i = 0; i < sp->sub.num_rects; i++)
711 blend_subrect(&pict, &sp->sub.rects[i]);
712
713 SDL_UnlockYUVOverlay (vp->bmp);
714 }
715 }
716 }
717
718
719 /* XXX: we suppose the screen has a 1.0 pixel ratio */
720 height = is->height;
721 width = ((int)rint(height * aspect_ratio)) & -3;
722 if (width > is->width) {
723 width = is->width;
724 height = ((int)rint(width / aspect_ratio)) & -3;
725 }
726 x = (is->width - width) / 2;
727 y = (is->height - height) / 2;
728 if (!is->no_background) {
729 /* fill the background */
730 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
731 } else {
732 is->no_background = 0;
733 }
734 rect.x = is->xleft + x;
735        rect.y = is->ytop + y;
736 rect.w = width;
737 rect.h = height;
738 SDL_DisplayYUVOverlay(vp->bmp, &rect);
739 } else {
740#if 0
741 fill_rectangle(screen,
742 is->xleft, is->ytop, is->width, is->height,
743 QERGB(0x00, 0x00, 0x00));
744#endif
745 }
746}
747
748static inline int compute_mod(int a, int b)
749{
750 a = a % b;
751 if (a >= 0)
752 return a;
753 else
754 return a + b;
755}
756
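/* draw a simple oscilloscope of the audio samples most recently handed to
   SDL, one waveform per channel, centred on what is currently being output */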
757static void video_audio_display(VideoState *s)
758{
759 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
760 int ch, channels, h, h2, bgcolor, fgcolor;
761    int64_t time_diff;
762
763 /* compute display index : center on currently output samples */
764 channels = s->audio_st->codec->channels;
765 nb_display_channels = channels;
766 if (!s->paused) {
767 n = 2 * channels;
768 delay = audio_write_get_buf_size(s);
769 delay /= n;
770
771 /* to be more precise, we take into account the time spent since
772 the last buffer computation */
773 if (audio_callback_time) {
774 time_diff = av_gettime() - audio_callback_time;
775 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
776 }
777
778 delay -= s->width / 2;
779 if (delay < s->width)
780 delay = s->width;
781 i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
782 s->last_i_start = i_start;
783 } else {
784 i_start = s->last_i_start;
785 }
786
787 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
788 fill_rectangle(screen,
789 s->xleft, s->ytop, s->width, s->height,
790 bgcolor);
791
792 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
793
794 /* total height for one channel */
795 h = s->height / nb_display_channels;
796 /* graph height / 2 */
797 h2 = (h * 9) / 20;
798 for(ch = 0;ch < nb_display_channels; ch++) {
799 i = i_start + ch;
800 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
801 for(x = 0; x < s->width; x++) {
802 y = (s->sample_array[i] * h2) >> 15;
803 if (y < 0) {
804 y = -y;
805 ys = y1 - y;
806 } else {
807 ys = y1;
808 }
809 fill_rectangle(screen,
810 s->xleft + x, ys, 1, y,
811 fgcolor);
812 i += channels;
813 if (i >= SAMPLE_ARRAY_SIZE)
814 i -= SAMPLE_ARRAY_SIZE;
815 }
816 }
817
818 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
819
820 for(ch = 1;ch < nb_display_channels; ch++) {
821 y = s->ytop + ch * h;
822 fill_rectangle(screen,
823 s->xleft, y, s->width, 1,
824 fgcolor);
825 }
826 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
827}
828
829/* display the current picture, if any */
830static void video_display(VideoState *is)
831{
832 if (is->audio_st && is->show_audio)
833 video_audio_display(is);
834 else if (is->video_st)
835 video_image_display(is);
836}
837
838static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
839{
840 SDL_Event event;
841 event.type = FF_REFRESH_EVENT;
842 event.user.data1 = opaque;
843 SDL_PushEvent(&event);
844 return 0; /* 0 means stop timer */
845}
846
847/* schedule a video refresh in 'delay' ms */
848static void schedule_refresh(VideoState *is, int delay)
849{
850 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
851}
852
853/* get the current audio clock value */
854static double get_audio_clock(VideoState *is)
855{
856 double pts;
857 int hw_buf_size, bytes_per_sec;
858 pts = is->audio_clock;
859 hw_buf_size = audio_write_get_buf_size(is);
860 bytes_per_sec = 0;
861 if (is->audio_st) {
862 bytes_per_sec = is->audio_st->codec->sample_rate *
863 2 * is->audio_st->codec->channels;
864 }
865 if (bytes_per_sec)
866 pts -= (double)hw_buf_size / bytes_per_sec;
867 return pts;
868}
869
870/* get the current video clock value */
871static double get_video_clock(VideoState *is)
872{
873 double delta;
874 if (is->paused) {
875 delta = 0;
876 } else {
877 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
878 }
879 return is->video_current_pts + delta;
880}
881
882/* get the current external clock value */
883static double get_external_clock(VideoState *is)
884{
885 int64_t ti;
886 ti = av_gettime();
887 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
888}
889
890/* get the current master clock value */
891static double get_master_clock(VideoState *is)
892{
893 double val;
894
895 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
896 if (is->video_st)
897 val = get_video_clock(is);
898 else
899 val = get_audio_clock(is);
900 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
901 if (is->audio_st)
902 val = get_audio_clock(is);
903 else
904 val = get_video_clock(is);
905 } else {
906 val = get_external_clock(is);
907 }
908 return val;
909}
910
911/* seek in the stream */
912static void stream_seek(VideoState *is, int64_t pos, int rel)
913{
914 if (!is->seek_req) {
915 is->seek_pos = pos;
916 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
917 is->seek_req = 1;
918 }
919}
920
921/* pause or resume the video */
922static void stream_pause(VideoState *is)
923{
924 is->paused = !is->paused;
925 if (is->paused) {
926 is->video_current_pts = get_video_clock(is);
927 }
928}
929
930/* called to display each frame */
931static void video_refresh_timer(void *opaque)
932{
933 VideoState *is = opaque;
934 VideoPicture *vp;
935 double actual_delay, delay, sync_threshold, ref_clock, diff;
936
937 SubPicture *sp, *sp2;
938
939 if (is->video_st) {
940 if (is->pictq_size == 0) {
941 /* if no picture, need to wait */
942 schedule_refresh(is, 1);
943 } else {
944 /* dequeue the picture */
945 vp = &is->pictq[is->pictq_rindex];
946
947 /* update current video pts */
948 is->video_current_pts = vp->pts;
949 is->video_current_pts_time = av_gettime();
950
951 /* compute nominal delay */
952 delay = vp->pts - is->frame_last_pts;
953 if (delay <= 0 || delay >= 1.0) {
954 /* if incorrect delay, use previous one */
955 delay = is->frame_last_delay;
956 }
957 is->frame_last_delay = delay;
958 is->frame_last_pts = vp->pts;
959
960 /* update delay to follow master synchronisation source */
961 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
962 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
963 /* if video is slave, we try to correct big delays by
964 duplicating or deleting a frame */
965 ref_clock = get_master_clock(is);
966 diff = vp->pts - ref_clock;
967
968 /* skip or repeat frame. We take into account the
969 delay to compute the threshold. I still don't know
970 if it is the best guess */
971 sync_threshold = AV_SYNC_THRESHOLD;
972 if (delay > sync_threshold)
973 sync_threshold = delay;
974 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
975 if (diff <= -sync_threshold)
976 delay = 0;
977 else if (diff >= sync_threshold)
978 delay = 2 * delay;
979 }
980 }
981
982 is->frame_timer += delay;
983 /* compute the REAL delay (we need to do that to avoid
984               long term errors) */
985 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
986 if (actual_delay < 0.010) {
987 /* XXX: should skip picture */
988 actual_delay = 0.010;
989 }
990 /* launch timer for next picture */
991 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
992
993#if defined(DEBUG_SYNC)
994 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
995 delay, actual_delay, vp->pts, -diff);
996#endif
997
998 if(is->subtitle_st) {
999 if (is->subtitle_stream_changed) {
1000 SDL_LockMutex(is->subpq_mutex);
1001
1002 while (is->subpq_size) {
1003 free_subpicture(&is->subpq[is->subpq_rindex]);
1004
1005 /* update queue size and signal for next picture */
1006 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1007 is->subpq_rindex = 0;
1008
1009 is->subpq_size--;
1010 }
1011 is->subtitle_stream_changed = 0;
1012
1013 SDL_CondSignal(is->subpq_cond);
1014 SDL_UnlockMutex(is->subpq_mutex);
1015 } else {
1016 if (is->subpq_size > 0) {
1017 sp = &is->subpq[is->subpq_rindex];
1018
1019 if (is->subpq_size > 1)
1020 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1021 else
1022 sp2 = NULL;
1023
1024 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1025 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1026 {
1027 free_subpicture(sp);
1028
1029 /* update queue size and signal for next picture */
1030 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1031 is->subpq_rindex = 0;
1032
1033 SDL_LockMutex(is->subpq_mutex);
1034 is->subpq_size--;
1035 SDL_CondSignal(is->subpq_cond);
1036 SDL_UnlockMutex(is->subpq_mutex);
1037 }
1038 }
1039 }
1040 }
1041
1042 /* display picture */
1043 video_display(is);
1044
1045 /* update queue size and signal for next picture */
1046 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1047 is->pictq_rindex = 0;
1048
1049 SDL_LockMutex(is->pictq_mutex);
1050 is->pictq_size--;
1051 SDL_CondSignal(is->pictq_cond);
1052 SDL_UnlockMutex(is->pictq_mutex);
1053 }
1054 } else if (is->audio_st) {
1055 /* draw the next audio frame */
1056
1057 schedule_refresh(is, 40);
1058
1059 /* if only audio stream, then display the audio bars (better
1060           than nothing, just to test the implementation) */
1061
1062 /* display picture */
1063 video_display(is);
1064 } else {
1065 schedule_refresh(is, 100);
1066 }
1067 if (show_status) {
1068 static int64_t last_time;
1069 int64_t cur_time;
1070 int aqsize, vqsize, sqsize;
1071 double av_diff;
1072
1073 cur_time = av_gettime();
1074 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1075 aqsize = 0;
1076 vqsize = 0;
1077 sqsize = 0;
1078 if (is->audio_st)
1079 aqsize = is->audioq.size;
1080 if (is->video_st)
1081 vqsize = is->videoq.size;
1082 if (is->subtitle_st)
1083 sqsize = is->subtitleq.size;
1084 av_diff = 0;
1085 if (is->audio_st && is->video_st)
1086 av_diff = get_audio_clock(is) - get_video_clock(is);
1087 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1088 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1089 fflush(stdout);
1090 last_time = cur_time;
1091 }
1092 }
1093}
1094
1095/* allocate a picture (needs to do that in main thread to avoid
1096   potential locking problems) */
1097static void alloc_picture(void *opaque)
1098{
1099 VideoState *is = opaque;
1100 VideoPicture *vp;
1101
1102 vp = &is->pictq[is->pictq_windex];
1103
1104 if (vp->bmp)
1105 SDL_FreeYUVOverlay(vp->bmp);
1106
1107#if 0
1108 /* XXX: use generic function */
1109 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1110 switch(is->video_st->codec->pix_fmt) {
1111 case PIX_FMT_YUV420P:
1112 case PIX_FMT_YUV422P:
1113 case PIX_FMT_YUV444P:
1114 case PIX_FMT_YUV422:
1115 case PIX_FMT_YUV410P:
1116 case PIX_FMT_YUV411P:
1117 is_yuv = 1;
1118 break;
1119 default:
1120 is_yuv = 0;
1121 break;
1122 }
1123#endif
1124 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1125 is->video_st->codec->height,
1126 SDL_YV12_OVERLAY,
1127 screen);
1128 vp->width = is->video_st->codec->width;
1129 vp->height = is->video_st->codec->height;
1130
1131 SDL_LockMutex(is->pictq_mutex);
1132 vp->allocated = 1;
1133 SDL_CondSignal(is->pictq_cond);
1134 SDL_UnlockMutex(is->pictq_mutex);
1135}
1136
1137/**
1138 *
1139 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1140 */
1141static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1142{
1143 VideoPicture *vp;
1144 int dst_pix_fmt;
1145 AVPicture pict;
1146
1147 /* wait until we have space to put a new picture */
1148 SDL_LockMutex(is->pictq_mutex);
1149 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1150 !is->videoq.abort_request) {
1151 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1152 }
1153 SDL_UnlockMutex(is->pictq_mutex);
1154
1155 if (is->videoq.abort_request)
1156 return -1;
1157
1158 vp = &is->pictq[is->pictq_windex];
1159
1160 /* alloc or resize hardware picture buffer */
1161 if (!vp->bmp ||
1162 vp->width != is->video_st->codec->width ||
1163 vp->height != is->video_st->codec->height) {
1164 SDL_Event event;
1165
1166 vp->allocated = 0;
1167
1168 /* the allocation must be done in the main thread to avoid
1169 locking problems */
1170 event.type = FF_ALLOC_EVENT;
1171 event.user.data1 = is;
1172 SDL_PushEvent(&event);
1173
1174 /* wait until the picture is allocated */
1175 SDL_LockMutex(is->pictq_mutex);
1176 while (!vp->allocated && !is->videoq.abort_request) {
1177 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1178 }
1179 SDL_UnlockMutex(is->pictq_mutex);
1180
1181 if (is->videoq.abort_request)
1182 return -1;
1183 }
1184
1185 /* if the frame is not skipped, then display it */
1186 if (vp->bmp) {
1187 /* get a pointer on the bitmap */
1188 SDL_LockYUVOverlay (vp->bmp);
1189
1190 dst_pix_fmt = PIX_FMT_YUV420P;
1191 pict.data[0] = vp->bmp->pixels[0];
1192 pict.data[1] = vp->bmp->pixels[2];
1193 pict.data[2] = vp->bmp->pixels[1];
1194
1195 pict.linesize[0] = vp->bmp->pitches[0];
1196 pict.linesize[1] = vp->bmp->pitches[2];
1197 pict.linesize[2] = vp->bmp->pitches[1];
1198 img_convert(&pict, dst_pix_fmt,
1199 (AVPicture *)src_frame, is->video_st->codec->pix_fmt,
1200 is->video_st->codec->width, is->video_st->codec->height);
1201 /* update the bitmap content */
1202 SDL_UnlockYUVOverlay(vp->bmp);
1203
1204 vp->pts = pts;
1205
1206 /* now we can update the picture count */
1207 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1208 is->pictq_windex = 0;
1209 SDL_LockMutex(is->pictq_mutex);
1210 is->pictq_size++;
1211 SDL_UnlockMutex(is->pictq_mutex);
1212 }
1213 return 0;
1214}
1215
1216/**
1217 * compute the exact PTS for the picture if it is omitted in the stream
1218 * @param pts1 the dts of the pkt / pts of the frame
1219 */
1220static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1221{
1222 double frame_delay, pts;
1223
1224 pts = pts1;
1225
1226 if (pts != 0) {
1227 /* update video clock with pts, if present */
1228 is->video_clock = pts;
1229 } else {
1230 pts = is->video_clock;
1231 }
1232 /* update video clock for next frame */
1233 frame_delay = av_q2d(is->video_st->codec->time_base);
1234 /* for MPEG2, the frame can be repeated, so we update the
1235 clock accordingly */
1236 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1237 is->video_clock += frame_delay;
1238
1239#if defined(DEBUG_SYNC) && 0
1240 {
1241 int ftype;
1242 if (src_frame->pict_type == FF_B_TYPE)
1243 ftype = 'B';
1244 else if (src_frame->pict_type == FF_I_TYPE)
1245 ftype = 'I';
1246 else
1247 ftype = 'P';
1248 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1249 ftype, pts, pts1);
1250 }
1251#endif
1252 return queue_picture(is, src_frame, pts);
1253}
1254
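/* video decoder thread: pull packets from the video queue, decode them and
   push the resulting frames (with their computed pts) into the picture queue */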
1255static int video_thread(void *arg)
1256{
1257 VideoState *is = arg;
1258 AVPacket pkt1, *pkt = &pkt1;
1259 int len1, got_picture;
1260 AVFrame *frame= avcodec_alloc_frame();
1261 double pts;
1262
1263 for(;;) {
1264 while (is->paused && !is->videoq.abort_request) {
1265 SDL_Delay(10);
1266 }
1267 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1268 break;
1269 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1270 this packet, if any */
1271 pts = 0;
1272 if (pkt->dts != AV_NOPTS_VALUE)
1273 pts = av_q2d(is->video_st->time_base)*pkt->dts;
1274
1275 SDL_LockMutex(is->video_decoder_mutex);
1276 len1 = avcodec_decode_video(is->video_st->codec,
1277 frame, &got_picture,
1278 pkt->data, pkt->size);
1279 SDL_UnlockMutex(is->video_decoder_mutex);
1280// if (len1 < 0)
1281// break;
1282 if (got_picture) {
1283 if (output_picture2(is, frame, pts) < 0)
1284 goto the_end;
1285 }
1286 av_free_packet(pkt);
1287 if (step)
1288 if (cur_stream)
1289 stream_pause(cur_stream);
1290 }
1291 the_end:
1292 av_free(frame);
1293 return 0;
1294}
1295
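/* subtitle decoder thread: decode subtitle packets, convert their RGBA
   palette to YUVA and queue them for blending at display time */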
1296static int subtitle_thread(void *arg)
1297{
1298 VideoState *is = arg;
1299 SubPicture *sp;
1300 AVPacket pkt1, *pkt = &pkt1;
1301 int len1, got_subtitle;
1302 double pts;
1303 int i, j;
1304 int r, g, b, y, u, v, a;
1305
1306 for(;;) {
1307 while (is->paused && !is->subtitleq.abort_request) {
1308 SDL_Delay(10);
1309 }
1310 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1311 break;
1312
1313 SDL_LockMutex(is->subpq_mutex);
1314 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1315 !is->subtitleq.abort_request) {
1316 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1317 }
1318 SDL_UnlockMutex(is->subpq_mutex);
1319
1320 if (is->subtitleq.abort_request)
1321 goto the_end;
1322
1323 sp = &is->subpq[is->subpq_windex];
1324
1325 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1326 this packet, if any */
1327 pts = 0;
1328 if (pkt->pts != AV_NOPTS_VALUE)
1329 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1330
1331 SDL_LockMutex(is->subtitle_decoder_mutex);
1332 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1333 &sp->sub, &got_subtitle,
1334 pkt->data, pkt->size);
1335 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1336// if (len1 < 0)
1337// break;
1338 if (got_subtitle && sp->sub.format == 0) {
1339 sp->pts = pts;
1340
1341 for (i = 0; i < sp->sub.num_rects; i++)
1342 {
1343 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1344 {
1345 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1346 y = RGB_TO_Y_CCIR(r, g, b);
1347 u = RGB_TO_U_CCIR(r, g, b, 0);
1348 v = RGB_TO_V_CCIR(r, g, b, 0);
1349 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1350 }
1351 }
1352
1353 /* now we can update the picture count */
1354 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1355 is->subpq_windex = 0;
1356 SDL_LockMutex(is->subpq_mutex);
1357 is->subpq_size++;
1358 SDL_UnlockMutex(is->subpq_mutex);
1359 }
1360 av_free_packet(pkt);
1361// if (step)
1362// if (cur_stream)
1363// stream_pause(cur_stream);
1364 }
1365 the_end:
1366 return 0;
1367}
1368
1369/* copy samples for viewing in editor window */
1370static void update_sample_display(VideoState *is, short *samples, int samples_size)
1371{
1372 int size, len, channels;
1373
1374 channels = is->audio_st->codec->channels;
1375
1376 size = samples_size / sizeof(short);
1377 while (size > 0) {
1378 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1379 if (len > size)
1380 len = size;
1381 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1382 samples += len;
1383 is->sample_array_index += len;
1384 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1385 is->sample_array_index = 0;
1386 size -= len;
1387 }
1388}
1389
1390/* return the new audio buffer size (samples can be added or deleted
1391 to get better sync if video or external master clock) */
1392static int synchronize_audio(VideoState *is, short *samples,
1393 int samples_size1, double pts)
1394{
1395 int n, samples_size;
1396 double ref_clock;
1397
1398 n = 2 * is->audio_st->codec->channels;
1399 samples_size = samples_size1;
1400
1401 /* if not master, then we try to remove or add samples to correct the clock */
1402 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1403 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1404 double diff, avg_diff;
1405 int wanted_size, min_size, max_size, nb_samples;
1406
1407 ref_clock = get_master_clock(is);
1408 diff = get_audio_clock(is) - ref_clock;
1409
1410 if (diff < AV_NOSYNC_THRESHOLD) {
1411 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1412 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1413 /* not enough measures to have a correct estimate */
1414 is->audio_diff_avg_count++;
1415 } else {
1416 /* estimate the A-V difference */
1417 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1418
1419 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1420 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1421 nb_samples = samples_size / n;
1422
1423 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1424 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1425 if (wanted_size < min_size)
1426 wanted_size = min_size;
1427 else if (wanted_size > max_size)
1428 wanted_size = max_size;
1429
1430                    /* add or remove samples to correct the sync */
1431 if (wanted_size < samples_size) {
1432 /* remove samples */
1433 samples_size = wanted_size;
1434 } else if (wanted_size > samples_size) {
1435 uint8_t *samples_end, *q;
1436 int nb;
1437
1438 /* add samples */
1439                        nb = (wanted_size - samples_size);
1440 samples_end = (uint8_t *)samples + samples_size - n;
1441 q = samples_end + n;
1442 while (nb > 0) {
1443 memcpy(q, samples_end, n);
1444 q += n;
1445 nb -= n;
1446 }
1447 samples_size = wanted_size;
1448 }
1449 }
1450#if 0
1451 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1452 diff, avg_diff, samples_size - samples_size1,
1453 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1454#endif
1455 }
1456 } else {
1457 /* too big difference : may be initial PTS errors, so
1458 reset A-V filter */
1459 is->audio_diff_avg_count = 0;
1460 is->audio_diff_cum = 0;
1461 }
1462 }
1463
1464 return samples_size;
1465}
1466
1467/* decode one audio frame and return its uncompressed size */
1468static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
1469{
1470 AVPacket *pkt = &is->audio_pkt;
1471 int n, len1, data_size;
1472 double pts;
1473
1474 for(;;) {
1475 /* NOTE: the audio packet can contain several frames */
1476 while (is->audio_pkt_size > 0) {
1477 SDL_LockMutex(is->audio_decoder_mutex);
1478 len1 = avcodec_decode_audio(is->audio_st->codec,
1479 (int16_t *)audio_buf, &data_size,
1480 is->audio_pkt_data, is->audio_pkt_size);
1481 SDL_UnlockMutex(is->audio_decoder_mutex);
1482 if (len1 < 0) {
1483 /* if error, we skip the frame */
1484 is->audio_pkt_size = 0;
1485 break;
1486 }
1487
1488 is->audio_pkt_data += len1;
1489 is->audio_pkt_size -= len1;
1490 if (data_size <= 0)
1491 continue;
1492 /* if no pts, then compute it */
1493 pts = is->audio_clock;
1494 *pts_ptr = pts;
1495 n = 2 * is->audio_st->codec->channels;
1496 is->audio_clock += (double)data_size /
1497 (double)(n * is->audio_st->codec->sample_rate);
1498#if defined(DEBUG_SYNC)
1499 {
1500 static double last_clock;
1501 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1502 is->audio_clock - last_clock,
1503 is->audio_clock, pts);
1504 last_clock = is->audio_clock;
1505 }
1506#endif
1507 return data_size;
1508 }
1509
1510 /* free the current packet */
1511 if (pkt->data)
1512 av_free_packet(pkt);
1513
1514 if (is->paused || is->audioq.abort_request) {
1515 return -1;
1516 }
1517
1518 /* read next packet */
1519 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1520 return -1;
1521 is->audio_pkt_data = pkt->data;
1522 is->audio_pkt_size = pkt->size;
1523
1524        /* update the audio clock with the pts, if available */
1525 if (pkt->pts != AV_NOPTS_VALUE) {
1526 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1527 }
1528 }
1529}
1530
1531/* get the current audio output buffer size, in bytes. With SDL, we
1532   cannot have precise information */
1533static int audio_write_get_buf_size(VideoState *is)
1534{
1535 return is->audio_hw_buf_size - is->audio_buf_index;
1536}
1537
1538
1539/* prepare a new audio buffer */
1540void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1541{
1542 VideoState *is = opaque;
1543 int audio_size, len1;
1544 double pts;
1545
1546 audio_callback_time = av_gettime();
1547
1548 while (len > 0) {
1549 if (is->audio_buf_index >= is->audio_buf_size) {
1550 audio_size = audio_decode_frame(is, is->audio_buf, &pts);
1551 if (audio_size < 0) {
1552 /* if error, just output silence */
1553 is->audio_buf_size = 1024;
1554 memset(is->audio_buf, 0, is->audio_buf_size);
1555 } else {
1556 if (is->show_audio)
1557 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1558 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1559 pts);
1560 is->audio_buf_size = audio_size;
1561 }
1562 is->audio_buf_index = 0;
1563 }
1564 len1 = is->audio_buf_size - is->audio_buf_index;
1565 if (len1 > len)
1566 len1 = len;
1567 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1568 len -= len1;
1569 stream += len1;
1570 is->audio_buf_index += len1;
1571 }
1572}
1573
1574
1575/* open a given stream. Return 0 if OK */
1576static int stream_component_open(VideoState *is, int stream_index)
1577{
1578 AVFormatContext *ic = is->ic;
1579 AVCodecContext *enc;
1580 AVCodec *codec;
1581 SDL_AudioSpec wanted_spec, spec;
1582
1583 if (stream_index < 0 || stream_index >= ic->nb_streams)
1584 return -1;
1585 enc = ic->streams[stream_index]->codec;
1586
1587 /* prepare audio output */
1588 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1589 wanted_spec.freq = enc->sample_rate;
1590 wanted_spec.format = AUDIO_S16SYS;
1591 /* hack for AC3. XXX: suppress that */
1592 if (enc->channels > 2)
1593 enc->channels = 2;
1594 wanted_spec.channels = enc->channels;
1595 wanted_spec.silence = 0;
1596 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1597 wanted_spec.callback = sdl_audio_callback;
1598 wanted_spec.userdata = is;
1599 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1600 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1601 return -1;
1602 }
1603 is->audio_hw_buf_size = spec.size;
1604 }
1605
1606 codec = avcodec_find_decoder(enc->codec_id);
1607 enc->debug_mv = debug_mv;
1608 enc->debug = debug;
1609 enc->workaround_bugs = workaround_bugs;
1610 enc->lowres = lowres;
1611 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1612 enc->idct_algo= idct;
1613 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1614 enc->skip_frame= skip_frame;
1615 enc->skip_idct= skip_idct;
1616 enc->skip_loop_filter= skip_loop_filter;
1617 enc->error_resilience= error_resilience;
1618 enc->error_concealment= error_concealment;
1619 if (!codec ||
1620 avcodec_open(enc, codec) < 0)
1621 return -1;
1622#if defined(HAVE_THREADS)
1623 if(thread_count>1)
1624 avcodec_thread_init(enc, thread_count);
1625#endif
1626 enc->thread_count= thread_count;
1627 switch(enc->codec_type) {
1628 case CODEC_TYPE_AUDIO:
1629 is->audio_stream = stream_index;
1630 is->audio_st = ic->streams[stream_index];
1631 is->audio_buf_size = 0;
1632 is->audio_buf_index = 0;
1633
1634 /* init averaging filter */
1635 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1636 is->audio_diff_avg_count = 0;
1637        /* since we do not have a precise enough audio fifo fullness,
1638 we correct audio sync only if larger than this threshold */
1639 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1640
1641 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1642 packet_queue_init(&is->audioq);
1643 SDL_PauseAudio(0);
1644 break;
1645 case CODEC_TYPE_VIDEO:
1646 is->video_stream = stream_index;
1647 is->video_st = ic->streams[stream_index];
1648
1649 is->frame_last_delay = 40e-3;
1650 is->frame_timer = (double)av_gettime() / 1000000.0;
1651 is->video_current_pts_time = av_gettime();
1652
1653 packet_queue_init(&is->videoq);
1654 is->video_tid = SDL_CreateThread(video_thread, is);
1655 break;
1656 case CODEC_TYPE_SUBTITLE:
1657 is->subtitle_stream = stream_index;
1658 is->subtitle_st = ic->streams[stream_index];
1659 packet_queue_init(&is->subtitleq);
1660
1661 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1662 break;
1663 default:
1664 break;
1665 }
1666 return 0;
1667}
1668
1669static void stream_component_close(VideoState *is, int stream_index)
1670{
1671 AVFormatContext *ic = is->ic;
1672 AVCodecContext *enc;
1673
1674 if (stream_index < 0 || stream_index >= ic->nb_streams)
1675 return;
1676 enc = ic->streams[stream_index]->codec;
1677
1678 switch(enc->codec_type) {
1679 case CODEC_TYPE_AUDIO:
1680 packet_queue_abort(&is->audioq);
1681
1682 SDL_CloseAudio();
1683
1684 packet_queue_end(&is->audioq);
1685 break;
1686 case CODEC_TYPE_VIDEO:
1687 packet_queue_abort(&is->videoq);
1688
1689 /* note: we also signal this mutex to make sure we deblock the
1690 video thread in all cases */
1691 SDL_LockMutex(is->pictq_mutex);
1692 SDL_CondSignal(is->pictq_cond);
1693 SDL_UnlockMutex(is->pictq_mutex);
1694
1695 SDL_WaitThread(is->video_tid, NULL);
1696
1697 packet_queue_end(&is->videoq);
1698 break;
1699 case CODEC_TYPE_SUBTITLE:
1700 packet_queue_abort(&is->subtitleq);
1701
1702 /* note: we also signal this mutex to make sure we deblock the
1703 video thread in all cases */
1704 SDL_LockMutex(is->subpq_mutex);
1705 is->subtitle_stream_changed = 1;
1706
1707 SDL_CondSignal(is->subpq_cond);
1708 SDL_UnlockMutex(is->subpq_mutex);
1709
1710 SDL_WaitThread(is->subtitle_tid, NULL);
1711
1712 packet_queue_end(&is->subtitleq);
1713 break;
1714 default:
1715 break;
1716 }
1717
1718 avcodec_close(enc);
1719 switch(enc->codec_type) {
1720 case CODEC_TYPE_AUDIO:
1721 is->audio_st = NULL;
1722 is->audio_stream = -1;
1723 break;
1724 case CODEC_TYPE_VIDEO:
1725 is->video_st = NULL;
1726 is->video_stream = -1;
1727 break;
1728 case CODEC_TYPE_SUBTITLE:
1729 is->subtitle_st = NULL;
1730 is->subtitle_stream = -1;
1731 break;
1732 default:
1733 break;
1734 }
1735}
1736
1737static void dump_stream_info(const AVFormatContext *s)
1738{
1739 if (s->track != 0)
1740 fprintf(stderr, "Track: %d\n", s->track);
1741 if (s->title[0] != '\0')
1742 fprintf(stderr, "Title: %s\n", s->title);
1743 if (s->author[0] != '\0')
1744 fprintf(stderr, "Author: %s\n", s->author);
1745 if (s->album[0] != '\0')
1746 fprintf(stderr, "Album: %s\n", s->album);
1747 if (s->year != 0)
1748 fprintf(stderr, "Year: %d\n", s->year);
1749 if (s->genre[0] != '\0')
1750 fprintf(stderr, "Genre: %s\n", s->genre);
1751}
1752
1753/* since we have only one decoding thread, we can use a global
1754 variable instead of a thread local variable */
1755static VideoState *global_video_state;
1756
1757static int decode_interrupt_cb(void)
1758{
1759 return (global_video_state && global_video_state->abort_request);
1760}
1761
1762/* this thread gets the stream from the disk or the network */
1763static int decode_thread(void *arg)
1764{
1765 VideoState *is = arg;
1766 AVFormatContext *ic;
1767 int err, i, ret, video_index, audio_index, use_play;
1768 AVPacket pkt1, *pkt = &pkt1;
1769 AVFormatParameters params, *ap = &params;
1770
1771 video_index = -1;
1772 audio_index = -1;
1773 is->video_stream = -1;
1774 is->audio_stream = -1;
1775 is->subtitle_stream = -1;
1776
1777 global_video_state = is;
1778 url_set_interrupt_cb(decode_interrupt_cb);
1779
1780 memset(ap, 0, sizeof(*ap));
1781 ap->image_format = image_format;
1782 ap->initial_pause = 1; /* we force a pause when starting an RTSP
1783 stream */
1784
1785 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1786 if (err < 0) {
1787 print_error(is->filename, err);
1788 ret = -1;
1789 goto fail;
1790 }
1791 is->ic = ic;
1792#ifdef CONFIG_NETWORK
1793 use_play = (ic->iformat == &rtsp_demuxer);
1794#else
1795 use_play = 0;
1796#endif
1797
1798 if(genpts)
1799 ic->flags |= AVFMT_FLAG_GENPTS;
1800
1801 if (!use_play) {
1802 err = av_find_stream_info(ic);
1803 if (err < 0) {
1804 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1805 ret = -1;
1806 goto fail;
1807 }
1808        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
1809 }
1810
1811 /* if seeking requested, we execute it */
1812 if (start_time != AV_NOPTS_VALUE) {
1813 int64_t timestamp;
1814
1815 timestamp = start_time;
1816 /* add the stream start time */
1817 if (ic->start_time != AV_NOPTS_VALUE)
1818 timestamp += ic->start_time;
1819 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1820 if (ret < 0) {
1821 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1822 is->filename, (double)timestamp / AV_TIME_BASE);
1823 }
1824 }
1825
1826 /* now we can begin to play (RTSP stream only) */
1827 av_read_play(ic);
1828
1829 if (use_play) {
1830 err = av_find_stream_info(ic);
1831 if (err < 0) {
1832 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1833 ret = -1;
1834 goto fail;
1835 }
1836 }
1837
1838 for(i = 0; i < ic->nb_streams; i++) {
1839 AVCodecContext *enc = ic->streams[i]->codec;
1840 switch(enc->codec_type) {
1841 case CODEC_TYPE_AUDIO:
1842 if (audio_index < 0 && !audio_disable)
1843 audio_index = i;
1844 break;
1845 case CODEC_TYPE_VIDEO:
1846 if (video_index < 0 && !video_disable)
1847 video_index = i;
1848 break;
1849 default:
1850 break;
1851 }
1852 }
1853 if (show_status) {
1854 dump_format(ic, 0, is->filename, 0);
1855 dump_stream_info(ic);
1856 }
1857
1858 /* open the streams */
1859 if (audio_index >= 0) {
1860 stream_component_open(is, audio_index);
1861 }
1862
1863 if (video_index >= 0) {
1864 stream_component_open(is, video_index);
1865 } else {
1866 if (!display_disable)
1867 is->show_audio = 1;
1868 }
1869
1870 if (is->video_stream < 0 && is->audio_stream < 0) {
1871 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1872 ret = -1;
1873 goto fail;
1874 }
1875
1876 for(;;) {
1877 if (is->abort_request)
1878 break;
1879#ifdef CONFIG_NETWORK
1880 if (is->paused != is->last_paused) {
1881 is->last_paused = is->paused;
1882 if (is->paused)
1883 av_read_pause(ic);
1884 else
1885 av_read_play(ic);
1886 }
1887 if (is->paused && ic->iformat == &rtsp_demuxer) {
1888 /* wait 10 ms to avoid trying to get another packet */
1889 /* XXX: horrible */
1890 SDL_Delay(10);
1891 continue;
1892 }
1893#endif
1894 if (is->seek_req) {
1895 /* XXX: must lock decoder threads */
1896 SDL_LockMutex(is->video_decoder_mutex);
1897 SDL_LockMutex(is->audio_decoder_mutex);
1898 SDL_LockMutex(is->subtitle_decoder_mutex);
1899 ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
1900 if (ret < 0) {
1901 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
1902 }else{
1903 if (is->audio_stream >= 0) {
1904 packet_queue_flush(&is->audioq);
1905 }
1906 if (is->subtitle_stream >= 0) {
1907 packet_queue_flush(&is->subtitleq);
1908 }
1909 if (is->video_stream >= 0) {
1910 packet_queue_flush(&is->videoq);
1911 avcodec_flush_buffers(ic->streams[video_index]->codec);
1912 }
1913 }
1914 SDL_UnlockMutex(is->subtitle_decoder_mutex);
1915 SDL_UnlockMutex(is->audio_decoder_mutex);
1916 SDL_UnlockMutex(is->video_decoder_mutex);
1917 is->seek_req = 0;
1918 }
1919
1920        /* if the queues are full, no need to read more */
1921 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
1922 is->videoq.size > MAX_VIDEOQ_SIZE ||
1923 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
1924 url_feof(&ic->pb)) {
1925 /* wait 10 ms */
1926 SDL_Delay(10);
1927 continue;
1928 }
1929 ret = av_read_frame(ic, pkt);
1930 if (ret < 0) {
1931 if (url_ferror(&ic->pb) == 0) {
1932 SDL_Delay(100); /* wait for user event */
1933 continue;
1934 } else
1935 break;
1936 }
1937 if (pkt->stream_index == is->audio_stream) {
1938 packet_queue_put(&is->audioq, pkt);
1939 } else if (pkt->stream_index == is->video_stream) {
1940 packet_queue_put(&is->videoq, pkt);
1941 } else if (pkt->stream_index == is->subtitle_stream) {
1942 packet_queue_put(&is->subtitleq, pkt);
1943 } else {
1944 av_free_packet(pkt);
1945 }
1946 }
1947 /* wait until the end */
1948 while (!is->abort_request) {
1949 SDL_Delay(100);
1950 }
1951
1952 ret = 0;
1953 fail:
1954 /* disable interrupting */
1955 global_video_state = NULL;
1956
1957 /* close each stream */
1958 if (is->audio_stream >= 0)
1959 stream_component_close(is, is->audio_stream);
1960 if (is->video_stream >= 0)
1961 stream_component_close(is, is->video_stream);
1962 if (is->subtitle_stream >= 0)
1963 stream_component_close(is, is->subtitle_stream);
1964 if (is->ic) {
1965 av_close_input_file(is->ic);
1966 is->ic = NULL; /* safety */
1967 }
1968 url_set_interrupt_cb(NULL);
1969
1970 if (ret != 0) {
1971 SDL_Event event;
1972
1973 event.type = FF_QUIT_EVENT;
1974 event.user.data1 = is;
1975 SDL_PushEvent(&event);
1976 }
1977 return 0;
1978}
1979
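/* create a VideoState for the given file, set up its queues, mutexes and
   refresh timer, and start the demuxing (parse) thread */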
1980static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
1981{
1982 VideoState *is;
1983
1984 is = av_mallocz(sizeof(VideoState));
1985 if (!is)
1986 return NULL;
1987 pstrcpy(is->filename, sizeof(is->filename), filename);
1988 is->iformat = iformat;
1989 if (screen) {
1990 is->width = screen->w;
1991 is->height = screen->h;
1992 }
1993 is->ytop = 0;
1994 is->xleft = 0;
1995
1996 /* start video display */
1997 is->pictq_mutex = SDL_CreateMutex();
1998 is->pictq_cond = SDL_CreateCond();
1999
2000 is->subpq_mutex = SDL_CreateMutex();
2001 is->subpq_cond = SDL_CreateCond();
2002
2003 is->subtitle_decoder_mutex = SDL_CreateMutex();
2004 is->audio_decoder_mutex = SDL_CreateMutex();
2005 is->video_decoder_mutex = SDL_CreateMutex();
2006
2007 /* add the refresh timer to draw the picture */
2008 schedule_refresh(is, 40);
2009
2010 is->av_sync_type = av_sync_type;
2011 is->parse_tid = SDL_CreateThread(decode_thread, is);
2012 if (!is->parse_tid) {
2013 av_free(is);
2014 return NULL;
2015 }
2016 return is;
2017}
2018
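/* signal the parse thread to quit, wait for it and release the SDL overlays,
   mutexes and condition variables owned by the stream */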
2019static void stream_close(VideoState *is)
2020{
2021 VideoPicture *vp;
2022 int i;
2023 /* XXX: use a special url_shutdown call to abort parse cleanly */
2024 is->abort_request = 1;
2025 SDL_WaitThread(is->parse_tid, NULL);
2026
2027 /* free all pictures */
2028 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2029 vp = &is->pictq[i];
2030 if (vp->bmp) {
2031 SDL_FreeYUVOverlay(vp->bmp);
2032 vp->bmp = NULL;
2033 }
2034 }
2035 SDL_DestroyMutex(is->pictq_mutex);
2036 SDL_DestroyCond(is->pictq_cond);
2037 SDL_DestroyMutex(is->subpq_mutex);
2038 SDL_DestroyCond(is->subpq_cond);
2039 SDL_DestroyMutex(is->subtitle_decoder_mutex);
2040 SDL_DestroyMutex(is->audio_decoder_mutex);
2041 SDL_DestroyMutex(is->video_decoder_mutex);
2042}
2043
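/* switch to the next stream of the given type, wrapping around; for
   subtitles, cycling past the last stream turns subtitles off */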
2044static void stream_cycle_channel(VideoState *is, int codec_type)
2045{
2046 AVFormatContext *ic = is->ic;
2047 int start_index, stream_index;
2048 AVStream *st;
2049
2050 if (codec_type == CODEC_TYPE_VIDEO)
2051 start_index = is->video_stream;
2052 else if (codec_type == CODEC_TYPE_AUDIO)
2053 start_index = is->audio_stream;
2054 else
2055 start_index = is->subtitle_stream;
2056 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2057 return;
2058 stream_index = start_index;
2059 for(;;) {
2060 if (++stream_index >= is->ic->nb_streams)
2061 {
2062 if (codec_type == CODEC_TYPE_SUBTITLE)
2063 {
2064 stream_index = -1;
2065 goto the_end;
2066 } else
2067 stream_index = 0;
2068 }
2069 if (stream_index == start_index)
2070 return;
2071 st = ic->streams[stream_index];
2072 if (st->codec->codec_type == codec_type) {
2073 /* check that parameters are OK */
2074 switch(codec_type) {
2075 case CODEC_TYPE_AUDIO:
2076 if (st->codec->sample_rate != 0 &&
2077 st->codec->channels != 0)
2078 goto the_end;
2079 break;
2080 case CODEC_TYPE_VIDEO:
2081 case CODEC_TYPE_SUBTITLE:
2082 goto the_end;
2083 default:
2084 break;
2085 }
2086 }
2087 }
2088 the_end:
2089 stream_component_close(is, start_index);
2090 stream_component_open(is, stream_index);
2091}
2092
2093
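/* Toggle between windowed and full-screen display. If the full-screen
   size was not detected (fs_screen_width == 0), fall back to
   SDL_WM_ToggleFullScreen(); otherwise re-create the SDL surface at the
   recorded full-screen or windowed resolution. */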
2094static void toggle_full_screen(void)
2095{
2096 int w, h, flags;
2097 is_full_screen = !is_full_screen;
2098 if (!fs_screen_width) {
2099 /* use default SDL method */
2100 SDL_WM_ToggleFullScreen(screen);
2101 } else {
2102 /* use the recorded resolution */
2103 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2104 if (is_full_screen) {
2105 w = fs_screen_width;
2106 h = fs_screen_height;
2107 flags |= SDL_FULLSCREEN;
2108 } else {
2109 w = screen_width;
2110 h = screen_height;
2111 flags |= SDL_RESIZABLE;
2112 }
2113 screen = SDL_SetVideoMode(w, h, 0, flags);
2114 cur_stream->width = w;
2115 cur_stream->height = h;
2116 }
2117}
2118
2119static void toggle_pause(void)
2120{
2121 if (cur_stream)
2122 stream_pause(cur_stream);
2123 step = 0;
2124}
2125
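/* Single-frame stepping ('s' key): resume playback if it was paused and
   set the global 'step' flag; the video code is expected to pause again
   once the next frame has been displayed. */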
2126static void step_to_next_frame(void)
2127{
2128 if (cur_stream) {
2129 if (cur_stream->paused)
2130 cur_stream->paused=0;
2131 cur_stream->video_current_pts = get_video_clock(cur_stream);
2132 }
2133 step = 1;
2134}
2135
2136static void do_exit(void)
2137{
2138 if (cur_stream) {
2139 stream_close(cur_stream);
2140 cur_stream = NULL;
2141 }
2142 if (show_status)
2143 printf("\n");
2144 SDL_Quit();
2145 exit(0);
2146}
2147
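/* Toggle between the video picture and the audio waveform display
   ('w' key, "show audio waves" in the help text). */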
2148static void toggle_audio_display(void)
2149{
2150 if (cur_stream) {
2151 cur_stream->show_audio = !cur_stream->show_audio;
2152 }
2153}
2154
2155/* handle an event sent by the GUI */
2156static void event_loop(void)
2157{
2158 SDL_Event event;
2159 double incr, pos, frac;
2160
2161 for(;;) {
2162 SDL_WaitEvent(&event);
2163 switch(event.type) {
2164 case SDL_KEYDOWN:
2165 switch(event.key.keysym.sym) {
2166 case SDLK_ESCAPE:
2167 case SDLK_q:
2168 do_exit();
2169 break;
2170 case SDLK_f:
2171 toggle_full_screen();
2172 break;
2173 case SDLK_p:
2174 case SDLK_SPACE:
2175 toggle_pause();
2176 break;
2177 case SDLK_s: //S: Step to next frame
2178 step_to_next_frame();
2179 break;
2180 case SDLK_a:
2181 if (cur_stream)
2182 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2183 break;
2184 case SDLK_v:
2185 if (cur_stream)
2186 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2187 break;
2188 case SDLK_t:
2189 if (cur_stream)
2190 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2191 break;
2192 case SDLK_w:
2193 toggle_audio_display();
2194 break;
2195 case SDLK_LEFT:
2196 incr = -10.0;
2197 goto do_seek;
2198 case SDLK_RIGHT:
2199 incr = 10.0;
2200 goto do_seek;
2201 case SDLK_UP:
2202 incr = 60.0;
2203 goto do_seek;
2204 case SDLK_DOWN:
2205 incr = -60.0;
2206 do_seek:
2207 if (cur_stream) {
2208 pos = get_master_clock(cur_stream);
2209 pos += incr;
2210 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2211 }
2212 break;
2213 default:
2214 break;
2215 }
2216 break;
2217 case SDL_MOUSEBUTTONDOWN:
2218 if (cur_stream) {
2219 int ns, hh, mm, ss;
2220 int tns, thh, tmm, tss;
2221 tns = cur_stream->ic->duration/1000000LL;
2222 thh = tns/3600;
2223 tmm = (tns%3600)/60;
2224 tss = (tns%60);
2225 frac = (double)event.button.x/(double)cur_stream->width;
2226 ns = frac*tns;
2227 hh = ns/3600;
2228 mm = (ns%3600)/60;
2229 ss = (ns%60);
2230 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2231 hh, mm, ss, thh, tmm, tss);
2232 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2233 }
2234 break;
2235 case SDL_VIDEORESIZE:
2236 if (cur_stream) {
2237 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2238 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2239 cur_stream->width = event.resize.w;
2240 cur_stream->height = event.resize.h;
2241 }
2242 break;
2243 case SDL_QUIT:
2244 case FF_QUIT_EVENT:
2245 do_exit();
2246 break;
2247 case FF_ALLOC_EVENT:
2248 alloc_picture(event.user.data1);
2249 break;
2250 case FF_REFRESH_EVENT:
2251 video_refresh_timer(event.user.data1);
2252 break;
2253 default:
2254 break;
2255 }
2256 }
2257}
2258
2259void opt_width(const char *arg)
2260{
2261 screen_width = atoi(arg);
2262}
2263
2264void opt_height(const char *arg)
2265{
2266 screen_height = atoi(arg);
2267}
2268
2269static void opt_format(const char *arg)
2270{
2271 file_iformat = av_find_input_format(arg);
2272 if (!file_iformat) {
2273 fprintf(stderr, "Unknown input format: %s\n", arg);
2274 exit(1);
2275 }
2276}
2277
2278static void opt_image_format(const char *arg)
2279{
2280 AVImageFormat *f;
2281
2282 for(f = first_image_format; f != NULL; f = f->next) {
2283 if (!strcmp(arg, f->name))
2284 break;
2285 }
2286 if (!f) {
2287 fprintf(stderr, "Unknown image format: '%s'\n", arg);
2288 exit(1);
2289 }
2290 image_format = f;
2291}
2292
2293#ifdef CONFIG_NETWORK
2294void opt_rtp_tcp(void)
2295{
2296 /* only tcp protocol */
2297 rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2298}
2299#endif
2300
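/* -sync option: choose the master clock used for A/V synchronisation,
   i.e. the audio clock (default), the video clock or an external clock. */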
2301void opt_sync(const char *arg)
2302{
2303 if (!strcmp(arg, "audio"))
2304 av_sync_type = AV_SYNC_AUDIO_MASTER;
2305 else if (!strcmp(arg, "video"))
2306 av_sync_type = AV_SYNC_VIDEO_MASTER;
2307 else if (!strcmp(arg, "ext"))
2308 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2309 else
2310 show_help();
2311}
2312
2313void opt_seek(const char *arg)
2314{
2315 start_time = parse_date(arg, 1);
2316}
2317
2318static void opt_debug(const char *arg)
2319{
2320 av_log_set_level(99);
2321 debug = atoi(arg);
2322}
2323
2324static void opt_vismv(const char *arg)
2325{
2326 debug_mv = atoi(arg);
2327}
2328
2329static void opt_thread_count(const char *arg)
2330{
2331 thread_count= atoi(arg);
2332#if !defined(HAVE_THREADS)
2333 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2334#endif
2335}
2336
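/* Command line option table consumed by parse_options(). OPT_BOOL and
   OPT_INT entries point directly at the variable to set; the other
   entries name a handler function. */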
2337const OptionDef options[] = {
2338 { "h", 0, {(void*)show_help}, "show help" },
2339 { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2340 { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2341 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2342 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2343 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2344 { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2345 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2346 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2347 { "img", HAS_ARG, {(void*)opt_image_format}, "force image format", "img_fmt" },
2348 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2349 { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2350 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2351 { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2352 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2353 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2354 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2355 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2356 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2357 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2358 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2359 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2360 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2361#ifdef CONFIG_NETWORK
2362 { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2363#endif
2364 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2365 { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2366 { NULL, },
2367};
2368
2369void show_help(void)
2370{
2371 printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003 Fabrice Bellard\n"
2372 "usage: ffplay [options] input_file\n"
2373 "Simple media player\n");
2374 printf("\n");
2375 show_help_options(options, "Main options:\n",
2376 OPT_EXPERT, 0);
2377 show_help_options(options, "\nAdvanced options:\n",
2378 OPT_EXPERT, OPT_EXPERT);
2379 printf("\nWhile playing:\n"
2380 "q, ESC quit\n"
2381 "f toggle full screen\n"
2382 "p, SPC pause\n"
2383 "a cycle audio channel\n"
2384 "v cycle video channel\n"
2385 "t cycle subtitle channel\n"
2386 "w show audio waves\n"
2387 "left/right seek backward/forward 10 seconds\n"
2388 "down/up seek backward/forward 1 minute\n"
2389 "mouse click seek to percentage in file corresponding to fraction of width\n"
2390 );
2391 exit(1);
2392}
2393
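/* Non-option argument handler: the input file name; "-" selects
   standard input via the "pipe:" URL protocol. */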
2394void parse_arg_file(const char *filename)
2395{
2396 if (!strcmp(filename, "-"))
2397 filename = "pipe:";
2398 input_filename = filename;
2399}
2400
2401/* program entry point */
2402int main(int argc, char **argv)
2403{
2404 int flags, w, h;
2405
2406    /* register all codecs, demuxers and protocols */
2407 av_register_all();
2408
2409 #ifdef CONFIG_OS2
2410 MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2411
2412 // Make stdout and stderr unbuffered
2413 setbuf( stdout, NULL );
2414 setbuf( stderr, NULL );
2415 #endif
2416
2417 parse_options(argc, argv, options);
2418
2419 if (!input_filename)
2420 show_help();
2421
2422 if (display_disable) {
2423 video_disable = 1;
2424 }
2425 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2426#if !defined(CONFIG_WIN32) && !defined(CONFIG_DARWIN)
2427 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2428#endif
2429 if (SDL_Init (flags)) {
2430 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2431 exit(1);
2432 }
2433
2434 if (!display_disable) {
2435#ifdef HAVE_SDL_VIDEO_SIZE
2436 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2437 fs_screen_width = vi->current_w;
2438 fs_screen_height = vi->current_h;
2439#endif
2440 flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
2441 if (is_full_screen && fs_screen_width) {
2442 w = fs_screen_width;
2443 h = fs_screen_height;
2444 flags |= SDL_FULLSCREEN;
2445 } else {
2446 w = screen_width;
2447 h = screen_height;
2448 flags |= SDL_RESIZABLE;
2449 }
2450#ifndef CONFIG_DARWIN
2451 screen = SDL_SetVideoMode(w, h, 0, flags);
2452#else
2453 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
2454 screen = SDL_SetVideoMode(w, h, 24, flags);
2455#endif
2456 if (!screen) {
2457 fprintf(stderr, "SDL: could not set video mode - exiting\n");
2458 exit(1);
2459 }
2460 SDL_WM_SetCaption("FFplay", "FFplay");
2461 }
2462
2463 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2464 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2465 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2466 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2467
2468 cur_stream = stream_open(input_filename, file_iformat);
2469
2470 event_loop();
2471
2472    /* not reached: event_loop() only exits via do_exit(), which calls exit() */
2473
2474 return 0;
2475}