Blender V4.3
writeffmpeg.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later
4 * Partial Copyright 2006 Peter Schlaile. */
5
10#ifdef WITH_FFMPEG
11# include <cstdio>
12# include <cstring>
13
14# include <cstdlib>
15
16# include "MEM_guardedalloc.h"
17
18# include "DNA_scene_types.h"
19
20# include "BLI_blenlib.h"
21
22# ifdef WITH_AUDASPACE
23# include <AUD_Device.h>
24# include <AUD_Special.h>
25# endif
26
27# include "BLI_endian_defines.h"
28# include "BLI_math_base.h"
29# include "BLI_threads.h"
30# include "BLI_utildefines.h"
31# include "BLI_vector.hh"
32
33# include "BKE_global.hh"
34# include "BKE_image.hh"
35# include "BKE_main.hh"
36# include "BKE_report.hh"
37# include "BKE_sound.h"
38# include "BKE_writeffmpeg.hh"
39
40# include "IMB_imbuf.hh"
41
42/* This needs to be included after BLI_math_base.h otherwise it will redefine some math defines
43 * like M_SQRT1_2 leading to warnings with MSVC */
44extern "C" {
45# include <libavcodec/avcodec.h>
46# include <libavformat/avformat.h>
47# include <libavutil/buffer.h>
48# include <libavutil/channel_layout.h>
49# include <libavutil/imgutils.h>
50# include <libavutil/opt.h>
51# include <libavutil/rational.h>
52# include <libavutil/samplefmt.h>
53# include <libswscale/swscale.h>
54
55# include "ffmpeg_compat.h"
56}
57
58struct StampData;
59
60/* libswscale context creation and destruction is expensive.
61 * Maintain a cache of already created contexts. */
62
63constexpr int64_t swscale_cache_max_entries = 32;
64
/* One cached libswscale conversion context together with the parameters it
 * was created for, so callers can look up a matching reusable context. */
struct SwscaleContext {
  /* Source and destination image dimensions this context converts between. */
  int src_width = 0, src_height = 0;
  int dst_width = 0, dst_height = 0;
  /* Source and destination pixel formats this context converts between. */
  AVPixelFormat src_format = AV_PIX_FMT_NONE, dst_format = AV_PIX_FMT_NONE;
  /* swscale flags used when the context was created. */
  int flags = 0;

  /* The actual libswscale context (freed by the cache on eviction/exit). */
  SwsContext *context = nullptr;
  /* Value of #swscale_cache_timestamp when this entry was last acquired;
   * larger age (current timestamp minus this) means "older" for eviction. */
  int64_t last_use_timestamp = 0;
  /* True while the context is handed out to a caller and must not be evicted. */
  bool is_used = false;
};
75
76static ThreadMutex swscale_cache_lock = PTHREAD_MUTEX_INITIALIZER;
77static int64_t swscale_cache_timestamp = 0;
78static blender::Vector<SwscaleContext> *swscale_cache = nullptr;
79
/* State for writing one FFMPEG movie file (video plus optional audio track). */
struct FFMpegContext {
  int ffmpeg_type; /* Container/format type, one of the FFMPEG_* values. */
  AVCodecID ffmpeg_codec;       /* Video codec ID. */
  AVCodecID ffmpeg_audio_codec; /* Audio codec ID. */
  int ffmpeg_video_bitrate;
  int ffmpeg_audio_bitrate;
  int ffmpeg_gop_size;
  int ffmpeg_max_b_frames;
  int ffmpeg_autosplit;       /* Whether output is split into multiple files (see FFMPEG_AUTOSPLIT_SIZE). */
  int ffmpeg_autosplit_count; /* Index of the current autosplit file. */
  bool ffmpeg_preview;

  int ffmpeg_crf; /* set to 0 to not use CRF mode; we have another flag for lossless anyway. */
  int ffmpeg_preset; /* see eFFMpegPreset */

  AVFormatContext *outfile;
  AVCodecContext *video_codec;
  AVCodecContext *audio_codec;
  AVStream *video_stream;
  AVStream *audio_stream;
  AVFrame *current_frame; /* Image frame in output pixel format. */
  int video_time;         /* PTS (in frames) assigned to the next video frame. */

  /* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
  AVFrame *img_convert_frame;
  SwsContext *img_convert_ctx;

  uint8_t *audio_input_buffer;        /* Samples read from the audio mixdown device. */
  uint8_t *audio_deinterleave_buffer; /* Scratch buffer used when de-interleaving samples. */
  int audio_input_samples;
  double audio_time;
  double audio_time_total;
  bool audio_deinterleave; /* When set, samples are de-interleaved before encoding. */
  int audio_sample_size;   /* Size of a single audio sample in bytes. */

  StampData *stamp_data;

# ifdef WITH_AUDASPACE
  AUD_Device *audio_mixdown_device;
# endif
};
121
122# define FFMPEG_AUTOSPLIT_SIZE 2000000000
123
124# define PRINT \
125 if (G.debug & G_DEBUG_FFMPEG) \
126 printf
127
128static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value);
129static void ffmpeg_filepath_get(FFMpegContext *context,
130 char filepath[FILE_MAX],
131 const RenderData *rd,
132 bool preview,
133 const char *suffix);
134
135/* Delete a picture buffer */
136
137static void delete_picture(AVFrame *f)
138{
139 if (f) {
140 av_frame_free(&f);
141 }
142}
143
144static int request_float_audio_buffer(int codec_id)
145{
146 /* If any of these codecs, we prefer the float sample format (if supported) */
147 return codec_id == AV_CODEC_ID_AAC || codec_id == AV_CODEC_ID_AC3 ||
148 codec_id == AV_CODEC_ID_VORBIS;
149}
150
151# ifdef WITH_AUDASPACE
152
153static int write_audio_frame(FFMpegContext *context)
154{
155 AVFrame *frame = nullptr;
156 AVCodecContext *c = context->audio_codec;
157
158 AUD_Device_read(
159 context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
160
161 frame = av_frame_alloc();
162 frame->pts = context->audio_time / av_q2d(c->time_base);
163 frame->nb_samples = context->audio_input_samples;
164 frame->format = c->sample_fmt;
165# ifdef FFMPEG_USE_OLD_CHANNEL_VARS
166 frame->channels = c->channels;
167 frame->channel_layout = c->channel_layout;
168 const int num_channels = c->channels;
169# else
170 av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
171 const int num_channels = c->ch_layout.nb_channels;
172# endif
173
174 if (context->audio_deinterleave) {
175 int channel, i;
176 uint8_t *temp;
177
178 for (channel = 0; channel < num_channels; channel++) {
179 for (i = 0; i < frame->nb_samples; i++) {
180 memcpy(context->audio_deinterleave_buffer +
181 (i + channel * frame->nb_samples) * context->audio_sample_size,
182 context->audio_input_buffer +
183 (num_channels * i + channel) * context->audio_sample_size,
184 context->audio_sample_size);
185 }
186 }
187
188 temp = context->audio_deinterleave_buffer;
189 context->audio_deinterleave_buffer = context->audio_input_buffer;
190 context->audio_input_buffer = temp;
191 }
192
193 avcodec_fill_audio_frame(frame,
194 num_channels,
195 c->sample_fmt,
196 context->audio_input_buffer,
197 context->audio_input_samples * num_channels *
198 context->audio_sample_size,
199 1);
200
201 int success = 1;
202
203 char error_str[AV_ERROR_MAX_STRING_SIZE];
204 int ret = avcodec_send_frame(c, frame);
205 if (ret < 0) {
206 /* Can't send frame to encoder. This shouldn't happen. */
207 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
208 fprintf(stderr, "Can't send audio frame: %s\n", error_str);
209 success = -1;
210 }
211
212 AVPacket *pkt = av_packet_alloc();
213
214 while (ret >= 0) {
215
216 ret = avcodec_receive_packet(c, pkt);
217 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
218 break;
219 }
220 if (ret < 0) {
221 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
222 fprintf(stderr, "Error encoding audio frame: %s\n", error_str);
223 success = -1;
224 }
225
226 pkt->stream_index = context->audio_stream->index;
227 av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);
228# ifdef FFMPEG_USE_DURATION_WORKAROUND
229 my_guess_pkt_duration(context->outfile, context->audio_stream, pkt);
230# endif
231
232 pkt->flags |= AV_PKT_FLAG_KEY;
233
234 int write_ret = av_interleaved_write_frame(context->outfile, pkt);
235 if (write_ret != 0) {
236 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
237 fprintf(stderr, "Error writing audio packet: %s\n", error_str);
238 success = -1;
239 break;
240 }
241 }
242
243 av_packet_free(&pkt);
244 av_frame_free(&frame);
245
246 return success;
247}
248# endif /* #ifdef WITH_AUDASPACE */
249
250/* Allocate a temporary frame */
251static AVFrame *alloc_picture(AVPixelFormat pix_fmt, int width, int height)
252{
253 /* allocate space for the struct */
254 AVFrame *f = av_frame_alloc();
255 if (f == nullptr) {
256 return nullptr;
257 }
258
259 /* allocate the actual picture buffer */
260 const size_t align = ffmpeg_get_buffer_alignment();
261 int size = av_image_get_buffer_size(pix_fmt, width, height, align);
262 AVBufferRef *buf = av_buffer_alloc(size);
263 if (buf == nullptr) {
264 av_frame_free(&f);
265 return nullptr;
266 }
267
268 av_image_fill_arrays(f->data, f->linesize, buf->data, pix_fmt, width, height, align);
269 f->buf[0] = buf;
270 f->format = pix_fmt;
271 f->width = width;
272 f->height = height;
273
274 return f;
275}
276
277/* Get the correct file extensions for the requested format,
278 * first is always desired guess_format parameter */
279static const char **get_file_extensions(int format)
280{
281 switch (format) {
282 case FFMPEG_DV: {
283 static const char *rv[] = {".dv", nullptr};
284 return rv;
285 }
286 case FFMPEG_MPEG1: {
287 static const char *rv[] = {".mpg", ".mpeg", nullptr};
288 return rv;
289 }
290 case FFMPEG_MPEG2: {
291 static const char *rv[] = {".dvd", ".vob", ".mpg", ".mpeg", nullptr};
292 return rv;
293 }
294 case FFMPEG_MPEG4: {
295 static const char *rv[] = {".mp4", ".mpg", ".mpeg", nullptr};
296 return rv;
297 }
298 case FFMPEG_AVI: {
299 static const char *rv[] = {".avi", nullptr};
300 return rv;
301 }
302 case FFMPEG_MOV: {
303 static const char *rv[] = {".mov", nullptr};
304 return rv;
305 }
306 case FFMPEG_H264: {
307 /* FIXME: avi for now... */
308 static const char *rv[] = {".avi", nullptr};
309 return rv;
310 }
311
312 case FFMPEG_XVID: {
313 /* FIXME: avi for now... */
314 static const char *rv[] = {".avi", nullptr};
315 return rv;
316 }
317 case FFMPEG_FLV: {
318 static const char *rv[] = {".flv", nullptr};
319 return rv;
320 }
321 case FFMPEG_MKV: {
322 static const char *rv[] = {".mkv", nullptr};
323 return rv;
324 }
325 case FFMPEG_OGG: {
326 static const char *rv[] = {".ogv", ".ogg", nullptr};
327 return rv;
328 }
329 case FFMPEG_WEBM: {
330 static const char *rv[] = {".webm", nullptr};
331 return rv;
332 }
333 case FFMPEG_AV1: {
334 static const char *rv[] = {".mp4", ".mkv", nullptr};
335 return rv;
336 }
337 default:
338 return nullptr;
339 }
340}
341
342/* Write a frame to the output file */
343static bool write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
344{
345 int ret, success = 1;
346 AVPacket *packet = av_packet_alloc();
347
348 AVCodecContext *c = context->video_codec;
349
350 frame->pts = context->video_time;
351 context->video_time++;
352
353 char error_str[AV_ERROR_MAX_STRING_SIZE];
354 ret = avcodec_send_frame(c, frame);
355 if (ret < 0) {
356 /* Can't send frame to encoder. This shouldn't happen. */
357 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
358 fprintf(stderr, "Can't send video frame: %s\n", error_str);
359 success = -1;
360 }
361
362 while (ret >= 0) {
363 ret = avcodec_receive_packet(c, packet);
364
365 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
366 /* No more packets available. */
367 break;
368 }
369 if (ret < 0) {
370 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
371 fprintf(stderr, "Error encoding frame: %s\n", error_str);
372 break;
373 }
374
375 packet->stream_index = context->video_stream->index;
376 av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
377# ifdef FFMPEG_USE_DURATION_WORKAROUND
378 my_guess_pkt_duration(context->outfile, context->video_stream, packet);
379# endif
380
381 if (av_interleaved_write_frame(context->outfile, packet) != 0) {
382 success = -1;
383 break;
384 }
385 }
386
387 if (!success) {
388 BKE_report(reports, RPT_ERROR, "Error writing frame");
389 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
390 PRINT("Error writing frame: %s\n", error_str);
391 }
392
393 av_packet_free(&packet);
394
395 return success;
396}
397
/* Read a frame of video from the image buffer and convert it into the frame
 * that will be handed to the encoder (flipped vertically and, if needed,
 * converted to the output pixel format). Returns null when the image has no
 * byte buffer. */
static AVFrame *generate_video_frame(FFMpegContext *context, const ImBuf *image)
{
  /* For now only 8-bit/channel images are supported. */
  const uint8_t *pixels = image->byte_buffer.data;
  if (pixels == nullptr) {
    return nullptr;
  }

  AVCodecParameters *codec = context->video_stream->codecpar;
  int height = codec->height;
  AVFrame *rgb_frame;

  if (context->img_convert_frame != nullptr) {
    /* Pixel format conversion is needed. */
    rgb_frame = context->img_convert_frame;
  }
  else {
    /* The output pixel format is Blender's internal pixel format. */
    rgb_frame = context->current_frame;
  }

  /* Ensure frame is writable. Some video codecs might have made previous frame
   * shared (i.e. not writable). */
  av_frame_make_writable(rgb_frame);

  /* Copy the Blender pixels into the FFMPEG data-structure, taking care of endianness and flipping
   * the image vertically. */
  int linesize = rgb_frame->linesize[0];
  int linesize_src = rgb_frame->width * 4;
  for (int y = 0; y < height; y++) {
    /* Write source row `y` into the vertically mirrored destination row. */
    uint8_t *target = rgb_frame->data[0] + linesize * (height - y - 1);
    const uint8_t *src = pixels + linesize_src * y;

# if ENDIAN_ORDER == L_ENDIAN
    memcpy(target, src, linesize_src);

# elif ENDIAN_ORDER == B_ENDIAN
    /* Swap the byte order of each 4-byte pixel while copying. */
    const uint8_t *end = src + linesize_src;
    while (src != end) {
      target[3] = src[0];
      target[2] = src[1];
      target[1] = src[2];
      target[0] = src[3];

      target += 4;
      src += 4;
    }
# else
#  error ENDIAN_ORDER should either be L_ENDIAN or B_ENDIAN.
# endif
  }

  /* Convert to the output pixel format, if it's different than Blender's internal one. */
  if (context->img_convert_frame != nullptr) {
    BLI_assert(context->img_convert_ctx != NULL);
    /* Ensure the frame we are scaling to is writable as well. */
    av_frame_make_writable(context->current_frame);
    BKE_ffmpeg_sws_scale_frame(context->img_convert_ctx, context->current_frame, rgb_frame);
  }

  return context->current_frame;
}
461
/* Build an FFMPEG time base (integer num/den pair) from Blender's frame rate
 * settings, where `num` may be fractional (e.g. NTSC rates). */
static AVRational calc_time_base(uint den, double num, int codec_id)
{
  /* Convert the input 'num' to an integer. Simply shift the decimal places until we get an integer
   * (within a floating point error range).
   * For example if we have `den = 3` and `num = 0.1` then the fps is: `den/num = 30` fps.
   * When converting this to a FFMPEG time base, we want num to be an integer.
   * So we simply move the decimal places of both numbers. i.e. `den = 30`, `num = 1`. */
  float eps = FLT_EPSILON;
  /* MPEG4 only allows a 16-bit denominator; everything else gets 31 bits. */
  const uint DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;

  /* Calculate the precision of the initial floating point number. */
  if (num > 1.0) {
    const uint num_integer_bits = log2_floor_u(uint(num));

    /* Formula for calculating the epsilon value: (power of two range) / (pow mantissa bits)
     * For example, a float has 23 mantissa bits and the float value 3.5f as a pow2 range of
     * (4-2=2):
     * (2) / pow2(23) = floating point precision for 3.5f
     */
    eps = float(1 << num_integer_bits) * FLT_EPSILON;
  }

  /* Calculate how many decimal shifts we can do until we run out of precision. */
  const int max_num_shift = fabsf(log10f(eps));
  /* Calculate how many times we can shift the denominator. */
  const int max_den_shift = log10f(DENUM_MAX) - log10f(den);
  const int max_iter = min_ii(max_num_shift, max_den_shift);

  for (int i = 0; i < max_iter && fabs(num - round(num)) > eps; i++) {
    /* Increase the number and denominator until both are integers. */
    num *= 10;
    den *= 10;
    eps *= 10;
  }

  AVRational time_base;
  time_base.den = den;
  time_base.num = int(num);

  return time_base;
}
503
504static const AVCodec *get_av1_encoder(
505 FFMpegContext *context, RenderData *rd, AVDictionary **opts, int rectx, int recty)
506{
507 /* There are three possible encoders for AV1: `libaom-av1`, librav1e, and `libsvtav1`. librav1e
508 * tends to give the best compression quality while `libsvtav1` tends to be the fastest encoder.
509 * One of each will be picked based on the preset setting, and if a particular encoder is not
510 * available, then use the default returned by FFMpeg. */
511 const AVCodec *codec = nullptr;
512 switch (context->ffmpeg_preset) {
513 case FFM_PRESET_BEST:
514 /* `libaom-av1` may produce better VMAF-scoring videos in several cases, but there are cases
515 * where using a different encoder is desirable, such as in #103849. */
516 codec = avcodec_find_encoder_by_name("librav1e");
517 if (!codec) {
518 /* Fallback to `libaom-av1` if librav1e is not found. */
519 codec = avcodec_find_encoder_by_name("libaom-av1");
520 }
521 break;
523 codec = avcodec_find_encoder_by_name("libsvtav1");
524 break;
525 case FFM_PRESET_GOOD:
526 default:
527 codec = avcodec_find_encoder_by_name("libaom-av1");
528 break;
529 }
530
531 /* Use the default AV1 encoder if the specified encoder wasn't found. */
532 if (!codec) {
533 codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
534 }
535
536 /* Apply AV1 encoder specific settings. */
537 if (codec) {
538 if (STREQ(codec->name, "librav1e")) {
539 /* Set "tiles" to 8 to enable multi-threaded encoding. */
540 if (rd->threads > 8) {
541 ffmpeg_dict_set_int(opts, "tiles", rd->threads);
542 }
543 else {
544 ffmpeg_dict_set_int(opts, "tiles", 8);
545 }
546
547 /* Use a reasonable speed setting based on preset. Speed ranges from 0-10.
548 * Must check context->ffmpeg_preset again in case this encoder was selected due to the
549 * absence of another. */
550 switch (context->ffmpeg_preset) {
551 case FFM_PRESET_BEST:
552 ffmpeg_dict_set_int(opts, "speed", 4);
553 break;
555 ffmpeg_dict_set_int(opts, "speed", 10);
556 break;
557 case FFM_PRESET_GOOD:
558 default:
559 ffmpeg_dict_set_int(opts, "speed", 6);
560 break;
561 }
562 if (context->ffmpeg_crf >= 0) {
563 /* librav1e does not use `-crf`, but uses `-qp` in the range of 0-255.
564 * Calculates the roughly equivalent float, and truncates it to an integer. */
565 uint qp_value = float(context->ffmpeg_crf) * 255.0f / 51.0f;
566 if (qp_value > 255) {
567 qp_value = 255;
568 }
569 ffmpeg_dict_set_int(opts, "qp", qp_value);
570 }
571 /* Set gop_size as rav1e's "--keyint". */
572 char buffer[64];
573 SNPRINTF(buffer, "keyint=%d", context->ffmpeg_gop_size);
574 av_dict_set(opts, "rav1e-params", buffer, 0);
575 }
576 else if (STREQ(codec->name, "libsvtav1")) {
577 /* Set preset value based on ffmpeg_preset.
578 * Must check `context->ffmpeg_preset` again in case this encoder was selected due to the
579 * absence of another. */
580 switch (context->ffmpeg_preset) {
582 ffmpeg_dict_set_int(opts, "preset", 8);
583 break;
584 case FFM_PRESET_BEST:
585 ffmpeg_dict_set_int(opts, "preset", 3);
586 break;
587 case FFM_PRESET_GOOD:
588 default:
589 ffmpeg_dict_set_int(opts, "preset", 5);
590 break;
591 }
592 if (context->ffmpeg_crf >= 0) {
593 /* `libsvtav1` does not support CRF until FFMPEG builds since 2022-02-24,
594 * use `qp` as fallback. */
595 ffmpeg_dict_set_int(opts, "qp", context->ffmpeg_crf);
596 }
597 }
598 else if (STREQ(codec->name, "libaom-av1")) {
599 /* Speed up libaom-av1 encoding by enabling multi-threading and setting tiles. */
600 ffmpeg_dict_set_int(opts, "row-mt", 1);
601 const char *tiles_string = nullptr;
602 bool tiles_string_is_dynamic = false;
603 if (rd->threads > 0) {
604 /* See if threads is a square. */
605 int threads_sqrt = sqrtf(rd->threads);
606 if (threads_sqrt < 4) {
607 /* Ensure a default minimum. */
608 threads_sqrt = 4;
609 }
610 if (is_power_of_2_i(threads_sqrt) && threads_sqrt * threads_sqrt == rd->threads) {
611 /* Is a square num, therefore just do "sqrt x sqrt" for tiles parameter. */
612 int digits = 0;
613 for (int t_sqrt_copy = threads_sqrt; t_sqrt_copy > 0; t_sqrt_copy /= 10) {
614 ++digits;
615 }
616 /* A char array need only an alignment of 1. */
617 char *tiles_string_mut = (char *)calloc(digits * 2 + 2, 1);
618 BLI_snprintf(tiles_string_mut, digits * 2 + 2, "%dx%d", threads_sqrt, threads_sqrt);
619 tiles_string_is_dynamic = true;
620 tiles_string = tiles_string_mut;
621 }
622 else {
623 /* Is not a square num, set greater side based on longer side, or use a square if both
624 * sides are equal. */
625 int sqrt_p2 = power_of_2_min_i(threads_sqrt);
626 if (sqrt_p2 < 2) {
627 /* Ensure a default minimum. */
628 sqrt_p2 = 2;
629 }
630 int sqrt_p2_next = power_of_2_min_i(int(rd->threads) / sqrt_p2);
631 if (sqrt_p2_next < 1) {
632 sqrt_p2_next = 1;
633 }
634 if (sqrt_p2 > sqrt_p2_next) {
635 /* Ensure sqrt_p2_next is greater or equal to sqrt_p2. */
636 int temp = sqrt_p2;
637 sqrt_p2 = sqrt_p2_next;
638 sqrt_p2_next = temp;
639 }
640 int combined_digits = 0;
641 for (int sqrt_p2_copy = sqrt_p2; sqrt_p2_copy > 0; sqrt_p2_copy /= 10) {
642 ++combined_digits;
643 }
644 for (int sqrt_p2_copy = sqrt_p2_next; sqrt_p2_copy > 0; sqrt_p2_copy /= 10) {
645 ++combined_digits;
646 }
647 /* A char array need only an alignment of 1. */
648 char *tiles_string_mut = (char *)calloc(combined_digits + 2, 1);
649 if (rectx > recty) {
650 BLI_snprintf(tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2_next, sqrt_p2);
651 }
652 else if (rectx < recty) {
653 BLI_snprintf(tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2, sqrt_p2_next);
654 }
655 else {
656 BLI_snprintf(tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2, sqrt_p2);
657 }
658 tiles_string_is_dynamic = true;
659 tiles_string = tiles_string_mut;
660 }
661 }
662 else {
663 /* Thread count unknown, default to 8. */
664 if (rectx > recty) {
665 tiles_string = "4x2";
666 }
667 else if (rectx < recty) {
668 tiles_string = "2x4";
669 }
670 else {
671 tiles_string = "2x2";
672 }
673 }
674 av_dict_set(opts, "tiles", tiles_string, 0);
675 if (tiles_string_is_dynamic) {
676 free((void *)tiles_string);
677 }
678 /* libaom-av1 uses "cpu-used" instead of "preset" for defining compression quality.
679 * This value is in a range from 0-8. 0 and 8 are extremes, but we will allow 8.
680 * Must check context->ffmpeg_preset again in case this encoder was selected due to the
681 * absence of another. */
682 switch (context->ffmpeg_preset) {
684 ffmpeg_dict_set_int(opts, "cpu-used", 8);
685 break;
686 case FFM_PRESET_BEST:
687 ffmpeg_dict_set_int(opts, "cpu-used", 4);
688 break;
689 case FFM_PRESET_GOOD:
690 default:
691 ffmpeg_dict_set_int(opts, "cpu-used", 6);
692 break;
693 }
694
695 /* CRF related settings is similar to H264 for libaom-av1, so we will rely on those settings
696 * applied later. */
697 }
698 }
699
700 return codec;
701}
702
/* Create a libswscale context for the given conversion parameters.
 * When the build supports threaded swscale, the context is configured for
 * multi-threaded scaling; otherwise a regular context is created.
 * Returns null on failure. */
static SwsContext *sws_create_context(int src_width,
                                      int src_height,
                                      int av_src_format,
                                      int dst_width,
                                      int dst_height,
                                      int av_dst_format,
                                      int sws_flags)
{
# if defined(FFMPEG_SWSCALE_THREADING)
  /* sws_getContext does not allow passing flags that ask for multi-threaded
   * scaling context, so do it the hard way. */
  SwsContext *c = sws_alloc_context();
  if (c == nullptr) {
    return nullptr;
  }
  av_opt_set_int(c, "srcw", src_width, 0);
  av_opt_set_int(c, "srch", src_height, 0);
  av_opt_set_int(c, "src_format", av_src_format, 0);
  av_opt_set_int(c, "dstw", dst_width, 0);
  av_opt_set_int(c, "dsth", dst_height, 0);
  av_opt_set_int(c, "dst_format", av_dst_format, 0);
  av_opt_set_int(c, "sws_flags", sws_flags, 0);
  /* One scaling thread per logical CPU. */
  av_opt_set_int(c, "threads", BLI_system_thread_count(), 0);

  if (sws_init_context(c, nullptr, nullptr) < 0) {
    sws_freeContext(c);
    return nullptr;
  }
# else
  SwsContext *c = sws_getContext(src_width,
                                 src_height,
                                 AVPixelFormat(av_src_format),
                                 dst_width,
                                 dst_height,
                                 AVPixelFormat(av_dst_format),
                                 sws_flags,
                                 nullptr,
                                 nullptr,
                                 nullptr);
# endif

  return c;
}
746
747static void init_swscale_cache_if_needed()
748{
749 if (swscale_cache == nullptr) {
750 swscale_cache = new blender::Vector<SwscaleContext>();
751 swscale_cache_timestamp = 0;
752 }
753}
754
755static bool remove_oldest_swscale_context()
756{
757 int64_t oldest_index = -1;
758 int64_t oldest_time = 0;
759 for (int64_t index = 0; index < swscale_cache->size(); index++) {
760 SwscaleContext &ctx = (*swscale_cache)[index];
761 if (ctx.is_used) {
762 continue;
763 }
764 int64_t time = swscale_cache_timestamp - ctx.last_use_timestamp;
765 if (time > oldest_time) {
766 oldest_time = time;
767 oldest_index = index;
768 }
769 }
770
771 if (oldest_index >= 0) {
772 SwscaleContext &ctx = (*swscale_cache)[oldest_index];
773 sws_freeContext(ctx.context);
774 swscale_cache->remove_and_reorder(oldest_index);
775 return true;
776 }
777 return false;
778}
779
780static void maintain_swscale_cache_size()
781{
782 while (swscale_cache->size() > swscale_cache_max_entries) {
783 if (!remove_oldest_swscale_context()) {
784 /* Could not remove anything (all contexts are actively used),
785 * stop trying. */
786 break;
787 }
788 }
789}
790
/* Acquire a libswscale context for the given conversion parameters, reusing a
 * cached one when possible. The returned context is marked in-use and must be
 * given back with #BKE_ffmpeg_sws_release_context. Thread-safe. */
SwsContext *BKE_ffmpeg_sws_get_context(int src_width,
                                       int src_height,
                                       int av_src_format,
                                       int dst_width,
                                       int dst_height,
                                       int av_dst_format,
                                       int sws_flags)
{
  BLI_mutex_lock(&swscale_cache_lock);

  init_swscale_cache_if_needed();

  /* Advance the logical clock used for LRU eviction. */
  swscale_cache_timestamp++;

  /* Search for unused context that has suitable parameters. */
  SwsContext *ctx = nullptr;
  for (SwscaleContext &c : *swscale_cache) {
    if (!c.is_used && c.src_width == src_width && c.src_height == src_height &&
        c.src_format == av_src_format && c.dst_width == dst_width && c.dst_height == dst_height &&
        c.dst_format == av_dst_format && c.flags == sws_flags)
    {
      ctx = c.context;
      /* Mark as used. */
      c.is_used = true;
      c.last_use_timestamp = swscale_cache_timestamp;
      break;
    }
  }
  if (ctx == nullptr) {
    /* No free matching context in cache: create a new one. */
    ctx = sws_create_context(
        src_width, src_height, av_src_format, dst_width, dst_height, av_dst_format, sws_flags);
    SwscaleContext c;
    c.src_width = src_width;
    c.src_height = src_height;
    c.dst_width = dst_width;
    c.dst_height = dst_height;
    c.src_format = AVPixelFormat(av_src_format);
    c.dst_format = AVPixelFormat(av_dst_format);
    c.flags = sws_flags;
    c.context = ctx;
    c.is_used = true;
    c.last_use_timestamp = swscale_cache_timestamp;
    swscale_cache->append(c);

    /* Adding an entry may have pushed the cache over its size limit. */
    maintain_swscale_cache_size();
  }

  BLI_mutex_unlock(&swscale_cache_lock);
  return ctx;
}
842
843void BKE_ffmpeg_sws_release_context(SwsContext *ctx)
844{
845 BLI_mutex_lock(&swscale_cache_lock);
846 init_swscale_cache_if_needed();
847
848 bool found = false;
849 for (SwscaleContext &c : *swscale_cache) {
850 if (c.context == ctx) {
851 BLI_assert_msg(c.is_used, "Releasing ffmpeg swscale context that is not in use");
852 c.is_used = false;
853 found = true;
854 break;
855 }
856 }
857 BLI_assert_msg(found, "Releasing ffmpeg swscale context that is not in cache");
858 UNUSED_VARS_NDEBUG(found);
859 maintain_swscale_cache_size();
860
861 BLI_mutex_unlock(&swscale_cache_lock);
862}
863
864void BKE_ffmpeg_exit()
865{
866 BLI_mutex_lock(&swscale_cache_lock);
867 if (swscale_cache != nullptr) {
868 for (SwscaleContext &c : *swscale_cache) {
869 sws_freeContext(c.context);
870 }
871 delete swscale_cache;
872 swscale_cache = nullptr;
873 }
874 BLI_mutex_unlock(&swscale_cache_lock);
875}
876
/* Scale/convert `src` into `dst` using the given swscale context.
 * Uses the whole-frame API when the threaded swscale build is available,
 * otherwise falls back to the plane-pointer `sws_scale` call. */
void BKE_ffmpeg_sws_scale_frame(SwsContext *ctx, AVFrame *dst, const AVFrame *src)
{
# if defined(FFMPEG_SWSCALE_THREADING)
  sws_scale_frame(ctx, dst, src);
# else
  sws_scale(ctx, src->data, src->linesize, 0, src->height, dst->data, dst->linesize);
# endif
}
885
886/* prepare a video stream for the output file */
887
888static AVStream *alloc_video_stream(FFMpegContext *context,
889 RenderData *rd,
890 AVCodecID codec_id,
891 AVFormatContext *of,
892 int rectx,
893 int recty,
894 char *error,
895 int error_size)
896{
897 AVStream *st;
898 const AVCodec *codec;
899 AVDictionary *opts = nullptr;
900
901 error[0] = '\0';
902
903 st = avformat_new_stream(of, nullptr);
904 if (!st) {
905 return nullptr;
906 }
907 st->id = 0;
908
909 /* Set up the codec context */
910
911 if (codec_id == AV_CODEC_ID_AV1) {
912 /* Use get_av1_encoder() to get the ideal (hopefully) encoder for AV1 based
913 * on given parameters, and also set up opts. */
914 codec = get_av1_encoder(context, rd, &opts, rectx, recty);
915 }
916 else {
917 codec = avcodec_find_encoder(codec_id);
918 }
919 if (!codec) {
920 fprintf(stderr, "Couldn't find valid video codec\n");
921 context->video_codec = nullptr;
922 return nullptr;
923 }
924
925 context->video_codec = avcodec_alloc_context3(codec);
926 AVCodecContext *c = context->video_codec;
927
928 /* Get some values from the current render settings */
929
930 c->width = rectx;
931 c->height = recty;
932
933 if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
934 /* FIXME: Really bad hack (tm) for NTSC support */
935 c->time_base.den = 2997;
936 c->time_base.num = 100;
937 }
938 else if (float(int(rd->frs_sec_base)) == rd->frs_sec_base) {
939 c->time_base.den = rd->frs_sec;
940 c->time_base.num = int(rd->frs_sec_base);
941 }
942 else {
943 c->time_base = calc_time_base(rd->frs_sec, rd->frs_sec_base, codec_id);
944 }
945
946 /* As per the time-base documentation here:
947 * https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options
948 * We want to set the time base to (1 / fps) for fixed frame rate video.
949 * If it is not possible, we want to set the time-base numbers to something as
950 * small as possible.
951 */
952 if (c->time_base.num != 1) {
953 AVRational new_time_base;
954 if (av_reduce(
955 &new_time_base.num, &new_time_base.den, c->time_base.num, c->time_base.den, INT_MAX))
956 {
957 /* Exact reduction was possible. Use the new value. */
958 c->time_base = new_time_base;
959 }
960 }
961
962 st->time_base = c->time_base;
963
964 c->gop_size = context->ffmpeg_gop_size;
965 c->max_b_frames = context->ffmpeg_max_b_frames;
966
967 if (context->ffmpeg_type == FFMPEG_WEBM && context->ffmpeg_crf == 0) {
968 ffmpeg_dict_set_int(&opts, "lossless", 1);
969 }
970 else if (context->ffmpeg_crf >= 0) {
971 /* As per https://trac.ffmpeg.org/wiki/Encode/VP9 we must set the bit rate to zero when
972 * encoding with VP9 in CRF mode.
973 * Set this to always be zero for other codecs as well.
974 * We don't care about bit rate in CRF mode. */
975 c->bit_rate = 0;
976 ffmpeg_dict_set_int(&opts, "crf", context->ffmpeg_crf);
977 }
978 else {
979 c->bit_rate = context->ffmpeg_video_bitrate * 1000;
980 c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
981 c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
982 c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;
983 }
984
985 if (context->ffmpeg_preset) {
986 /* 'preset' is used by h.264, 'deadline' is used by WEBM/VP9. I'm not
987 * setting those properties conditionally based on the video codec,
988 * as the FFmpeg encoder simply ignores unknown settings anyway. */
989 char const *preset_name = nullptr; /* Used by h.264. */
990 char const *deadline_name = nullptr; /* Used by WEBM/VP9. */
991 switch (context->ffmpeg_preset) {
992 case FFM_PRESET_GOOD:
993 preset_name = "medium";
994 deadline_name = "good";
995 break;
996 case FFM_PRESET_BEST:
997 preset_name = "slower";
998 deadline_name = "best";
999 break;
1001 preset_name = "superfast";
1002 deadline_name = "realtime";
1003 break;
1004 default:
1005 printf("Unknown preset number %i, ignoring.\n", context->ffmpeg_preset);
1006 }
1007 /* "codec_id != AV_CODEC_ID_AV1" is required due to "preset" already being set by an AV1 codec.
1008 */
1009 if (preset_name != nullptr && codec_id != AV_CODEC_ID_AV1) {
1010 av_dict_set(&opts, "preset", preset_name, 0);
1011 }
1012 if (deadline_name != nullptr) {
1013 av_dict_set(&opts, "deadline", deadline_name, 0);
1014 }
1015 }
1016
1017 /* Be sure to use the correct pixel format(e.g. RGB, YUV) */
1018
1019 if (codec->pix_fmts) {
1020 c->pix_fmt = codec->pix_fmts[0];
1021 }
1022 else {
1023 /* makes HuffYUV happy ... */
1024 c->pix_fmt = AV_PIX_FMT_YUV422P;
1025 }
1026
1027 if (context->ffmpeg_type == FFMPEG_XVID) {
1028 /* Alas! */
1029 c->pix_fmt = AV_PIX_FMT_YUV420P;
1030 c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
1031 }
1032
1033 /* Keep lossless encodes in the RGB domain. */
1034 if (codec_id == AV_CODEC_ID_HUFFYUV) {
1035 if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
1036 c->pix_fmt = AV_PIX_FMT_BGRA;
1037 }
1038 else {
1039 c->pix_fmt = AV_PIX_FMT_RGB32;
1040 }
1041 }
1042
1043 if (codec_id == AV_CODEC_ID_DNXHD) {
1045 /* Set the block decision algorithm to be of the highest quality ("rd" == 2). */
1046 c->mb_decision = 2;
1047 }
1048 }
1049
1050 if (codec_id == AV_CODEC_ID_FFV1) {
1051 c->pix_fmt = AV_PIX_FMT_RGB32;
1052 }
1053
1054 if (codec_id == AV_CODEC_ID_QTRLE) {
1055 if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
1056 c->pix_fmt = AV_PIX_FMT_ARGB;
1057 }
1058 }
1059
1060 if (codec_id == AV_CODEC_ID_VP9 && rd->im_format.planes == R_IMF_PLANES_RGBA) {
1061 c->pix_fmt = AV_PIX_FMT_YUVA420P;
1062 }
1063 else if (ELEM(codec_id, AV_CODEC_ID_H264, AV_CODEC_ID_VP9) && (context->ffmpeg_crf == 0)) {
1064 /* Use 4:4:4 instead of 4:2:0 pixel format for lossless rendering. */
1065 c->pix_fmt = AV_PIX_FMT_YUV444P;
1066 }
1067
1068 if (codec_id == AV_CODEC_ID_PNG) {
1069 if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
1070 c->pix_fmt = AV_PIX_FMT_RGBA;
1071 }
1072 }
1073
1074 if (of->oformat->flags & AVFMT_GLOBALHEADER) {
1075 PRINT("Using global header\n");
1076 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
1077 }
1078
1079 /* xasp & yasp got float lately... */
1080
1081 st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q((double(rd->xasp) / double(rd->yasp)),
1082 255);
1083 st->avg_frame_rate = av_inv_q(c->time_base);
1084
1085 if (codec->capabilities & AV_CODEC_CAP_OTHER_THREADS) {
1086 c->thread_count = 0;
1087 }
1088 else {
1089 c->thread_count = BLI_system_thread_count();
1090 }
1091
1092 if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) {
1093 c->thread_type = FF_THREAD_FRAME;
1094 }
1095 else if (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
1096 c->thread_type = FF_THREAD_SLICE;
1097 }
1098
1099 int ret = avcodec_open2(c, codec, &opts);
1100
1101 if (ret < 0) {
1102 char error_str[AV_ERROR_MAX_STRING_SIZE];
1103 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1104 fprintf(stderr, "Couldn't initialize video codec: %s\n", error_str);
1105 BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
1106 av_dict_free(&opts);
1107 avcodec_free_context(&c);
1108 context->video_codec = nullptr;
1109 return nullptr;
1110 }
1111 av_dict_free(&opts);
1112
1113 /* FFMPEG expects its data in the output pixel format. */
1114 context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);
1115
1116 if (c->pix_fmt == AV_PIX_FMT_RGBA) {
1117 /* Output pixel format is the same we use internally, no conversion necessary. */
1118 context->img_convert_frame = nullptr;
1119 context->img_convert_ctx = nullptr;
1120 }
1121 else {
1122 /* Output pixel format is different, allocate frame for conversion. */
1123 context->img_convert_frame = alloc_picture(AV_PIX_FMT_RGBA, c->width, c->height);
1124 context->img_convert_ctx = BKE_ffmpeg_sws_get_context(
1125 c->width, c->height, AV_PIX_FMT_RGBA, c->width, c->height, c->pix_fmt, SWS_BICUBIC);
1126 }
1127
1128 avcodec_parameters_from_context(st->codecpar, c);
1129
1130 context->video_time = 0.0f;
1131
1132 return st;
1133}
1134
1135static AVStream *alloc_audio_stream(FFMpegContext *context,
1136 RenderData *rd,
1137 AVCodecID codec_id,
1138 AVFormatContext *of,
1139 char *error,
1140 int error_size)
1141{
1142 AVStream *st;
1143 const AVCodec *codec;
1144
1145 error[0] = '\0';
1146
1147 st = avformat_new_stream(of, nullptr);
1148 if (!st) {
1149 return nullptr;
1150 }
1151 st->id = 1;
1152
1153 codec = avcodec_find_encoder(codec_id);
1154 if (!codec) {
1155 fprintf(stderr, "Couldn't find valid audio codec\n");
1156 context->audio_codec = nullptr;
1157 return nullptr;
1158 }
1159
1160 context->audio_codec = avcodec_alloc_context3(codec);
1161 AVCodecContext *c = context->audio_codec;
1162 c->thread_count = BLI_system_thread_count();
1163 c->thread_type = FF_THREAD_SLICE;
1164
1165 c->sample_rate = rd->ffcodecdata.audio_mixrate;
1166 c->bit_rate = context->ffmpeg_audio_bitrate * 1000;
1167 c->sample_fmt = AV_SAMPLE_FMT_S16;
1168
1169 const int num_channels = rd->ffcodecdata.audio_channels;
1170 int channel_layout_mask = 0;
1171 switch (rd->ffcodecdata.audio_channels) {
1172 case FFM_CHANNELS_MONO:
1173 channel_layout_mask = AV_CH_LAYOUT_MONO;
1174 break;
1176 channel_layout_mask = AV_CH_LAYOUT_STEREO;
1177 break;
1179 channel_layout_mask = AV_CH_LAYOUT_QUAD;
1180 break;
1182 channel_layout_mask = AV_CH_LAYOUT_5POINT1_BACK;
1183 break;
1185 channel_layout_mask = AV_CH_LAYOUT_7POINT1;
1186 break;
1187 }
1188 BLI_assert(channel_layout_mask != 0);
1189
1190# ifdef FFMPEG_USE_OLD_CHANNEL_VARS
1191 c->channels = num_channels;
1192 c->channel_layout = channel_layout_mask;
1193# else
1194 av_channel_layout_from_mask(&c->ch_layout, channel_layout_mask);
1195# endif
1196
1197 if (request_float_audio_buffer(codec_id)) {
1198 /* mainly for AAC codec which is experimental */
1199 c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
1200 c->sample_fmt = AV_SAMPLE_FMT_FLT;
1201 }
1202
1203 if (codec->sample_fmts) {
1204 /* Check if the preferred sample format for this codec is supported.
1205 * this is because, depending on the version of LIBAV,
1206 * and with the whole FFMPEG/LIBAV fork situation,
1207 * you have various implementations around.
1208 * Float samples in particular are not always supported. */
1209 const enum AVSampleFormat *p = codec->sample_fmts;
1210 for (; *p != -1; p++) {
1211 if (*p == c->sample_fmt) {
1212 break;
1213 }
1214 }
1215 if (*p == -1) {
1216 /* sample format incompatible with codec. Defaulting to a format known to work */
1217 c->sample_fmt = codec->sample_fmts[0];
1218 }
1219 }
1220
1221 if (codec->supported_samplerates) {
1222 const int *p = codec->supported_samplerates;
1223 int best = 0;
1224 int best_dist = INT_MAX;
1225 for (; *p; p++) {
1226 int dist = abs(c->sample_rate - *p);
1227 if (dist < best_dist) {
1228 best_dist = dist;
1229 best = *p;
1230 }
1231 }
1232 /* best is the closest supported sample rate (same as selected if best_dist == 0) */
1233 c->sample_rate = best;
1234 }
1235
1236 if (of->oformat->flags & AVFMT_GLOBALHEADER) {
1237 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
1238 }
1239
1240 int ret = avcodec_open2(c, codec, nullptr);
1241
1242 if (ret < 0) {
1243 char error_str[AV_ERROR_MAX_STRING_SIZE];
1244 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1245 fprintf(stderr, "Couldn't initialize audio codec: %s\n", error_str);
1246 BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
1247 avcodec_free_context(&c);
1248 context->audio_codec = nullptr;
1249 return nullptr;
1250 }
1251
1252 /* Need to prevent floating point exception when using VORBIS audio codec,
1253 * initialize this value in the same way as it's done in FFMPEG itself (sergey) */
1254 c->time_base.num = 1;
1255 c->time_base.den = c->sample_rate;
1256
1257 if (c->frame_size == 0) {
1258 /* Used to be if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))
1259 * not sure if that is needed anymore, so let's try out if there are any
1260 * complaints regarding some FFMPEG versions users might have. */
1261 context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample /
1262 num_channels;
1263 }
1264 else {
1265 context->audio_input_samples = c->frame_size;
1266 }
1267
1268 context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);
1269
1270 context->audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);
1271
1272 context->audio_input_buffer = (uint8_t *)av_malloc(context->audio_input_samples * num_channels *
1273 context->audio_sample_size);
1274 if (context->audio_deinterleave) {
1275 context->audio_deinterleave_buffer = (uint8_t *)av_malloc(
1276 context->audio_input_samples * num_channels * context->audio_sample_size);
1277 }
1278
1279 context->audio_time = 0.0f;
1280
1281 avcodec_parameters_from_context(st->codecpar, c);
1282
1283 return st;
1284}
1285/* essential functions -- start, append, end */
1286
1287static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value)
1288{
1289 char buffer[32];
1290
1291 SNPRINTF(buffer, "%d", value);
1292
1293 av_dict_set(dict, key, buffer, 0);
1294}
1295
1296static void ffmpeg_add_metadata_callback(void *data,
1297 const char *propname,
1298 char *propvalue,
1299 int /*propvalue_maxncpy*/)
1300{
1301 AVDictionary **metadata = (AVDictionary **)data;
1302 av_dict_set(metadata, propname, propvalue, 0);
1303}
1304
1305static bool start_ffmpeg_impl(FFMpegContext *context,
1306 RenderData *rd,
1307 int rectx,
1308 int recty,
1309 const char *suffix,
1310 ReportList *reports)
1311{
1312 /* Handle to the output file */
1313 AVFormatContext *of;
1314 const AVOutputFormat *fmt;
1315 char filepath[FILE_MAX], error[1024];
1316 const char **exts;
1317 int ret = 0;
1318
1319 context->ffmpeg_type = rd->ffcodecdata.type;
1320 context->ffmpeg_codec = AVCodecID(rd->ffcodecdata.codec);
1321 context->ffmpeg_audio_codec = AVCodecID(rd->ffcodecdata.audio_codec);
1322 context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
1323 context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
1324 context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
1325 context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;
1326 context->ffmpeg_crf = rd->ffcodecdata.constant_rate_factor;
1327 context->ffmpeg_preset = rd->ffcodecdata.ffmpeg_preset;
1328
1329 if ((rd->ffcodecdata.flags & FFMPEG_USE_MAX_B_FRAMES) != 0) {
1330 context->ffmpeg_max_b_frames = rd->ffcodecdata.max_b_frames;
1331 }
1332
1333 /* Determine the correct filename */
1334 ffmpeg_filepath_get(context, filepath, rd, context->ffmpeg_preview, suffix);
1335 PRINT(
1336 "Starting output to %s(FFMPEG)...\n"
1337 " Using type=%d, codec=%d, audio_codec=%d,\n"
1338 " video_bitrate=%d, audio_bitrate=%d,\n"
1339 " gop_size=%d, autosplit=%d\n"
1340 " render width=%d, render height=%d\n",
1341 filepath,
1342 context->ffmpeg_type,
1343 context->ffmpeg_codec,
1344 context->ffmpeg_audio_codec,
1345 context->ffmpeg_video_bitrate,
1346 context->ffmpeg_audio_bitrate,
1347 context->ffmpeg_gop_size,
1348 context->ffmpeg_autosplit,
1349 rectx,
1350 recty);
1351
1352 /* Sanity checks for the output file extensions. */
1353 exts = get_file_extensions(context->ffmpeg_type);
1354 if (!exts) {
1355 BKE_report(reports, RPT_ERROR, "No valid formats found");
1356 return false;
1357 }
1358
1359 fmt = av_guess_format(nullptr, exts[0], nullptr);
1360 if (!fmt) {
1361 BKE_report(reports, RPT_ERROR, "No valid formats found");
1362 return false;
1363 }
1364
1365 of = avformat_alloc_context();
1366 if (!of) {
1367 BKE_report(reports, RPT_ERROR, "Can't allocate FFmpeg format context");
1368 return false;
1369 }
1370
1371 enum AVCodecID audio_codec = context->ffmpeg_audio_codec;
1372 enum AVCodecID video_codec = context->ffmpeg_codec;
1373
1374 of->url = av_strdup(filepath);
1375 /* Check if we need to force change the codec because of file type codec restrictions */
1376 switch (context->ffmpeg_type) {
1377 case FFMPEG_OGG:
1378 video_codec = AV_CODEC_ID_THEORA;
1379 break;
1380 case FFMPEG_DV:
1381 video_codec = AV_CODEC_ID_DVVIDEO;
1382 break;
1383 case FFMPEG_MPEG1:
1384 video_codec = AV_CODEC_ID_MPEG1VIDEO;
1385 break;
1386 case FFMPEG_MPEG2:
1387 video_codec = AV_CODEC_ID_MPEG2VIDEO;
1388 break;
1389 case FFMPEG_H264:
1390 video_codec = AV_CODEC_ID_H264;
1391 break;
1392 case FFMPEG_XVID:
1393 video_codec = AV_CODEC_ID_MPEG4;
1394 break;
1395 case FFMPEG_FLV:
1396 video_codec = AV_CODEC_ID_FLV1;
1397 break;
1398 case FFMPEG_AV1:
1399 video_codec = AV_CODEC_ID_AV1;
1400 break;
1401 default:
1402 /* These containers are not restricted to any specific codec types.
1403 * Currently we expect these to be `.avi`, `.mov`, `.mkv`, and `.mp4`. */
1404 video_codec = context->ffmpeg_codec;
1405 break;
1406 }
1407
1408 /* Returns after this must 'goto fail;' */
1409
1410# if LIBAVFORMAT_VERSION_MAJOR >= 59
1411 of->oformat = fmt;
1412# else
1413 /* *DEPRECATED* 2022/08/01 For FFMPEG (<5.0) remove this else branch and the `ifdef` above. */
1414 of->oformat = (AVOutputFormat *)fmt;
1415# endif
1416
1417 if (video_codec == AV_CODEC_ID_DVVIDEO) {
1418 if (rectx != 720) {
1419 BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
1420 goto fail;
1421 }
1422 if (rd->frs_sec != 25 && recty != 480) {
1423 BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
1424 goto fail;
1425 }
1426 if (rd->frs_sec == 25 && recty != 576) {
1427 BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
1428 goto fail;
1429 }
1430 }
1431
1432 if (context->ffmpeg_type == FFMPEG_DV) {
1433 audio_codec = AV_CODEC_ID_PCM_S16LE;
1434 if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
1435 rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2)
1436 {
1437 BKE_report(reports, RPT_ERROR, "FFmpeg only supports 48khz / stereo audio for DV!");
1438 goto fail;
1439 }
1440 }
1441
1442 if (video_codec != AV_CODEC_ID_NONE) {
1443 context->video_stream = alloc_video_stream(
1444 context, rd, video_codec, of, rectx, recty, error, sizeof(error));
1445 PRINT("alloc video stream %p\n", context->video_stream);
1446 if (!context->video_stream) {
1447 if (error[0]) {
1448 BKE_report(reports, RPT_ERROR, error);
1449 PRINT("Video stream error: %s\n", error);
1450 }
1451 else {
1452 BKE_report(reports, RPT_ERROR, "Error initializing video stream");
1453 PRINT("Error initializing video stream");
1454 }
1455 goto fail;
1456 }
1457 }
1458
1459 if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
1460 context->audio_stream = alloc_audio_stream(context, rd, audio_codec, of, error, sizeof(error));
1461 if (!context->audio_stream) {
1462 if (error[0]) {
1463 BKE_report(reports, RPT_ERROR, error);
1464 PRINT("Audio stream error: %s\n", error);
1465 }
1466 else {
1467 BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
1468 PRINT("Error initializing audio stream");
1469 }
1470 goto fail;
1471 }
1472 }
1473 if (!(fmt->flags & AVFMT_NOFILE)) {
1474 if (avio_open(&of->pb, filepath, AVIO_FLAG_WRITE) < 0) {
1475 BKE_report(reports, RPT_ERROR, "Could not open file for writing");
1476 PRINT("Could not open file for writing\n");
1477 goto fail;
1478 }
1479 }
1480
1481 if (context->stamp_data != nullptr) {
1483 &of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
1484 }
1485
1486 ret = avformat_write_header(of, nullptr);
1487 if (ret < 0) {
1488 BKE_report(reports,
1489 RPT_ERROR,
1490 "Could not initialize streams, probably unsupported codec combination");
1491 char error_str[AV_ERROR_MAX_STRING_SIZE];
1492 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1493 PRINT("Could not write media header: %s\n", error_str);
1494 goto fail;
1495 }
1496
1497 context->outfile = of;
1498 av_dump_format(of, 0, filepath, 1);
1499
1500 return true;
1501
1502fail:
1503 if (of->pb) {
1504 avio_close(of->pb);
1505 }
1506
1507 if (context->video_stream) {
1508 context->video_stream = nullptr;
1509 }
1510
1511 if (context->audio_stream) {
1512 context->audio_stream = nullptr;
1513 }
1514
1515 avformat_free_context(of);
1516 return false;
1517}
1518
1536static void flush_ffmpeg(AVCodecContext *c, AVStream *stream, AVFormatContext *outfile)
1537{
1538 char error_str[AV_ERROR_MAX_STRING_SIZE];
1539 AVPacket *packet = av_packet_alloc();
1540
1541 avcodec_send_frame(c, nullptr);
1542
1543 /* Get the packets frames. */
1544 int ret = 1;
1545 while (ret >= 0) {
1546 ret = avcodec_receive_packet(c, packet);
1547
1548 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1549 /* No more packets to flush. */
1550 break;
1551 }
1552 if (ret < 0) {
1553 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1554 fprintf(stderr, "Error encoding delayed frame: %s\n", error_str);
1555 break;
1556 }
1557
1558 packet->stream_index = stream->index;
1559 av_packet_rescale_ts(packet, c->time_base, stream->time_base);
1560# ifdef FFMPEG_USE_DURATION_WORKAROUND
1561 my_guess_pkt_duration(outfile, stream, packet);
1562# endif
1563
1564 int write_ret = av_interleaved_write_frame(outfile, packet);
1565 if (write_ret != 0) {
1566 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1567 fprintf(stderr, "Error writing delayed frame: %s\n", error_str);
1568 break;
1569 }
1570 }
1571
1572 av_packet_free(&packet);
1573}
1574
1575/* **********************************************************************
1576 * * public interface
1577 * ********************************************************************** */
1578
1579/* Get the output filename-- similar to the other output formats */
1580static void ffmpeg_filepath_get(FFMpegContext *context,
1581 char filepath[FILE_MAX],
1582 const RenderData *rd,
1583 bool preview,
1584 const char *suffix)
1585{
1586 char autosplit[20];
1587
1588 const char **exts = get_file_extensions(rd->ffcodecdata.type);
1589 const char **fe = exts;
1590 int sfra, efra;
1591
1592 if (!filepath || !exts) {
1593 return;
1594 }
1595
1596 if (preview) {
1597 sfra = rd->psfra;
1598 efra = rd->pefra;
1599 }
1600 else {
1601 sfra = rd->sfra;
1602 efra = rd->efra;
1603 }
1604
1605 BLI_strncpy(filepath, rd->pic, FILE_MAX);
1607
1609
1610 autosplit[0] = '\0';
1611
1612 if ((rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0) {
1613 if (context) {
1614 SNPRINTF(autosplit, "_%03d", context->ffmpeg_autosplit_count);
1615 }
1616 }
1617
1618 if (rd->scemode & R_EXTENSION) {
1619 while (*fe) {
1620 if (BLI_strcasecmp(filepath + strlen(filepath) - strlen(*fe), *fe) == 0) {
1621 break;
1622 }
1623 fe++;
1624 }
1625
1626 if (*fe == nullptr) {
1627 BLI_strncat(filepath, autosplit, FILE_MAX);
1628
1629 BLI_path_frame_range(filepath, FILE_MAX, sfra, efra, 4);
1630 BLI_strncat(filepath, *exts, FILE_MAX);
1631 }
1632 else {
1633 *(filepath + strlen(filepath) - strlen(*fe)) = '\0';
1634 BLI_strncat(filepath, autosplit, FILE_MAX);
1635 BLI_strncat(filepath, *fe, FILE_MAX);
1636 }
1637 }
1638 else {
1639 if (BLI_path_frame_check_chars(filepath)) {
1640 BLI_path_frame_range(filepath, FILE_MAX, sfra, efra, 4);
1641 }
1642
1643 BLI_strncat(filepath, autosplit, FILE_MAX);
1644 }
1645
1646 BLI_path_suffix(filepath, FILE_MAX, suffix, "");
1647}
1648
1649void BKE_ffmpeg_filepath_get(char filepath[/*FILE_MAX*/ 1024],
1650 const RenderData *rd,
1651 bool preview,
1652 const char *suffix)
1653{
1654 ffmpeg_filepath_get(nullptr, filepath, rd, preview, suffix);
1655}
1656
1657bool BKE_ffmpeg_start(void *context_v,
1658 const Scene *scene,
1659 RenderData *rd,
1660 int rectx,
1661 int recty,
1662 ReportList *reports,
1663 bool preview,
1664 const char *suffix)
1665{
1666 FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
1667
1668 context->ffmpeg_autosplit_count = 0;
1669 context->ffmpeg_preview = preview;
1670 context->stamp_data = BKE_stamp_info_from_scene_static(scene);
1671
1672 bool success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
1673# ifdef WITH_AUDASPACE
1674 if (context->audio_stream) {
1675 AVCodecContext *c = context->audio_codec;
1676
1677 AUD_DeviceSpecs specs;
1678# ifdef FFMPEG_USE_OLD_CHANNEL_VARS
1679 specs.channels = AUD_Channels(c->channels);
1680# else
1681 specs.channels = AUD_Channels(c->ch_layout.nb_channels);
1682# endif
1683
1684 switch (av_get_packed_sample_fmt(c->sample_fmt)) {
1685 case AV_SAMPLE_FMT_U8:
1686 specs.format = AUD_FORMAT_U8;
1687 break;
1688 case AV_SAMPLE_FMT_S16:
1689 specs.format = AUD_FORMAT_S16;
1690 break;
1691 case AV_SAMPLE_FMT_S32:
1692 specs.format = AUD_FORMAT_S32;
1693 break;
1694 case AV_SAMPLE_FMT_FLT:
1695 specs.format = AUD_FORMAT_FLOAT32;
1696 break;
1697 case AV_SAMPLE_FMT_DBL:
1698 specs.format = AUD_FORMAT_FLOAT64;
1699 break;
1700 default:
1701 return -31415;
1702 }
1703
1704 specs.rate = rd->ffcodecdata.audio_mixrate;
1705 context->audio_mixdown_device = BKE_sound_mixdown(
1706 scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
1707 }
1708# endif
1709 return success;
1710}
1711
1712static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit);
1713
1714# ifdef WITH_AUDASPACE
1715static void write_audio_frames(FFMpegContext *context, double to_pts)
1716{
1717 AVCodecContext *c = context->audio_codec;
1718
1719 while (context->audio_stream) {
1720 if ((context->audio_time_total >= to_pts) || !write_audio_frame(context)) {
1721 break;
1722 }
1723 context->audio_time_total += double(context->audio_input_samples) / double(c->sample_rate);
1724 context->audio_time += double(context->audio_input_samples) / double(c->sample_rate);
1725 }
1726}
1727# endif
1728
1729bool BKE_ffmpeg_append(void *context_v,
1730 RenderData *rd,
1731 int start_frame,
1732 int frame,
1733 const ImBuf *image,
1734 const char *suffix,
1735 ReportList *reports)
1736{
1737 FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
1738 AVFrame *avframe;
1739 bool success = true;
1740
1741 PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, image->x, image->y);
1742
1743 if (context->video_stream) {
1744 avframe = generate_video_frame(context, image);
1745 success = (avframe && write_video_frame(context, avframe, reports));
1746# ifdef WITH_AUDASPACE
1747 /* Add +1 frame because we want to encode audio up until the next video frame. */
1748 write_audio_frames(
1749 context, (frame - start_frame + 1) / (double(rd->frs_sec) / double(rd->frs_sec_base)));
1750# else
1751 UNUSED_VARS(start_frame);
1752# endif
1753
1754 if (context->ffmpeg_autosplit) {
1755 if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
1756 end_ffmpeg_impl(context, true);
1757 context->ffmpeg_autosplit_count++;
1758
1759 success &= start_ffmpeg_impl(context, rd, image->x, image->y, suffix, reports);
1760 }
1761 }
1762 }
1763
1764 return success;
1765}
1766
/* Flush the encoders, write the container trailer and free everything owned
 * by the context. `is_autosplit` keeps resources alive that must survive
 * into the next autosplit segment (the audio mixdown device).
 * NOTE: the teardown order matters — flush before trailer, trailer before
 * closing/freeing the format context. */
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
{
  PRINT("Closing FFMPEG...\n");

# ifdef WITH_AUDASPACE
  /* Keep the mixdown device when autosplitting: the next segment reuses it. */
  if (is_autosplit == false) {
    if (context->audio_mixdown_device) {
      AUD_Device_free(context->audio_mixdown_device);
      context->audio_mixdown_device = nullptr;
    }
  }
# else
  UNUSED_VARS(is_autosplit);
# endif

  /* Drain delayed packets out of both encoders before writing the trailer. */
  if (context->video_stream) {
    PRINT("Flushing delayed video frames...\n");
    flush_ffmpeg(context->video_codec, context->video_stream, context->outfile);
  }

  if (context->audio_stream) {
    PRINT("Flushing delayed audio frames...\n");
    flush_ffmpeg(context->audio_codec, context->audio_stream, context->outfile);
  }

  if (context->outfile) {
    av_write_trailer(context->outfile);
  }

  /* Close the video codec */

  /* Streams are owned by the format context; only clear our references. */
  if (context->video_stream != nullptr) {
    PRINT("zero video stream %p\n", context->video_stream);
    context->video_stream = nullptr;
  }

  if (context->audio_stream != nullptr) {
    context->audio_stream = nullptr;
  }

  /* free the temp buffer */
  if (context->current_frame != nullptr) {
    delete_picture(context->current_frame);
    context->current_frame = nullptr;
  }
  if (context->img_convert_frame != nullptr) {
    delete_picture(context->img_convert_frame);
    context->img_convert_frame = nullptr;
  }

  /* Close the file handle unless the muxer writes without one (AVFMT_NOFILE). */
  if (context->outfile != nullptr && context->outfile->oformat) {
    if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
      avio_close(context->outfile->pb);
    }
  }

  if (context->video_codec != nullptr) {
    avcodec_free_context(&context->video_codec);
    context->video_codec = nullptr;
  }
  if (context->audio_codec != nullptr) {
    avcodec_free_context(&context->audio_codec);
    context->audio_codec = nullptr;
  }

  if (context->outfile != nullptr) {
    avformat_free_context(context->outfile);
    context->outfile = nullptr;
  }
  if (context->audio_input_buffer != nullptr) {
    av_free(context->audio_input_buffer);
    context->audio_input_buffer = nullptr;
  }

  if (context->audio_deinterleave_buffer != nullptr) {
    av_free(context->audio_deinterleave_buffer);
    context->audio_deinterleave_buffer = nullptr;
  }

  if (context->img_convert_ctx != nullptr) {
    BKE_ffmpeg_sws_release_context(context->img_convert_ctx);
    context->img_convert_ctx = nullptr;
  }
}
1851
1852void BKE_ffmpeg_end(void *context_v)
1853{
1854 FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
1855 end_ffmpeg_impl(context, false);
1856}
1857
1858void BKE_ffmpeg_preset_set(RenderData *rd, int preset)
1859{
1860 bool is_ntsc = (rd->frs_sec != 25);
1861
1862 switch (preset) {
1863 case FFMPEG_PRESET_H264:
1864 rd->ffcodecdata.type = FFMPEG_AVI;
1865 rd->ffcodecdata.codec = AV_CODEC_ID_H264;
1866 rd->ffcodecdata.video_bitrate = 6000;
1867 rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1868 rd->ffcodecdata.rc_max_rate = 9000;
1869 rd->ffcodecdata.rc_min_rate = 0;
1870 rd->ffcodecdata.rc_buffer_size = 224 * 8;
1871 rd->ffcodecdata.mux_packet_size = 2048;
1872 rd->ffcodecdata.mux_rate = 10080000;
1873 break;
1874
1875 case FFMPEG_PRESET_THEORA:
1876 case FFMPEG_PRESET_XVID:
1877 if (preset == FFMPEG_PRESET_XVID) {
1878 rd->ffcodecdata.type = FFMPEG_AVI;
1879 rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
1880 }
1881 else if (preset == FFMPEG_PRESET_THEORA) {
1882 rd->ffcodecdata.type = FFMPEG_OGG; /* XXX broken */
1883 rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
1884 }
1885
1886 rd->ffcodecdata.video_bitrate = 6000;
1887 rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1888 rd->ffcodecdata.rc_max_rate = 9000;
1889 rd->ffcodecdata.rc_min_rate = 0;
1890 rd->ffcodecdata.rc_buffer_size = 224 * 8;
1891 rd->ffcodecdata.mux_packet_size = 2048;
1892 rd->ffcodecdata.mux_rate = 10080000;
1893 break;
1894
1895 case FFMPEG_PRESET_AV1:
1896 rd->ffcodecdata.type = FFMPEG_AV1;
1897 rd->ffcodecdata.codec = AV_CODEC_ID_AV1;
1898 rd->ffcodecdata.video_bitrate = 6000;
1899 rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
1900 rd->ffcodecdata.rc_max_rate = 9000;
1901 rd->ffcodecdata.rc_min_rate = 0;
1902 rd->ffcodecdata.rc_buffer_size = 224 * 8;
1903 rd->ffcodecdata.mux_packet_size = 2048;
1904 rd->ffcodecdata.mux_rate = 10080000;
1905 break;
1906 }
1907}
1908
1909void BKE_ffmpeg_image_type_verify(RenderData *rd, const ImageFormatData *imf)
1910{
1911 int audio = 0;
1912
1913 if (imf->imtype == R_IMF_IMTYPE_FFMPEG) {
1914 if (rd->ffcodecdata.type <= 0 || rd->ffcodecdata.codec <= 0 ||
1915 rd->ffcodecdata.audio_codec <= 0 || rd->ffcodecdata.video_bitrate <= 1)
1916 {
1917 BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
1920 rd->ffcodecdata.type = FFMPEG_MKV;
1921 }
1922 if (rd->ffcodecdata.type == FFMPEG_OGG) {
1923 rd->ffcodecdata.type = FFMPEG_MPEG2;
1924 }
1925
1926 audio = 1;
1927 }
1928 else if (imf->imtype == R_IMF_IMTYPE_H264) {
1929 if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
1930 BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
1931 audio = 1;
1932 }
1933 }
1934 else if (imf->imtype == R_IMF_IMTYPE_XVID) {
1935 if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
1936 BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
1937 audio = 1;
1938 }
1939 }
1940 else if (imf->imtype == R_IMF_IMTYPE_THEORA) {
1941 if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
1942 BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
1943 audio = 1;
1944 }
1945 }
1946 else if (imf->imtype == R_IMF_IMTYPE_AV1) {
1947 if (rd->ffcodecdata.codec != AV_CODEC_ID_AV1) {
1948 BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_AV1);
1949 audio = 1;
1950 }
1951 }
1952
1953 if (audio && rd->ffcodecdata.audio_codec < 0) {
1954 rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
1955 rd->ffcodecdata.audio_bitrate = 128;
1956 }
1957}
1958
1959bool BKE_ffmpeg_alpha_channel_is_supported(const RenderData *rd)
1960{
1961 int codec = rd->ffcodecdata.codec;
1962
1963 return ELEM(codec,
1964 AV_CODEC_ID_FFV1,
1965 AV_CODEC_ID_QTRLE,
1966 AV_CODEC_ID_PNG,
1967 AV_CODEC_ID_VP9,
1968 AV_CODEC_ID_HUFFYUV);
1969}
1970
1971void *BKE_ffmpeg_context_create()
1972{
1973 /* New FFMPEG data struct. */
1974 FFMpegContext *context = static_cast<FFMpegContext *>(
1975 MEM_callocN(sizeof(FFMpegContext), "new FFMPEG context"));
1976
1977 context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
1978 context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
1979 context->ffmpeg_video_bitrate = 1150;
1980 context->ffmpeg_audio_bitrate = 128;
1981 context->ffmpeg_gop_size = 12;
1982 context->ffmpeg_autosplit = 0;
1983 context->ffmpeg_autosplit_count = 0;
1984 context->ffmpeg_preview = false;
1985 context->stamp_data = nullptr;
1986 context->audio_time_total = 0.0;
1987
1988 return context;
1989}
1990
1991void BKE_ffmpeg_context_free(void *context_v)
1992{
1993 FFMpegContext *context = static_cast<FFMpegContext *>(context_v);
1994 if (context == nullptr) {
1995 return;
1996 }
1997 if (context->stamp_data) {
1998 MEM_freeN(context->stamp_data);
1999 }
2000 MEM_freeN(context);
2001}
2002
2003#endif /* WITH_FFMPEG */
StampData * BKE_stamp_info_from_scene_static(const Scene *scene)
void BKE_stamp_info_callback(void *data, StampData *stamp_data, StampCallback callback, bool noskip)
const char * BKE_main_blendfile_path_from_global()
Definition main.cc:837
void BKE_report(ReportList *reports, eReportType type, const char *message)
Definition report.cc:125
#define BLI_assert(a)
Definition BLI_assert.h:50
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:57
bool BLI_file_ensure_parent_dir_exists(const char *filepath) ATTR_NONNULL(1)
Definition fileops_c.cc:429
void BLI_kdtree_nd_ free(KDTree *tree)
MINLINE int power_of_2_min_i(int n)
MINLINE int min_ii(int a, int b)
MINLINE int is_power_of_2_i(int n)
MINLINE unsigned int log2_floor_u(unsigned int x)
bool BLI_path_abs(char path[FILE_MAX], const char *basepath) ATTR_NONNULL(1
bool bool BLI_path_suffix(char *path, size_t path_maxncpy, const char *suffix, const char *sep) ATTR_NONNULL(1
bool void bool BLI_path_frame_check_chars(const char *path) ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT
#define FILE_MAX
bool BLI_path_frame_range(char *path, size_t path_maxncpy, int sta, int end, int digits) ATTR_NONNULL(1)
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:597
char char size_t char * BLI_strncat(char *__restrict dst, const char *__restrict src, size_t dst_maxncpy) ATTR_NONNULL(1
int char char int BLI_strcasecmp(const char *s1, const char *s2) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1
size_t BLI_snprintf(char *__restrict dst, size_t dst_maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
char * BLI_strncpy(char *__restrict dst, const char *__restrict src, size_t dst_maxncpy) ATTR_NONNULL(1
unsigned int uint
int BLI_system_thread_count(void)
Definition threads.cc:253
void BLI_mutex_lock(ThreadMutex *mutex)
Definition threads.cc:345
void BLI_mutex_unlock(ThreadMutex *mutex)
Definition threads.cc:350
pthread_mutex_t ThreadMutex
Definition BLI_threads.h:83
#define UNUSED_VARS(...)
#define UNUSED_VARS_NDEBUG(...)
#define ELEM(...)
#define STREQ(a, b)
typedef double(DMatrix)[4][4]
@ R_EXTENSION
@ FFM_PRESET_GOOD
@ FFM_PRESET_REALTIME
@ FFM_PRESET_BEST
@ FFMPEG_LOSSLESS_OUTPUT
@ FFMPEG_AUTOSPLIT_OUTPUT
@ FFMPEG_USE_MAX_B_FRAMES
@ FFM_CRF_MEDIUM
@ R_IMF_IMTYPE_FFMPEG
@ R_IMF_IMTYPE_H264
@ R_IMF_IMTYPE_THEORA
@ R_IMF_IMTYPE_AV1
@ R_IMF_IMTYPE_XVID
@ R_IMF_PLANES_RGBA
@ FFM_CHANNELS_SURROUND4
@ FFM_CHANNELS_STEREO
@ FFM_CHANNELS_SURROUND51
@ FFM_CHANNELS_SURROUND71
@ FFM_CHANNELS_MONO
const char * IMB_ffmpeg_last_error()
Read Guarded memory(de)allocation.
int64_t size() const
void remove_and_reorder(const int64_t index)
void append(const T &value)
#define printf
double time
#define NULL
#define fabsf(x)
#define sqrtf(x)
draw_view in_light_buf[] float
draw_view push_constant(Type::INT, "radiance_src") .push_constant(Type capture_info_buf storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]") .push_constant(Type draw_view int
FFMPEG_INLINE size_t ffmpeg_get_buffer_alignment()
FFMPEG_INLINE void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
format
void MEM_freeN(void *vmemh)
Definition mallocn.cc:105
void *(* MEM_callocN)(size_t len, const char *str)
Definition mallocn.cc:42
ccl_device_inline float2 fabs(const float2 a)
static void error(const char *str)
#define PRINT(format,...)
Definition moviecache.cc:35
T round(const T &a)
const btScalar eps
Definition poly34.cpp:11
return ret
__int64 int64_t
Definition stdint.h:89
unsigned char uint8_t
Definition stdint.h:78
struct ImageFormatData im_format
char pic[1024]
struct FFMpegCodecData ffcodecdata
ccl_device_inline int abs(int x)
Definition util/math.h:120