Blender V5.0
movie_write.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2006 Peter Schlaile.
2 * SPDX-FileCopyrightText: 2023-2024 Blender Authors
3 *
4 * SPDX-License-Identifier: GPL-2.0-or-later */
5
9
10#include "movie_write.hh"
11
12#include "BLI_string_ref.hh"
13
14#include "DNA_scene_types.h"
15
16#include "MOV_write.hh"
17
18#include "BKE_report.hh"
19
20#ifdef WITH_FFMPEG
21# include <cstdio>
22# include <cstring>
23
24# include "MEM_guardedalloc.h"
25
26# include "BLI_fileops.h"
27# include "BLI_math_base.h"
28# include "BLI_math_color.h"
29# include "BLI_path_utils.hh"
30# include "BLI_string.h"
31# include "BLI_string_utf8.h"
32# include "BLI_utildefines.h"
33
34# include "BKE_image.hh"
35# include "BKE_main.hh"
36# include "BKE_path_templates.hh"
37
38# include "IMB_imbuf.hh"
39
40# include "MOV_enums.hh"
41# include "MOV_util.hh"
42
43# include "IMB_colormanagement.hh"
44
45# include "CLG_log.h"
46
47# include "ffmpeg_swscale.hh"
48# include "movie_util.hh"
49
/* Logger used by all video-writing code in this file. */
static CLG_LogRef LOG = {"video.write"};
/* File size threshold (bytes, ~2 GB) at which auto-split output starts a new
 * file — presumably to stay below 32-bit container/file-system limits; the
 * split logic itself lives elsewhere in this file. */
static constexpr int64_t ffmpeg_autosplit_size = 2'000'000'000;
52
/* Convenience wrapper: store integer `value` under `key` in the FFmpeg
 * options dictionary `dict` (no av_dict flags). */
static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value)
{
  av_dict_set_int(dict, key, value, 0);
}
57
/* Forward declarations; implementations appear later in this file. */
static void ffmpeg_movie_close(MovieWriter *context);
static bool ffmpeg_filepath_get(MovieWriter *context,
                                char filepath[FILE_MAX],
                                const Scene *scene,
                                const RenderData *rd,
                                bool preview,
                                const char *suffix,
                                ReportList *reports);
66
67static AVFrame *alloc_frame(AVPixelFormat pix_fmt, int width, int height)
68{
69 AVFrame *f = av_frame_alloc();
70 if (f == nullptr) {
71 return nullptr;
72 }
73 const size_t align = ffmpeg_get_buffer_alignment();
74 f->format = pix_fmt;
75 f->width = width;
76 f->height = height;
77 if (av_frame_get_buffer(f, align) < 0) {
78 av_frame_free(&f);
79 return nullptr;
80 }
81 return f;
82}
83
84/* Get the correct file extensions for the requested format,
85 * first is always desired guess_format parameter */
86static const char **get_file_extensions(int format)
87{
88 switch (format) {
89 case FFMPEG_DV: {
90 static const char *rv[] = {".dv", nullptr};
91 return rv;
92 }
93 case FFMPEG_MPEG1: {
94 static const char *rv[] = {".mpg", ".mpeg", nullptr};
95 return rv;
96 }
97 case FFMPEG_MPEG2: {
98 static const char *rv[] = {".dvd", ".vob", ".mpg", ".mpeg", nullptr};
99 return rv;
100 }
101 case FFMPEG_MPEG4: {
102 static const char *rv[] = {".mp4", ".mpg", ".mpeg", nullptr};
103 return rv;
104 }
105 case FFMPEG_AVI: {
106 static const char *rv[] = {".avi", nullptr};
107 return rv;
108 }
109 case FFMPEG_MOV: {
110 static const char *rv[] = {".mov", nullptr};
111 return rv;
112 }
113 case FFMPEG_H264: {
114 /* FIXME: avi for now... */
115 static const char *rv[] = {".avi", nullptr};
116 return rv;
117 }
118
119 case FFMPEG_XVID: {
120 /* FIXME: avi for now... */
121 static const char *rv[] = {".avi", nullptr};
122 return rv;
123 }
124 case FFMPEG_FLV: {
125 static const char *rv[] = {".flv", nullptr};
126 return rv;
127 }
128 case FFMPEG_MKV: {
129 static const char *rv[] = {".mkv", nullptr};
130 return rv;
131 }
132 case FFMPEG_OGG: {
133 static const char *rv[] = {".ogv", ".ogg", nullptr};
134 return rv;
135 }
136 case FFMPEG_WEBM: {
137 static const char *rv[] = {".webm", nullptr};
138 return rv;
139 }
140 case FFMPEG_AV1: {
141 static const char *rv[] = {".mp4", ".mkv", nullptr};
142 return rv;
143 }
144 default:
145 return nullptr;
146 }
147}
148
149static void add_hdr_mastering_display_metadata(AVCodecParameters *codecpar,
150 AVCodecContext *c,
151 const ImageFormatData *imf)
152{
153 if (c->color_primaries != AVCOL_PRI_BT2020) {
154 return;
155 }
156
157 int max_luminance = 0;
158 if (c->color_trc == AVCOL_TRC_ARIB_STD_B67) {
159 /* HLG is always 1000 nits. */
160 max_luminance = 1000;
161 }
162 else if (c->color_trc == AVCOL_TRC_SMPTEST2084) {
163 /* PQ uses heuristic based on view transform name. In the future this could become
164 * a user control, but this solves the common cases. */
166 if (view_name.find("HDR 500 nits") != blender::StringRef::not_found) {
167 max_luminance = 500;
168 }
169 else if (view_name.find("HDR 1000 nits") != blender::StringRef::not_found) {
170 max_luminance = 1000;
171 }
172 else if (view_name.find("HDR 2000 nits") != blender::StringRef::not_found) {
173 max_luminance = 2000;
174 }
175 else if (view_name.find("HDR 4000 nits") != blender::StringRef::not_found) {
176 max_luminance = 4000;
177 }
178 else if (view_name.find("HDR 10000 nits") != blender::StringRef::not_found) {
179 max_luminance = 10000;
180 }
181 }
182
183 /* If we don't know anything, don't write metadata. The video player will make some
184 * default assumption, often 1000 nits. */
185 if (max_luminance == 0) {
186 return;
187 }
188
189 AVPacketSideData *side_data = av_packet_side_data_new(&codecpar->coded_side_data,
190 &codecpar->nb_coded_side_data,
191 AV_PKT_DATA_MASTERING_DISPLAY_METADATA,
192 sizeof(AVMasteringDisplayMetadata),
193 0);
194 if (side_data == nullptr) {
195 CLOG_ERROR(&LOG, "Failed to attached mastering display metadata to stream");
196 return;
197 }
198
199 AVMasteringDisplayMetadata *mastering_metadata = reinterpret_cast<AVMasteringDisplayMetadata *>(
200 side_data->data);
201
202 /* Rec.2020 primaries and D65 white point. */
203 mastering_metadata->has_primaries = 1;
204 mastering_metadata->display_primaries[0][0] = av_make_q(34000, 50000);
205 mastering_metadata->display_primaries[0][1] = av_make_q(16000, 50000);
206 mastering_metadata->display_primaries[1][0] = av_make_q(13250, 50000);
207 mastering_metadata->display_primaries[1][1] = av_make_q(34500, 50000);
208 mastering_metadata->display_primaries[2][0] = av_make_q(7500, 50000);
209 mastering_metadata->display_primaries[2][1] = av_make_q(3000, 50000);
210
211 mastering_metadata->white_point[0] = av_make_q(15635, 50000);
212 mastering_metadata->white_point[1] = av_make_q(16450, 50000);
213
214 mastering_metadata->has_luminance = 1;
215 mastering_metadata->min_luminance = av_make_q(1, 10000);
216 mastering_metadata->max_luminance = av_make_q(max_luminance, 1);
217}
218
219/* Write a frame to the output file */
220static bool write_video_frame(MovieWriter *context, AVFrame *frame, ReportList *reports)
221{
222 int ret, success = 1;
223 AVPacket *packet = av_packet_alloc();
224
225 AVCodecContext *c = context->video_codec;
226
227 frame->pts = context->video_time;
228 context->video_time++;
229
230 char error_str[AV_ERROR_MAX_STRING_SIZE];
231 ret = avcodec_send_frame(c, frame);
232 if (ret < 0) {
233 /* Can't send frame to encoder. This shouldn't happen. */
234 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
235 CLOG_ERROR(&LOG, "Can't send video frame: %s", error_str);
236 success = -1;
237 }
238
239 while (ret >= 0) {
240 ret = avcodec_receive_packet(c, packet);
241
242 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
243 /* No more packets available. */
244 break;
245 }
246 if (ret < 0) {
247 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
248 CLOG_ERROR(&LOG, "Error encoding frame: %s", error_str);
249 break;
250 }
251
252 packet->stream_index = context->video_stream->index;
253 av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
254# ifdef FFMPEG_USE_DURATION_WORKAROUND
255 my_guess_pkt_duration(context->outfile, context->video_stream, packet);
256# endif
257
258 if (av_interleaved_write_frame(context->outfile, packet) != 0) {
259 success = -1;
260 break;
261 }
262 }
263
264 if (!success) {
265 BKE_report(reports, RPT_ERROR, "Error writing frame");
266 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
267 CLOG_INFO(&LOG, "ffmpeg: error writing video frame: %s", error_str);
268 }
269
270 av_packet_free(&packet);
271
272 return success;
273}
274
275/* Allocate new ImBuf of the size of the given input which only contains float buffer with pixels
276 * from the input.
277 *
278 * For the float image buffers it is similar to IMB_dupImBuf() but it ensures that the byte buffer
279 * is not allocated.
280 *
281 * For the byte image buffers it is similar to IMB_dupImBuf() followed by IMB_float_from_byte(),
282 * but without temporary allocation, and result containing only single float buffer.
283 *
284 * No color space conversion is performed. The result float buffer might be in a non-linear space
285 * denoted by the float_buffer.colorspace. */
286static ImBuf *alloc_imbuf_for_colorspace_transform(const ImBuf *input_ibuf)
287{
288 if (!input_ibuf) {
289 return nullptr;
290 }
291
292 /* Allocate new image buffer without float buffer just yet.
293 * This allows to properly initialize the number of channels used in the buffer. */
294 /* TODO(sergey): Make it a reusable function.
295 * This is a common pattern used in few areas with the goal to bypass the hardcoded number of
296 * channels used by IMB_allocImBuf(). */
297 ImBuf *result_ibuf = IMB_allocImBuf(input_ibuf->x, input_ibuf->y, input_ibuf->planes, 0);
298 result_ibuf->channels = input_ibuf->float_buffer.data ? input_ibuf->channels : 4;
299
300 /* Allocate float buffer with the proper number of channels. */
301 const size_t num_pixels = IMB_get_pixel_count(input_ibuf);
302 float *buffer = MEM_malloc_arrayN<float>(num_pixels * result_ibuf->channels, "movie hdr image");
303 IMB_assign_float_buffer(result_ibuf, buffer, IB_TAKE_OWNERSHIP);
304
305 /* Transfer flags related to color space conversion from the original image buffer. */
306 result_ibuf->flags |= (input_ibuf->flags & IB_alphamode_channel_packed);
307
308 if (input_ibuf->float_buffer.data) {
309 /* Simple case: copy pixels from the source image as-is, without any conversion.
310 * The result has the same colorspace as the input. */
311 memcpy(result_ibuf->float_buffer.data,
312 input_ibuf->float_buffer.data,
313 num_pixels * input_ibuf->channels * sizeof(float));
314 result_ibuf->float_buffer.colorspace = input_ibuf->float_buffer.colorspace;
315 }
316 else {
317 /* Convert byte buffer to float buffer.
318 * The exact profile is not important here: it should match for the source and destination so
319 * that the function only does alpha and byte->float conversions. */
320 const bool predivide = IMB_alpha_affects_rgb(input_ibuf);
322 input_ibuf->byte_buffer.data,
325 predivide,
326 input_ibuf->x,
327 input_ibuf->y,
328 result_ibuf->x,
329 input_ibuf->x);
330 }
331
332 return result_ibuf;
333}
334
/* read and encode a frame of video from the buffer */
/* Prepare the next video frame (context->current_frame) from `input_ibuf`.
 *
 * Copies the image's pixels (byte or float) into an FFmpeg frame, vertically
 * flipped (ImBuf rows are bottom-up, FFmpeg rows top-down), then runs a
 * pixel-format conversion when the encoder format differs from Blender's
 * internal RGBA layout. Returns context->current_frame ready for encoding,
 * or nullptr when the input has no pixel data of the required kind. */
static AVFrame *generate_video_frame(MovieWriter *context, const ImBuf *input_ibuf)
{
  /* Use float input if needed. */
  /* Float path is taken whenever a conversion frame exists and its format is
   * not plain packed RGBA in an RGB/unspecified color space. */
  const bool use_float =
      context->img_convert_frame != nullptr &&
      !(context->img_convert_frame->format == AV_PIX_FMT_RGBA &&
        ELEM(context->img_convert_frame->colorspace, AVCOL_SPC_RGB, AVCOL_SPC_UNSPECIFIED));

  /* If float data is needed but the input only has bytes, make a temporary
   * float copy (freed below before returning). */
  const ImBuf *image = (use_float && input_ibuf->float_buffer.data == nullptr) ?
                           alloc_imbuf_for_colorspace_transform(input_ibuf) :
                           input_ibuf;

  const uint8_t *pixels = image->byte_buffer.data;
  const float *pixels_fl = image->float_buffer.data;

  /* Bail out when the buffer needed by the chosen path does not exist. */
  if ((!use_float && (pixels == nullptr)) || (use_float && (pixels_fl == nullptr))) {
    if (image != input_ibuf) {
      IMB_freeImBuf(const_cast<ImBuf *>(image));
    }
    return nullptr;
  }

  AVCodecParameters *codec = context->video_stream->codecpar;
  int height = codec->height;
  AVFrame *rgb_frame;

  if (context->img_convert_frame != nullptr) {
    /* Pixel format conversion is needed. */
    rgb_frame = context->img_convert_frame;
  }
  else {
    /* The output pixel format is Blender's internal pixel format. */
    rgb_frame = context->current_frame;
  }

  /* Ensure frame is writable. Some video codecs might have made previous frame
   * shared (i.e. not writable). */
  av_frame_make_writable(rgb_frame);

  const size_t linesize_dst = rgb_frame->linesize[0];
  if (use_float) {
    /* Float image: need to split up the image into a planar format,
     * because `libswscale` does not support RGBA->YUV conversions from
     * packed float formats.
     * Un-premultiply the image if the output format supports alpha, to
     * match the format of the byte image. */
    /* Plane order below (0=G, 1=B, 2=R, 3=A) — presumably matches the planar
     * float format chosen for img_convert_frame; confirm against its setup. */
    BLI_assert_msg(rgb_frame->linesize[1] == linesize_dst &&
                       rgb_frame->linesize[2] == linesize_dst &&
                       rgb_frame->linesize[3] == linesize_dst,
                   "ffmpeg frame should be 4 same size planes for a floating point image case");
    for (int y = 0; y < height; y++) {
      /* Write rows top-down while reading bottom-up: vertical flip. */
      size_t dst_offset = linesize_dst * (height - y - 1);
      float *dst_g = reinterpret_cast<float *>(rgb_frame->data[0] + dst_offset);
      float *dst_b = reinterpret_cast<float *>(rgb_frame->data[1] + dst_offset);
      float *dst_r = reinterpret_cast<float *>(rgb_frame->data[2] + dst_offset);
      float *dst_a = reinterpret_cast<float *>(rgb_frame->data[3] + dst_offset);
      const float *src = pixels_fl + image->x * y * 4;

      if (MOV_codec_supports_alpha(context->ffmpeg_codec, context->ffmpeg_profile)) {
        /* Alpha is kept: un-premultiply so alpha matches the byte-image path. */
        for (int x = 0; x < image->x; x++) {
          float tmp[4];
          premul_to_straight_v4_v4(tmp, src);
          *dst_r++ = tmp[0];
          *dst_g++ = tmp[1];
          *dst_b++ = tmp[2];
          *dst_a++ = tmp[3];
          src += 4;
        }
      }
      else {
        /* No alpha in output: copy channels as-is. */
        for (int x = 0; x < image->x; x++) {
          *dst_r++ = src[0];
          *dst_g++ = src[1];
          *dst_b++ = src[2];
          *dst_a++ = src[3];
          src += 4;
        }
      }
    }
  }
  else {
    /* Byte image: flip the image vertically. */
    const size_t linesize_src = rgb_frame->width * 4;
    for (int y = 0; y < height; y++) {
      uint8_t *target = rgb_frame->data[0] + linesize_dst * (height - y - 1);
      const uint8_t *src = pixels + linesize_src * y;

      /* NOTE: this is endianness-sensitive. */
      /* The target buffer is always expected to contain little-endian RGBA values. */
      memcpy(target, src, linesize_src);
    }
  }

  /* Convert to the output pixel format, if it's different that Blender's internal one. */
  if (context->img_convert_frame != nullptr) {
    BLI_assert(context->img_convert_ctx != nullptr);
    /* Ensure the frame we are scaling to is writable as well. */
    av_frame_make_writable(context->current_frame);
    ffmpeg_sws_scale_frame(context->img_convert_ctx, context->current_frame, rgb_frame);
  }

  /* Free the temporary float copy if one was made above. */
  if (image != input_ibuf) {
    IMB_freeImBuf(const_cast<ImBuf *>(image));
  }

  return context->current_frame;
}
443
/* Build an FFmpeg time base (num/den as integers) from a possibly fractional
 * frame-rate base. `den` is the scene FPS numerator, `num` the (float) base.
 * MPEG4 caps the denominator at 16 bits, other codecs at 31 bits. */
static AVRational calc_time_base(uint den, double num, AVCodecID codec_id)
{
  /* Convert the input 'num' to an integer. Simply shift the decimal places until we get an integer
   * (within a floating point error range).
   * For example if we have `den = 3` and `num = 0.1` then the fps is: `den/num = 30` fps.
   * When converting this to a FFMPEG time base, we want num to be an integer.
   * So we simply move the decimal places of both numbers. i.e. `den = 30`, `num = 1`. */
  float eps = FLT_EPSILON;
  const uint DENUM_MAX = (codec_id == AV_CODEC_ID_MPEG4) ? (1UL << 16) - 1 : (1UL << 31) - 1;

  /* Calculate the precision of the initial floating point number. */
  if (num > 1.0) {
    const uint num_integer_bits = log2_floor_u(uint(num));

    /* Formula for calculating the epsilon value: (power of two range) / (pow mantissa bits)
     * For example, a float has 23 mantissa bits and the float value 3.5f as a pow2 range of
     * (4-2=2):
     * (2) / pow2(23) = floating point precision for 3.5f
     */
    eps = float(1 << num_integer_bits) * FLT_EPSILON;
  }

  /* Calculate how many decimal shifts we can do until we run out of precision. */
  const int max_num_shift = fabsf(log10f(eps));
  /* Calculate how many times we can shift the denominator. */
  const int max_den_shift = log10f(DENUM_MAX) - log10f(den);
  const int max_iter = min_ii(max_num_shift, max_den_shift);

  /* Shift both by powers of 10 (scaling eps along) until `num` is integral
   * within precision, or no more shifts are safe. */
  for (int i = 0; i < max_iter && fabs(num - round(num)) > eps; i++) {
    /* Increase the number and denominator until both are integers. */
    num *= 10;
    den *= 10;
    eps *= 10;
  }

  AVRational time_base;
  time_base.den = den;
  time_base.num = int(num);

  return time_base;
}
485
486static const AVCodec *get_av1_encoder(
487 MovieWriter *context, const RenderData *rd, AVDictionary **opts, int rectx, int recty)
488{
489 /* There are three possible encoders for AV1: `libaom-av1`, librav1e, and `libsvtav1`. librav1e
490 * tends to give the best compression quality while `libsvtav1` tends to be the fastest encoder.
491 * One of each will be picked based on the preset setting, and if a particular encoder is not
492 * available, then use the default returned by FFMpeg. */
493 const AVCodec *codec = nullptr;
494 switch (context->ffmpeg_preset) {
495 case FFM_PRESET_BEST:
496 /* `libaom-av1` may produce better VMAF-scoring videos in several cases, but there are cases
497 * where using a different encoder is desirable, such as in #103849. */
498 codec = avcodec_find_encoder_by_name("librav1e");
499 if (!codec) {
500 /* Fall back to `libaom-av1` if librav1e is not found. */
501 codec = avcodec_find_encoder_by_name("libaom-av1");
502 }
503 break;
505 codec = avcodec_find_encoder_by_name("libsvtav1");
506 break;
507 case FFM_PRESET_GOOD:
508 default:
509 codec = avcodec_find_encoder_by_name("libaom-av1");
510 break;
511 }
512
513 /* Use the default AV1 encoder if the specified encoder wasn't found. */
514 if (!codec) {
515 codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
516 }
517
518 /* Apply AV1 encoder specific settings. */
519 if (codec) {
520 if (STREQ(codec->name, "librav1e")) {
521 /* Set "tiles" to 8 to enable multi-threaded encoding. */
522 if (rd->threads > 8) {
523 ffmpeg_dict_set_int(opts, "tiles", rd->threads);
524 }
525 else {
526 ffmpeg_dict_set_int(opts, "tiles", 8);
527 }
528
529 /* Use a reasonable speed setting based on preset. Speed ranges from 0-10.
530 * Must check context->ffmpeg_preset again in case this encoder was selected due to the
531 * absence of another. */
532 switch (context->ffmpeg_preset) {
533 case FFM_PRESET_BEST:
534 ffmpeg_dict_set_int(opts, "speed", 4);
535 break;
537 ffmpeg_dict_set_int(opts, "speed", 10);
538 break;
539 case FFM_PRESET_GOOD:
540 default:
541 ffmpeg_dict_set_int(opts, "speed", 6);
542 break;
543 }
544 /* Set gop_size as rav1e's "--keyint". */
545 char buffer[64];
546 SNPRINTF_UTF8(buffer, "keyint=%d", context->ffmpeg_gop_size);
547 av_dict_set(opts, "rav1e-params", buffer, 0);
548 }
549 else if (STREQ(codec->name, "libsvtav1")) {
550 /* Set preset value based on ffmpeg_preset.
551 * Must check `context->ffmpeg_preset` again in case this encoder was selected due to the
552 * absence of another. */
553 switch (context->ffmpeg_preset) {
555 ffmpeg_dict_set_int(opts, "preset", 8);
556 break;
557 case FFM_PRESET_BEST:
558 ffmpeg_dict_set_int(opts, "preset", 3);
559 break;
560 case FFM_PRESET_GOOD:
561 default:
562 ffmpeg_dict_set_int(opts, "preset", 5);
563 break;
564 }
565 }
566 else if (STREQ(codec->name, "libaom-av1")) {
567 /* Speed up libaom-av1 encoding by enabling multi-threading and setting tiles. */
568 ffmpeg_dict_set_int(opts, "row-mt", 1);
569 const char *tiles_string = nullptr;
570 bool tiles_string_is_dynamic = false;
571 if (rd->threads > 0) {
572 /* See if threads is a square. */
573 int threads_sqrt = sqrtf(rd->threads);
574 if (threads_sqrt < 4) {
575 /* Ensure a default minimum. */
576 threads_sqrt = 4;
577 }
578 if (is_power_of_2_i(threads_sqrt) && threads_sqrt * threads_sqrt == rd->threads) {
579 /* Is a square num, therefore just do "sqrt x sqrt" for tiles parameter. */
580 int digits = 0;
581 for (int t_sqrt_copy = threads_sqrt; t_sqrt_copy > 0; t_sqrt_copy /= 10) {
582 ++digits;
583 }
584 /* A char array need only an alignment of 1. */
585 char *tiles_string_mut = (char *)calloc(digits * 2 + 2, 1);
586 BLI_snprintf_utf8(tiles_string_mut, digits * 2 + 2, "%dx%d", threads_sqrt, threads_sqrt);
587 tiles_string_is_dynamic = true;
588 tiles_string = tiles_string_mut;
589 }
590 else {
591 /* Is not a square num, set greater side based on longer side, or use a square if both
592 * sides are equal. */
593 int sqrt_p2 = power_of_2_min_i(threads_sqrt);
594 if (sqrt_p2 < 2) {
595 /* Ensure a default minimum. */
596 sqrt_p2 = 2;
597 }
598 int sqrt_p2_next = power_of_2_min_i(int(rd->threads) / sqrt_p2);
599 if (sqrt_p2_next < 1) {
600 sqrt_p2_next = 1;
601 }
602 if (sqrt_p2 > sqrt_p2_next) {
603 /* Ensure sqrt_p2_next is greater or equal to sqrt_p2. */
604 int temp = sqrt_p2;
605 sqrt_p2 = sqrt_p2_next;
606 sqrt_p2_next = temp;
607 }
608 int combined_digits = 0;
609 for (int sqrt_p2_copy = sqrt_p2; sqrt_p2_copy > 0; sqrt_p2_copy /= 10) {
610 ++combined_digits;
611 }
612 for (int sqrt_p2_copy = sqrt_p2_next; sqrt_p2_copy > 0; sqrt_p2_copy /= 10) {
613 ++combined_digits;
614 }
615 /* A char array need only an alignment of 1. */
616 char *tiles_string_mut = (char *)calloc(combined_digits + 2, 1);
617 if (rectx > recty) {
619 tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2_next, sqrt_p2);
620 }
621 else if (rectx < recty) {
623 tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2, sqrt_p2_next);
624 }
625 else {
626 BLI_snprintf_utf8(tiles_string_mut, combined_digits + 2, "%dx%d", sqrt_p2, sqrt_p2);
627 }
628 tiles_string_is_dynamic = true;
629 tiles_string = tiles_string_mut;
630 }
631 }
632 else {
633 /* Thread count unknown, default to 8. */
634 if (rectx > recty) {
635 tiles_string = "4x2";
636 }
637 else if (rectx < recty) {
638 tiles_string = "2x4";
639 }
640 else {
641 tiles_string = "2x2";
642 }
643 }
644 av_dict_set(opts, "tiles", tiles_string, 0);
645 if (tiles_string_is_dynamic) {
646 free((void *)tiles_string);
647 }
648 /* libaom-av1 uses "cpu-used" instead of "preset" for defining compression quality.
649 * This value is in a range from 0-8. 0 and 8 are extremes, but we will allow 8.
650 * Must check context->ffmpeg_preset again in case this encoder was selected due to the
651 * absence of another. */
652 switch (context->ffmpeg_preset) {
654 ffmpeg_dict_set_int(opts, "cpu-used", 8);
655 break;
656 case FFM_PRESET_BEST:
657 ffmpeg_dict_set_int(opts, "cpu-used", 4);
658 break;
659 case FFM_PRESET_GOOD:
660 default:
661 ffmpeg_dict_set_int(opts, "cpu-used", 6);
662 break;
663 }
664 }
665 }
666
667 return codec;
668}
669
670/* Remap H.264 CRF to H.265 CRF: 17..32 range (23 default) to 20..37 range (28 default).
671 * https://trac.ffmpeg.org/wiki/Encode/H.265 */
672static int remap_crf_to_h265_crf(int crf, bool is_10_or_12_bpp)
673{
674 /* 10/12 bit videos seem to need slightly lower CRF value for similar quality. */
675 const int bias = is_10_or_12_bpp ? -3 : 0;
676 switch (crf) {
678 return 20 + bias;
679 case FFM_CRF_HIGH:
680 return 24 + bias;
681 case FFM_CRF_MEDIUM:
682 return 28 + bias;
683 case FFM_CRF_LOW:
684 return 31 + bias;
685 case FFM_CRF_VERYLOW:
686 return 34 + bias;
687 case FFM_CRF_LOWEST:
688 return 37 + bias;
689 }
690 return crf;
691}
692
693static const AVCodec *get_prores_encoder(const ImageFormatData *imf, int rectx, int recty)
694{
695 /* The prores_aw encoder currently (April 2025) has issues when encoding alpha with high
696 * resolution but is faster in most cases for similar quality. Use it instead of prores_ks
697 * if possible. (Upstream issue https://trac.ffmpeg.org/ticket/11536) */
698 if (imf->planes == R_IMF_PLANES_RGBA) {
699 if ((size_t(rectx) * size_t(recty)) > (3840 * 2160)) {
700 return avcodec_find_encoder_by_name("prores_ks");
701 }
702 }
703 return avcodec_find_encoder_by_name("prores_aw");
704}
705
/* 10bpp H264: remap 0..51 range to -12..51 range, clamped at zero.
 * https://trac.ffmpeg.org/wiki/Encode/H.264#a1.ChooseaCRFvalue */
static int remap_crf_to_h264_10bpp_crf(int crf)
{
  /* Linear remap of the 8-bit CRF scale onto the wider 10-bit scale. */
  int remapped = int(-12.0f + (crf / 51.0f) * 63.0f);
  /* Negative (better-than-lossless) values are not meaningful; clamp to 0. */
  if (remapped < 0) {
    remapped = 0;
  }
  return remapped;
}
714
/* Configure rate control on the codec context: constant bit rate (CBR) when
 * the codec has no CRF support or CRF is disabled, otherwise lossless or
 * CRF-based quality, translating Blender's CRF value into each encoder's
 * own option/scale. */
static void set_quality_rate_options(const MovieWriter *context,
                                     const AVCodecID codec_id,
                                     const FFMpegCodecData *ffcodecdata,
                                     const ImageFormatData *imf,
                                     AVDictionary **opts)
{
  AVCodecContext *c = context->video_codec;

  /* Handle constant bit rate (CBR) case. */
  if (!MOV_codec_supports_crf(codec_id) || context->ffmpeg_crf < 0) {
    /* UI rates are in kbit/s and KB; FFmpeg wants bits/s and bytes. */
    c->bit_rate = context->ffmpeg_video_bitrate * 1000;
    c->rc_max_rate = ffcodecdata->rc_max_rate * 1000;
    c->rc_min_rate = ffcodecdata->rc_min_rate * 1000;
    c->rc_buffer_size = ffcodecdata->rc_buffer_size * 1024;
    return;
  }

  /* For VP9 bit rate must be set to zero to get CRF mode, just set it to zero for all codecs:
   * https://trac.ffmpeg.org/wiki/Encode/VP9 */
  c->bit_rate = 0;

  const bool is_10_bpp = imf->depth == R_IMF_CHAN_DEPTH_10;
  const bool is_12_bpp = imf->depth == R_IMF_CHAN_DEPTH_12;
  /* AV1 encoders differ in how quality is expressed, see below. */
  const bool av1_librav1e = codec_id == AV_CODEC_ID_AV1 && STREQ(c->codec->name, "librav1e");
  const bool av1_libsvtav1 = codec_id == AV_CODEC_ID_AV1 && STREQ(c->codec->name, "libsvtav1");

  /* Handle "lossless" case. */
  if (context->ffmpeg_crf == FFM_CRF_LOSSLESS) {
    if (codec_id == AV_CODEC_ID_VP9) {
      /* VP9 needs "lossless": https://trac.ffmpeg.org/wiki/Encode/VP9#LosslessVP9 */
      ffmpeg_dict_set_int(opts, "lossless", 1);
    }
    else if (codec_id == AV_CODEC_ID_H264 && is_10_bpp) {
      /* 10bpp H264 needs "qp": https://trac.ffmpeg.org/wiki/Encode/H.264#a1.ChooseaCRFvalue */
      ffmpeg_dict_set_int(opts, "qp", 0);
    }
    else if (codec_id == AV_CODEC_ID_H265) {
      /* H.265 needs "lossless" in private params; also make it much less verbose. */
      av_dict_set(opts, "x265-params", "log-level=1:lossless=1", 0);
    }
    else if (codec_id == AV_CODEC_ID_AV1 && (av1_librav1e || av1_libsvtav1)) {
      /* AV1 in some encoders needs qp=0 for lossless. */
      ffmpeg_dict_set_int(opts, "qp", 0);
    }
    else {
      /* For others crf=0 means lossless. */
      ffmpeg_dict_set_int(opts, "crf", 0);
    }
    return;
  }

  /* Handle CRF setting cases. */
  int crf = context->ffmpeg_crf;

  /* Remap Blender's H.264-style CRF onto scales used by other codec/depth combos. */
  if (codec_id == AV_CODEC_ID_H264 && is_10_bpp) {
    crf = remap_crf_to_h264_10bpp_crf(crf);
  }
  else if (codec_id == AV_CODEC_ID_H265) {
    crf = remap_crf_to_h265_crf(crf, is_10_bpp || is_12_bpp);
    /* Make H.265 much less verbose. */
    av_dict_set(opts, "x265-params", "log-level=1", 0);
  }

  if (av1_librav1e) {
    /* Remap crf 0..51 to qp 0..255 for AV1 librav1e. */
    int qp = int(float(crf) / 51.0f * 255.0f);
    qp = clamp_i(qp, 0, 255);
    ffmpeg_dict_set_int(opts, "qp", qp);
  }
  else if (av1_libsvtav1) {
    /* libsvtav1 used to take CRF as "qp" parameter, do that. */
    ffmpeg_dict_set_int(opts, "qp", crf);
  }
  else {
    ffmpeg_dict_set_int(opts, "crf", crf);
  }
}
792
793static void set_colorspace_options(AVCodecContext *c, const ColorSpace *colorspace)
794{
795 const AVPixFmtDescriptor *pix_fmt_desc = av_pix_fmt_desc_get(c->pix_fmt);
796 const bool is_rgb_format = (pix_fmt_desc->flags & AV_PIX_FMT_FLAG_RGB);
797 const bool rgb_matrix = false;
798
799 int cicp[4];
800 if (colorspace && IMB_colormanagement_space_to_cicp(
801 colorspace, ColorManagedFileOutput::Video, rgb_matrix, cicp))
802 {
803 /* Note ffmpeg enums are documented to match CICP. */
804 c->color_primaries = AVColorPrimaries(cicp[0]);
805 c->color_trc = AVColorTransferCharacteristic(cicp[1]);
806 c->colorspace = (is_rgb_format) ? AVCOL_SPC_RGB : AVColorSpace(cicp[2]);
807 c->color_range = AVCOL_RANGE_JPEG;
808 }
809 else if (!is_rgb_format) {
810 /* Note BT.709 is wrong for sRGB.
811 * But we have been writing sRGB like this forever, and there is the so called
812 * "Quicktime gamma shift bug" that complicates things. */
813 c->color_primaries = AVCOL_PRI_BT709;
814 c->color_trc = AVCOL_TRC_BT709;
815 c->colorspace = AVCOL_SPC_BT709;
816 /* TODO(sergey): Consider making the range an option to cover more use-cases. */
817 c->color_range = AVCOL_RANGE_MPEG;
818 }
819 else {
820 /* We don't set anything for pure sRGB writing, for backwards compatibility. */
821 }
822}
823
824static AVStream *alloc_video_stream(MovieWriter *context,
825 const RenderData *rd,
826 const ImageFormatData *imf,
827 AVCodecID codec_id,
828 AVFormatContext *of,
829 int rectx,
830 int recty,
831 char *error,
832 int error_size)
833{
834 AVStream *st;
835 const AVCodec *codec;
836 AVDictionary *opts = nullptr;
837
838 error[0] = '\0';
839
840 st = avformat_new_stream(of, nullptr);
841 if (!st) {
842 return nullptr;
843 }
844 st->id = 0;
845
846 /* Set up the codec context */
847
848 if (codec_id == AV_CODEC_ID_AV1) {
849 /* Use get_av1_encoder() to get the ideal (hopefully) encoder for AV1 based
850 * on given parameters, and also set up opts. */
851 codec = get_av1_encoder(context, rd, &opts, rectx, recty);
852 }
853 else if (codec_id == AV_CODEC_ID_PRORES) {
854 codec = get_prores_encoder(imf, rectx, recty);
855 }
856 else {
857 codec = avcodec_find_encoder(codec_id);
858 }
859 if (!codec) {
860 CLOG_ERROR(&LOG, "Couldn't find valid video codec");
861 context->video_codec = nullptr;
862 return nullptr;
863 }
864
865 context->video_codec = avcodec_alloc_context3(codec);
866 AVCodecContext *c = context->video_codec;
867
868 /* Get some values from the current render settings */
869
870 c->width = rectx;
871 c->height = recty;
872
873 if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
874 /* FIXME: Really bad hack (tm) for NTSC support */
875 c->time_base.den = 2997;
876 c->time_base.num = 100;
877 }
878 else if (float(int(rd->frs_sec_base)) == rd->frs_sec_base) {
879 c->time_base.den = rd->frs_sec;
880 c->time_base.num = int(rd->frs_sec_base);
881 }
882 else {
883 c->time_base = calc_time_base(rd->frs_sec, rd->frs_sec_base, codec_id);
884 }
885
886 /* As per the time-base documentation here:
887 * https://www.ffmpeg.org/ffmpeg-codecs.html#Codec-Options
888 * We want to set the time base to (1 / fps) for fixed frame rate video.
889 * If it is not possible, we want to set the time-base numbers to something as
890 * small as possible.
891 */
892 if (c->time_base.num != 1) {
893 AVRational new_time_base;
894 if (av_reduce(
895 &new_time_base.num, &new_time_base.den, c->time_base.num, c->time_base.den, INT_MAX))
896 {
897 /* Exact reduction was possible. Use the new value. */
898 c->time_base = new_time_base;
899 }
900 }
901
902 st->time_base = c->time_base;
903
904 c->gop_size = context->ffmpeg_gop_size;
905 c->max_b_frames = context->ffmpeg_max_b_frames;
906
907 set_quality_rate_options(context, codec_id, &rd->ffcodecdata, imf, &opts);
908
909 if (context->ffmpeg_preset) {
910 /* 'preset' is used by h.264, 'deadline' is used by WEBM/VP9. I'm not
911 * setting those properties conditionally based on the video codec,
912 * as the FFmpeg encoder simply ignores unknown settings anyway. */
913 char const *preset_name = nullptr; /* Used by h.264. */
914 char const *deadline_name = nullptr; /* Used by WEBM/VP9. */
915 switch (context->ffmpeg_preset) {
916 case FFM_PRESET_GOOD:
917 preset_name = "medium";
918 deadline_name = "good";
919 break;
920 case FFM_PRESET_BEST:
921 preset_name = "slower";
922 deadline_name = "best";
923 break;
925 preset_name = "superfast";
926 deadline_name = "realtime";
927 break;
928 default:
929 CLOG_WARN(&LOG, "Unknown preset number %i, ignoring.", context->ffmpeg_preset);
930 }
931 /* "codec_id != AV_CODEC_ID_AV1" is required due to "preset" already being set by an AV1 codec.
932 */
933 if (preset_name != nullptr && codec_id != AV_CODEC_ID_AV1) {
934 av_dict_set(&opts, "preset", preset_name, 0);
935 }
936 if (deadline_name != nullptr) {
937 av_dict_set(&opts, "deadline", deadline_name, 0);
938 }
939 }
940
941 /* Be sure to use the correct pixel format(e.g. RGB, YUV) */
942
943 const enum AVPixelFormat *pix_fmts = ffmpeg_get_pix_fmts(c, codec);
944 if (pix_fmts) {
945 c->pix_fmt = pix_fmts[0];
946 }
947 else {
948 /* makes HuffYUV happy ... */
949 c->pix_fmt = AV_PIX_FMT_YUV422P;
950 }
951
952 const bool is_10_bpp = imf->depth == R_IMF_CHAN_DEPTH_10;
953 const bool is_12_bpp = imf->depth == R_IMF_CHAN_DEPTH_12;
954 const bool is_16_bpp = imf->depth == R_IMF_CHAN_DEPTH_16;
955
956 if (is_10_bpp) {
957 c->pix_fmt = AV_PIX_FMT_YUV420P10LE;
958 }
959 else if (is_12_bpp) {
960 c->pix_fmt = AV_PIX_FMT_YUV420P12LE;
961 }
962
963 if (context->ffmpeg_type == FFMPEG_XVID) {
964 /* Alas! */
965 c->pix_fmt = AV_PIX_FMT_YUV420P;
966 c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
967 }
968
969 if (codec_id == AV_CODEC_ID_H265) {
970 /* H.265 needs hvc1 tag for Apple compatibility, see
971 * https://trac.ffmpeg.org/wiki/Encode/H.265#FinalCutandApplestuffcompatibility
972 * Note that in case we are doing H.265 into an XviD container,
973 * this overwrites the tag set above. But that should not be what anyone does. */
974 c->codec_tag = MKTAG('h', 'v', 'c', '1');
975 }
976
977 /* Keep lossless encodes in the RGB domain. */
978 if (codec_id == AV_CODEC_ID_HUFFYUV) {
979 if (imf->planes == R_IMF_PLANES_RGBA) {
980 c->pix_fmt = AV_PIX_FMT_BGRA;
981 }
982 else {
983 c->pix_fmt = AV_PIX_FMT_RGB32;
984 }
985 }
986
987 if (codec_id == AV_CODEC_ID_DNXHD) {
989 /* Set the block decision algorithm to be of the highest quality ("rd" == 2). */
990 c->mb_decision = 2;
991 }
992 }
993
994 if (codec_id == AV_CODEC_ID_FFV1) {
995 if (imf->planes == R_IMF_PLANES_BW) {
996 c->pix_fmt = AV_PIX_FMT_GRAY8;
997 if (is_10_bpp) {
998 c->pix_fmt = AV_PIX_FMT_GRAY10;
999 }
1000 else if (is_12_bpp) {
1001 c->pix_fmt = AV_PIX_FMT_GRAY12;
1002 }
1003 else if (is_16_bpp) {
1004 c->pix_fmt = AV_PIX_FMT_GRAY16;
1005 }
1006 }
1007 else if (imf->planes == R_IMF_PLANES_RGBA) {
1008 c->pix_fmt = AV_PIX_FMT_RGB32;
1009 if (is_10_bpp) {
1010 c->pix_fmt = AV_PIX_FMT_GBRAP10;
1011 }
1012 else if (is_12_bpp) {
1013 c->pix_fmt = AV_PIX_FMT_GBRAP12;
1014 }
1015 else if (is_16_bpp) {
1016 c->pix_fmt = AV_PIX_FMT_GBRAP16;
1017 }
1018 }
1019 else { /* RGB */
1020 c->pix_fmt = AV_PIX_FMT_0RGB32;
1021 if (is_10_bpp) {
1022 c->pix_fmt = AV_PIX_FMT_GBRP10;
1023 }
1024 else if (is_12_bpp) {
1025 c->pix_fmt = AV_PIX_FMT_GBRP12;
1026 }
1027 else if (is_16_bpp) {
1028 c->pix_fmt = AV_PIX_FMT_GBRP16;
1029 }
1030 }
1031 }
1032
1033 if (codec_id == AV_CODEC_ID_QTRLE) {
1034 if (imf->planes == R_IMF_PLANES_BW) {
1035 c->pix_fmt = AV_PIX_FMT_GRAY8;
1036 }
1037 else if (imf->planes == R_IMF_PLANES_RGBA) {
1038 c->pix_fmt = AV_PIX_FMT_ARGB;
1039 }
1040 else { /* RGB */
1041 c->pix_fmt = AV_PIX_FMT_RGB24;
1042 }
1043 }
1044
1045 if (codec_id == AV_CODEC_ID_VP9 && imf->planes == R_IMF_PLANES_RGBA) {
1046 c->pix_fmt = AV_PIX_FMT_YUVA420P;
1047 }
1048 else if (ELEM(codec_id, AV_CODEC_ID_H264, AV_CODEC_ID_H265, AV_CODEC_ID_VP9, AV_CODEC_ID_AV1) &&
1049 (context->ffmpeg_crf == 0))
1050 {
1051 /* Use 4:4:4 instead of 4:2:0 pixel format for lossless rendering. */
1052 c->pix_fmt = AV_PIX_FMT_YUV444P;
1053 if (is_10_bpp) {
1054 c->pix_fmt = AV_PIX_FMT_YUV444P10LE;
1055 }
1056 else if (is_12_bpp) {
1057 c->pix_fmt = AV_PIX_FMT_YUV444P12LE;
1058 }
1059 }
1060
1061 if (codec_id == AV_CODEC_ID_PNG) {
1062 if (imf->planes == R_IMF_PLANES_BW) {
1063 c->pix_fmt = AV_PIX_FMT_GRAY8;
1064 }
1065 else if (imf->planes == R_IMF_PLANES_RGBA) {
1066 c->pix_fmt = AV_PIX_FMT_RGBA;
1067 }
1068 else { /* RGB */
1069 c->pix_fmt = AV_PIX_FMT_RGB24;
1070 }
1071 }
1072 if (codec_id == AV_CODEC_ID_PRORES) {
1073 if ((context->ffmpeg_profile >= FFM_PRORES_PROFILE_422_PROXY) &&
1074 (context->ffmpeg_profile <= FFM_PRORES_PROFILE_422_HQ))
1075 {
1076 c->profile = context->ffmpeg_profile;
1077 c->pix_fmt = AV_PIX_FMT_YUV422P10LE;
1078 }
1079 else if ((context->ffmpeg_profile >= FFM_PRORES_PROFILE_4444) &&
1080 (context->ffmpeg_profile <= FFM_PRORES_PROFILE_4444_XQ))
1081 {
1082 c->profile = context->ffmpeg_profile;
1083 c->pix_fmt = AV_PIX_FMT_YUV444P10LE;
1084
1085 if (imf->planes == R_IMF_PLANES_RGBA) {
1086 c->pix_fmt = AV_PIX_FMT_YUVA444P10LE;
1087 }
1088 }
1089 else {
1090 CLOG_ERROR(&LOG, "ffmpeg: invalid profile %d", context->ffmpeg_profile);
1091 }
1092 }
1093
1094 if (of->oformat->flags & AVFMT_GLOBALHEADER) {
1095 CLOG_STR_INFO(&LOG, "ffmpeg: using global video header");
1096 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
1097 }
1098
1099 /* Set colorspace based on display space of image. */
1100 const ColorSpace *display_colorspace = IMB_colormangement_display_get_color_space(
1101 &imf->view_settings, &imf->display_settings);
1102 set_colorspace_options(c, display_colorspace);
1103
1104 /* xasp & yasp got float lately... */
1105
1106 st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q((double(rd->xasp) / double(rd->yasp)),
1107 255);
1108 st->avg_frame_rate = av_inv_q(c->time_base);
1109
1110 if (codec->capabilities & AV_CODEC_CAP_OTHER_THREADS) {
1111 c->thread_count = 0;
1112 }
1113 else {
1114 c->thread_count = MOV_thread_count();
1115 }
1116
1117 if (codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) {
1118 c->thread_type = FF_THREAD_FRAME;
1119 }
1120 else if (codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) {
1121 c->thread_type = FF_THREAD_SLICE;
1122 }
1123
1124 int ret = avcodec_open2(c, codec, &opts);
1125
1126 if (ret < 0) {
1127 char error_str[AV_ERROR_MAX_STRING_SIZE];
1128 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1129 CLOG_ERROR(&LOG, "Couldn't initialize video codec: %s\n", error_str);
1130 BLI_strncpy(error, ffmpeg_last_error(), error_size);
1131 av_dict_free(&opts);
1132 avcodec_free_context(&c);
1133 context->video_codec = nullptr;
1134 return nullptr;
1135 }
1136 av_dict_free(&opts);
1137
1138 /* FFMPEG expects its data in the output pixel format. */
1139 context->current_frame = alloc_frame(c->pix_fmt, c->width, c->height);
1140
1141 if (c->pix_fmt == AV_PIX_FMT_RGBA && ELEM(c->colorspace, AVCOL_SPC_RGB, AVCOL_SPC_UNSPECIFIED)) {
1142 /* Output pixel format and colorspace is the same we use internally, no conversion needed. */
1143 context->img_convert_frame = nullptr;
1144 context->img_convert_ctx = nullptr;
1145 }
1146 else {
1147 /* Output pixel format is different, allocate frame for conversion.
1148 * Setup RGB->YUV conversion with proper coefficients, depending on range and colorspace. */
1149 const AVPixelFormat src_format = is_10_bpp || is_12_bpp || is_16_bpp ? AV_PIX_FMT_GBRAPF32LE :
1150 AV_PIX_FMT_RGBA;
1151 context->img_convert_frame = alloc_frame(src_format, c->width, c->height);
1152 context->img_convert_ctx = ffmpeg_sws_get_context(
1153 c->width,
1154 c->height,
1155 src_format,
1156 true,
1157 -1,
1158 c->width,
1159 c->height,
1160 c->pix_fmt,
1161 c->color_range == AVCOL_RANGE_JPEG,
1162 c->colorspace != AVCOL_SPC_RGB ? c->colorspace : -1,
1163 SWS_BICUBIC);
1164 }
1165
1166 avcodec_parameters_from_context(st->codecpar, c);
1167
1168 add_hdr_mastering_display_metadata(st->codecpar, c, imf);
1169
1170 context->video_time = 0.0f;
1171
1172 return st;
1173}
1174
1175static void ffmpeg_add_metadata_callback(void *data,
1176 const char *propname,
1177 char *propvalue,
1178 int /*propvalue_maxncpy*/)
1179{
1180 AVDictionary **metadata = (AVDictionary **)data;
1181 av_dict_set(metadata, propname, propvalue, 0);
1182}
1183
/* Open the movie output: resolve the file path, create the FFmpeg format
 * context, allocate the video/audio streams, open the output file and write
 * the container header.
 *
 * Returns true on success. On failure an error is pushed to `reports` and
 * everything allocated here is released again before returning false. */
static bool start_ffmpeg_impl(MovieWriter *context,
                              const Scene *scene,
                              const RenderData *rd,
                              const ImageFormatData *imf,
                              int rectx,
                              int recty,
                              const char *suffix,
                              ReportList *reports)
{
  /* Handle to the output file */
  AVFormatContext *of;
  const AVOutputFormat *fmt;
  char filepath[FILE_MAX], error[1024];
  const char **exts;
  int ret = 0;

  /* Copy the user-facing encoding settings from the render data into the
   * writer context. */
  context->ffmpeg_type = rd->ffcodecdata.type;
  context->ffmpeg_codec = mov_av_codec_id_get(rd->ffcodecdata.codec_id_get());
  context->ffmpeg_audio_codec = mov_av_codec_id_get(rd->ffcodecdata.audio_codec_id_get());
  context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
  context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
  context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
  context->ffmpeg_autosplit = (rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0;
  context->ffmpeg_crf = rd->ffcodecdata.constant_rate_factor;
  context->ffmpeg_preset = rd->ffcodecdata.ffmpeg_preset;
  context->ffmpeg_profile = 0;

  if ((rd->ffcodecdata.flags & FFMPEG_USE_MAX_B_FRAMES) != 0) {
    context->ffmpeg_max_b_frames = rd->ffcodecdata.max_b_frames;
  }

  /* Determine the correct filename */
  if (!ffmpeg_filepath_get(context, filepath, scene, rd, context->ffmpeg_preview, suffix, reports))
  {
    return false;
  }
  CLOG_INFO(&LOG,
            "ffmpeg: starting output to %s:\n"
            " type=%d, codec=%d, audio_codec=%d,\n"
            " video_bitrate=%d, audio_bitrate=%d,\n"
            " gop_size=%d, autosplit=%d\n"
            " width=%d, height=%d",
            filepath,
            context->ffmpeg_type,
            context->ffmpeg_codec,
            context->ffmpeg_audio_codec,
            context->ffmpeg_video_bitrate,
            context->ffmpeg_audio_bitrate,
            context->ffmpeg_gop_size,
            context->ffmpeg_autosplit,
            rectx,
            recty);

  /* Sanity checks for the output file extensions. */
  exts = get_file_extensions(context->ffmpeg_type);
  if (!exts) {
    BKE_report(reports, RPT_ERROR, "No valid formats found");
    return false;
  }

  fmt = av_guess_format(nullptr, exts[0], nullptr);
  if (!fmt) {
    BKE_report(reports, RPT_ERROR, "No valid formats found");
    return false;
  }

  of = avformat_alloc_context();
  if (!of) {
    BKE_report(reports, RPT_ERROR, "Cannot allocate FFmpeg format context");
    return false;
  }

  enum AVCodecID audio_codec = context->ffmpeg_audio_codec;
  enum AVCodecID video_codec = context->ffmpeg_codec;

  of->url = av_strdup(filepath);
  /* Check if we need to force change the codec because of file type codec restrictions */
  switch (context->ffmpeg_type) {
    case FFMPEG_OGG:
      video_codec = AV_CODEC_ID_THEORA;
      break;
    case FFMPEG_DV:
      video_codec = AV_CODEC_ID_DVVIDEO;
      break;
    case FFMPEG_MPEG1:
      video_codec = AV_CODEC_ID_MPEG1VIDEO;
      break;
    case FFMPEG_MPEG2:
      video_codec = AV_CODEC_ID_MPEG2VIDEO;
      break;
    case FFMPEG_H264:
      video_codec = AV_CODEC_ID_H264;
      break;
    case FFMPEG_XVID:
      video_codec = AV_CODEC_ID_MPEG4;
      break;
    case FFMPEG_FLV:
      video_codec = AV_CODEC_ID_FLV1;
      break;
    case FFMPEG_AV1:
      video_codec = AV_CODEC_ID_AV1;
      break;
    default:
      /* These containers are not restricted to any specific codec types.
       * Currently we expect these to be `.avi`, `.mov`, `.mkv`, and `.mp4`. */
      video_codec = context->ffmpeg_codec;
      break;
  }

  /* Returns after this must 'goto fail;' */

# if LIBAVFORMAT_VERSION_MAJOR >= 59
  of->oformat = fmt;
# else
  /* *DEPRECATED* 2022/08/01 For FFMPEG (<5.0) remove this else branch and the `ifdef` above. */
  of->oformat = (AVOutputFormat *)fmt;
# endif

  /* DV has strict frame-size requirements (720 wide, 480 NTSC / 576 PAL). */
  if (video_codec == AV_CODEC_ID_DVVIDEO) {
    if (rectx != 720) {
      BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
      goto fail;
    }
    if (rd->frs_sec != 25 && recty != 480) {
      BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
      goto fail;
    }
    if (rd->frs_sec == 25 && recty != 576) {
      BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
      goto fail;
    }
  }

  if (context->ffmpeg_type == FFMPEG_DV) {
    audio_codec = AV_CODEC_ID_PCM_S16LE;
    if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE &&
        rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2)
    {
      BKE_report(reports, RPT_ERROR, "FFmpeg only supports 48khz / stereo audio for DV!");
      goto fail;
    }
  }

  if (video_codec == AV_CODEC_ID_PRORES) {
    context->ffmpeg_profile = rd->ffcodecdata.ffmpeg_prores_profile;
  }

  if (video_codec != AV_CODEC_ID_NONE) {
    context->video_stream = alloc_video_stream(
        context, rd, imf, video_codec, of, rectx, recty, error, sizeof(error));
    CLOG_INFO(&LOG, "ffmpeg: alloc video stream %p", context->video_stream);
    if (!context->video_stream) {
      if (error[0]) {
        BKE_report(reports, RPT_ERROR, error);
        CLOG_INFO(&LOG, "ffmpeg: video stream error: %s", error);
      }
      else {
        BKE_report(reports, RPT_ERROR, "Error initializing video stream");
        CLOG_STR_INFO(&LOG, "ffmpeg: error initializing video stream");
      }
      goto fail;
    }
  }

  if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
    /* NOTE(review): two argument lines of this call appear to be missing from
     * this extract (likely the audio mix-rate/channel settings) — confirm
     * against the full source. */
    context->audio_stream = alloc_audio_stream(context,
                                               audio_codec,
                                               of,
                                               error,
                                               sizeof(error));
    if (!context->audio_stream) {
      if (error[0]) {
        BKE_report(reports, RPT_ERROR, error);
        CLOG_INFO(&LOG, "ffmpeg: audio stream error: %s", error);
      }
      else {
        BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
        CLOG_STR_INFO(&LOG, "ffmpeg: error initializing audio stream");
      }
      goto fail;
    }
  }
  if (!(fmt->flags & AVFMT_NOFILE)) {
    if (avio_open(&of->pb, filepath, AVIO_FLAG_WRITE) < 0) {
      BKE_report(reports, RPT_ERROR, "Could not open file for writing");
      CLOG_INFO(&LOG, "ffmpeg: could not open file %s for writing", filepath);
      goto fail;
    }
  }

  /* Copy the render stamp metadata into the container metadata dictionary. */
  if (context->stamp_data != nullptr) {
    /* NOTE(review): the call line (apparently `BKE_stamp_info_callback(`) is
     * missing from this extract — confirm against the full source. */
        &of->metadata, context->stamp_data, ffmpeg_add_metadata_callback, false);
  }

  ret = avformat_write_header(of, nullptr);
  if (ret < 0) {
    BKE_report(reports,
               RPT_ERROR,
               "Could not initialize streams, probably unsupported codec combination");
    char error_str[AV_ERROR_MAX_STRING_SIZE];
    av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
    CLOG_INFO(&LOG, "ffmpeg: could not write media header: %s", error_str);
    goto fail;
  }

  context->outfile = of;
  av_dump_format(of, 0, filepath, 1);

  return true;

fail:
  if (of->pb) {
    avio_close(of->pb);
  }

  context->video_stream = nullptr;
  context->audio_stream = nullptr;

  avformat_free_context(of);
  return false;
}
1408
1409/* Flush any pending frames. An encoder may use both past and future frames
1410 * to predict inter-frames (H.264 B-frames, for example); it can output
1411 * the frames in a different order from the one it was given. The delayed
1412 * frames must be flushed before we close the stream. */
1413static void flush_delayed_frames(AVCodecContext *c, AVStream *stream, AVFormatContext *outfile)
1414{
1415 char error_str[AV_ERROR_MAX_STRING_SIZE];
1416 AVPacket *packet = av_packet_alloc();
1417
1418 avcodec_send_frame(c, nullptr);
1419
1420 /* Get the packets frames. */
1421 int ret = 1;
1422 while (ret >= 0) {
1423 ret = avcodec_receive_packet(c, packet);
1424
1425 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1426 /* No more packets to flush. */
1427 break;
1428 }
1429 if (ret < 0) {
1430 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1431 CLOG_ERROR(&LOG, "Error encoding delayed frame: %s", error_str);
1432 break;
1433 }
1434
1435 packet->stream_index = stream->index;
1436 av_packet_rescale_ts(packet, c->time_base, stream->time_base);
1437# ifdef FFMPEG_USE_DURATION_WORKAROUND
1438 my_guess_pkt_duration(outfile, stream, packet);
1439# endif
1440
1441 int write_ret = av_interleaved_write_frame(outfile, packet);
1442 if (write_ret != 0) {
1443 av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret);
1444 CLOG_ERROR(&LOG, "Error writing delayed frame: %s", error_str);
1445 break;
1446 }
1447 }
1448
1449 av_packet_free(&packet);
1450}
1451
/* Compute the movie output file path into `filepath` (FILE_MAX bytes).
 *
 * Applies path-template variables, ensures the parent directory exists, adds
 * the auto-split counter when enabled, then appends/normalizes the container
 * extension, the frame range and the (view) `suffix`. `context` may be null;
 * then no auto-split counter is added. Returns false on failure (template
 * errors are reported through `reports`). */
static bool ffmpeg_filepath_get(MovieWriter *context,
                                char filepath[FILE_MAX],
                                const Scene *scene,
                                const RenderData *rd,
                                bool preview,
                                const char *suffix,
                                ReportList *reports)
{
  char autosplit[20];

  const char **exts = get_file_extensions(rd->ffcodecdata.type);
  const char **fe = exts;
  int sfra, efra;

  if (!filepath || !exts) {
    return false;
  }

  /* Frame range: preview renders use the preview start/end frames. */
  if (preview) {
    sfra = rd->psfra;
    efra = rd->pefra;
  }
  else {
    sfra = rd->sfra;
    efra = rd->efra;
  }

  BLI_strncpy(filepath, rd->pic, FILE_MAX);

  /* NOTE(review): the declaration of `template_variables`, the start of the
   * `BKE_path_apply_template(` call assigning `errors`, and a following
   * path-handling line appear to be missing from this extract — confirm
   * against the full source. */
  BKE_add_template_variables_general(template_variables, &scene->id);
  BKE_add_template_variables_for_render_path(template_variables, *scene);

      filepath, FILE_MAX, template_variables);
  if (!errors.is_empty()) {
    BKE_report_path_template_errors(reports, RPT_ERROR, filepath, errors);
    return false;
  }


  if (!BLI_file_ensure_parent_dir_exists(filepath)) {
    CLOG_ERROR(&LOG, "Couldn't create directory for file %s: %s", filepath, std::strerror(errno));
    return false;
  }

  autosplit[0] = '\0';

  /* Encode the split counter into the name when auto-splitting output. */
  if ((rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0) {
    if (context) {
      SNPRINTF(autosplit, "_%03d", context->ffmpeg_autosplit_count);
    }
  }

  if (rd->scemode & R_EXTENSION) {
    /* Check whether the path already ends in one of the known extensions. */
    while (*fe) {
      if (BLI_strcasecmp(filepath + strlen(filepath) - strlen(*fe), *fe) == 0) {
        break;
      }
      fe++;
    }

    if (*fe == nullptr) {
      /* No known extension yet: append split counter, frame range and the
       * preferred extension for this container type. */
      BLI_strncat(filepath, autosplit, FILE_MAX);

      BLI_path_frame_range(filepath, FILE_MAX, sfra, efra, 4);
      BLI_strncat(filepath, *exts, FILE_MAX);
    }
    else {
      /* Known extension present: insert the split counter just before it. */
      *(filepath + strlen(filepath) - strlen(*fe)) = '\0';
      BLI_strncat(filepath, autosplit, FILE_MAX);
      BLI_strncat(filepath, *fe, FILE_MAX);
    }
  }
  else {
    if (BLI_path_frame_check_chars(filepath)) {
      BLI_path_frame_range(filepath, FILE_MAX, sfra, efra, 4);
    }

    BLI_strncat(filepath, autosplit, FILE_MAX);
  }

  /* Append the multi-view suffix (if any). */
  BLI_path_suffix(filepath, FILE_MAX, suffix, "");

  return true;
}
1546
1547static void ffmpeg_get_filepath(char filepath[/*FILE_MAX*/ 1024],
1548 const Scene *scene,
1549 const RenderData *rd,
1550 bool preview,
1551 const char *suffix,
1552 ReportList *reports)
1553{
1554 ffmpeg_filepath_get(nullptr, filepath, scene, rd, preview, suffix, reports);
1555}
1556
/* Allocate a MovieWriter and start FFmpeg output for the given scene and
 * render settings. Returns null (after releasing the context) when either
 * the video or the audio setup fails. */
static MovieWriter *ffmpeg_movie_open(const Scene *scene,
                                      const RenderData *rd,
                                      const ImageFormatData *imf,
                                      int rectx,
                                      int recty,
                                      ReportList *reports,
                                      bool preview,
                                      const char *suffix)
{
  MovieWriter *context = MEM_new<MovieWriter>("new FFMPEG context");

  /* Defaults; most of these are overwritten from `rd` in start_ffmpeg_impl(). */
  context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
  context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
  context->ffmpeg_video_bitrate = 1150;
  context->ffmpeg_audio_bitrate = 128;
  context->ffmpeg_gop_size = 12;
  context->ffmpeg_autosplit = false;
  context->stamp_data = nullptr;
  context->audio_time_total = 0.0;

  context->ffmpeg_autosplit_count = 0;
  context->ffmpeg_preview = preview;
  context->stamp_data = BKE_stamp_info_from_scene_static(scene);

  bool success = start_ffmpeg_impl(context, scene, rd, imf, rectx, recty, suffix, reports);

  if (success) {
    /* NOTE(review): some argument lines of this call appear to be missing
     * from this extract — confirm against the full source. */
    success = movie_audio_open(context,
                               scene,
                               preview ? rd->psfra : rd->sfra,
                               reports);
  }

  if (!success) {
    ffmpeg_movie_close(context);
    return nullptr;
  }
  return context;
}
1598
1599static void end_ffmpeg_impl(MovieWriter *context, bool is_autosplit);
1600
1601static bool ffmpeg_movie_append(MovieWriter *context,
1602 const Scene *scene,
1603 const RenderData *rd,
1604 const ImageFormatData *imf,
1605 int start_frame,
1606 int frame,
1607 const ImBuf *image,
1608 const char *suffix,
1609 ReportList *reports)
1610{
1611 AVFrame *avframe;
1612 bool success = true;
1613
1614 CLOG_INFO(&LOG, "ffmpeg: writing frame #%i (%ix%i)", frame, image->x, image->y);
1615
1616 if (context->video_stream) {
1617 avframe = generate_video_frame(context, image);
1618 success = (avframe && write_video_frame(context, avframe, reports));
1619 }
1620
1621 if (context->audio_stream) {
1622 /* Add +1 frame because we want to encode audio up until the next video frame. */
1623 write_audio_frames(
1624 context, (frame - start_frame + 1) / (double(rd->frs_sec) / double(rd->frs_sec_base)));
1625 }
1626
1627 if (context->ffmpeg_autosplit) {
1628 if (avio_tell(context->outfile->pb) > ffmpeg_autosplit_size) {
1629 end_ffmpeg_impl(context, true);
1630 context->ffmpeg_autosplit_count++;
1631
1632 success &= start_ffmpeg_impl(context, scene, rd, imf, image->x, image->y, suffix, reports);
1633 }
1634 }
1635
1636 return success;
1637}
1638
/* Finish writing: flush delayed frames, write the container trailer and
 * release all FFmpeg state owned by `context`. `is_autosplit` is forwarded to
 * the audio close so an intermediate close between auto-split segments can be
 * handled differently from the final one. The teardown order matters: drain
 * encoders first, then write the trailer, then close the file and free the
 * codec/format contexts. */
static void end_ffmpeg_impl(MovieWriter *context, bool is_autosplit)
{
  CLOG_STR_INFO(&LOG, "ffmpeg: closing");

  movie_audio_close(context, is_autosplit);

  /* Drain encoders before the trailer: codecs with B-frames may still hold
   * packets that must land in the file. */
  if (context->video_stream) {
    CLOG_STR_INFO(&LOG, "ffmpeg: flush delayed video frames");
    flush_delayed_frames(context->video_codec, context->video_stream, context->outfile);
  }

  if (context->audio_stream) {
    CLOG_STR_INFO(&LOG, "ffmpeg: flush delayed audio frames");
    flush_delayed_frames(context->audio_codec, context->audio_stream, context->outfile);
  }

  if (context->outfile) {
    av_write_trailer(context->outfile);
  }

  /* Close the video codec */

  context->video_stream = nullptr;
  context->audio_stream = nullptr;

  av_frame_free(&context->current_frame);
  av_frame_free(&context->img_convert_frame);

  /* Close the actual output file, unless the format has no file of its own. */
  if (context->outfile != nullptr && context->outfile->oformat) {
    if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
      avio_close(context->outfile->pb);
    }
  }

  if (context->video_codec != nullptr) {
    avcodec_free_context(&context->video_codec);
    context->video_codec = nullptr;
  }
  if (context->audio_codec != nullptr) {
    avcodec_free_context(&context->audio_codec);
    context->audio_codec = nullptr;
  }

  if (context->outfile != nullptr) {
    avformat_free_context(context->outfile);
    context->outfile = nullptr;
  }
  if (context->audio_input_buffer != nullptr) {
    av_free(context->audio_input_buffer);
    context->audio_input_buffer = nullptr;
  }

  if (context->audio_deinterleave_buffer != nullptr) {
    av_free(context->audio_deinterleave_buffer);
    context->audio_deinterleave_buffer = nullptr;
  }

  if (context->img_convert_ctx != nullptr) {
    ffmpeg_sws_release_context(context->img_convert_ctx);
    context->img_convert_ctx = nullptr;
  }
}
1701
1702static void ffmpeg_movie_close(MovieWriter *context)
1703{
1704 if (context == nullptr) {
1705 return;
1706 }
1707 end_ffmpeg_impl(context, false);
1708 if (context->stamp_data) {
1709 BKE_stamp_data_free(context->stamp_data);
1710 }
1711 MEM_delete(context);
1712}
1713
1714#endif /* WITH_FFMPEG */
1715
1716MovieWriter *MOV_write_begin(const Scene *scene,
1717 const RenderData *rd,
1718 const ImageFormatData *imf,
1719 int rectx,
1720 int recty,
1721 ReportList *reports,
1722 bool preview,
1723 const char *suffix)
1724{
1725 if (imf->imtype != R_IMF_IMTYPE_FFMPEG) {
1726 BKE_report(reports, RPT_ERROR, "Image format is not a movie format");
1727 return nullptr;
1728 }
1729
1730 MovieWriter *writer = nullptr;
1731#ifdef WITH_FFMPEG
1732 writer = ffmpeg_movie_open(scene, rd, imf, rectx, recty, reports, preview, suffix);
1733#else
1734 UNUSED_VARS(scene, rd, imf, rectx, recty, reports, preview, suffix);
1735#endif
1736 return writer;
1737}
1738
1739bool MOV_write_append(MovieWriter *writer,
1740 const Scene *scene,
1741 const RenderData *rd,
1742 const ImageFormatData *imf,
1743 int start_frame,
1744 int frame,
1745 const ImBuf *image,
1746 const char *suffix,
1747 ReportList *reports)
1748{
1749 if (writer == nullptr) {
1750 return false;
1751 }
1752
1753#ifdef WITH_FFMPEG
1754 bool ok = ffmpeg_movie_append(
1755 writer, scene, rd, imf, start_frame, frame, image, suffix, reports);
1756 return ok;
1757#else
1758 UNUSED_VARS(scene, rd, imf, start_frame, frame, image, suffix, reports);
1759 return false;
1760#endif
1761}
1762
1763void MOV_write_end(MovieWriter *writer)
1764{
1765#ifdef WITH_FFMPEG
1766 if (writer) {
1767 ffmpeg_movie_close(writer);
1768 }
1769#else
1770 UNUSED_VARS(writer);
1771#endif
1772}
1773
/* Public entry point: compute the movie output path for the given settings.
 * Without FFmpeg support (or when the settings do not select the FFmpeg
 * movie path) `filepath` is set to an empty string. */
void MOV_filepath_from_settings(char filepath[/*FILE_MAX*/ 1024],
                                const Scene *scene,
                                const RenderData *rd,
                                bool preview,
                                const char *suffix,
                                ReportList *reports)
{
#ifdef WITH_FFMPEG
  /* NOTE(review): a conditional line (matching the closing brace below,
   * presumably testing for the FFmpeg image type) is missing from this
   * extract — confirm against the full source. */
  ffmpeg_get_filepath(filepath, scene, rd, preview, suffix, reports);
  return;
  }
#else
  UNUSED_VARS(scene, rd, preview, suffix, reports);
#endif
  filepath[0] = '\0';
}
StampData * BKE_stamp_info_from_scene_static(const Scene *scene)
void BKE_stamp_data_free(StampData *stamp_data)
void BKE_stamp_info_callback(void *data, StampData *stamp_data, StampCallback callback, bool noskip)
const char * BKE_main_blendfile_path_from_global()
Definition main.cc:892
Functions and classes for evaluating template expressions in filepaths.
void BKE_report_path_template_errors(ReportList *reports, eReportType report_type, blender::StringRef path, blender::Span< blender::bke::path_templates::Error > errors)
void BKE_add_template_variables_for_render_path(blender::bke::path_templates::VariableMap &variables, const Scene &scene)
void BKE_add_template_variables_general(blender::bke::path_templates::VariableMap &variables, const ID *path_owner_id)
blender::Vector< blender::bke::path_templates::Error > BKE_path_apply_template(char *path, int path_maxncpy, const blender::bke::path_templates::VariableMap &template_variables)
@ RPT_ERROR
Definition BKE_report.hh:39
void BKE_report(ReportList *reports, eReportType type, const char *message)
Definition report.cc:153
blender::ocio::ColorSpace ColorSpace
Definition BLF_api.hh:38
#define BLI_assert(a)
Definition BLI_assert.h:46
#define BLI_assert_msg(a, msg)
Definition BLI_assert.h:53
File and directory operations.
bool BLI_file_ensure_parent_dir_exists(const char *filepath) ATTR_NONNULL(1)
Definition fileops_c.cc:452
void BLI_kdtree_nd_ free(KDTree *tree)
MINLINE int power_of_2_min_i(int n)
MINLINE int min_ii(int a, int b)
MINLINE int max_ii(int a, int b)
MINLINE int is_power_of_2_i(int n)
MINLINE unsigned int log2_floor_u(unsigned int x)
MINLINE int clamp_i(int value, int min, int max)
MINLINE void premul_to_straight_v4_v4(float straight[4], const float premul[4])
ATTR_WARN_UNUSED_RESULT const size_t num
bool BLI_path_abs(char path[FILE_MAX], const char *basepath) ATTR_NONNULL(1
bool bool BLI_path_suffix(char *path, size_t path_maxncpy, const char *suffix, const char *sep) ATTR_NONNULL(1
bool void bool BLI_path_frame_check_chars(const char *path) ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT
#define FILE_MAX
bool BLI_path_frame_range(char *path, size_t path_maxncpy, int sta, int end, int digits) ATTR_NONNULL(1)
#define SNPRINTF(dst, format,...)
Definition BLI_string.h:604
char char size_t char * BLI_strncat(char *__restrict dst, const char *__restrict src, size_t dst_maxncpy) ATTR_NONNULL(1
int char char int BLI_strcasecmp(const char *s1, const char *s2) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1
char * BLI_strncpy(char *__restrict dst, const char *__restrict src, size_t dst_maxncpy) ATTR_NONNULL(1
size_t size_t size_t BLI_snprintf_utf8(char *__restrict dst, size_t dst_maxncpy, const char *__restrict format,...) ATTR_NONNULL(1
#define SNPRINTF_UTF8(dst, format,...)
unsigned int uint
#define UNUSED_VARS(...)
#define ELEM(...)
#define STREQ(a, b)
#define CLOG_ERROR(clg_ref,...)
Definition CLG_log.h:188
#define CLOG_WARN(clg_ref,...)
Definition CLG_log.h:189
#define CLOG_STR_INFO(clg_ref, str)
Definition CLG_log.h:198
#define CLOG_INFO(clg_ref,...)
Definition CLG_log.h:190
@ FFM_PRESET_GOOD
@ FFM_PRESET_REALTIME
@ FFM_PRESET_BEST
@ R_IMF_PLANES_RGBA
@ R_IMF_PLANES_BW
@ FFM_CRF_LOW
@ FFM_CRF_MEDIUM
@ FFM_CRF_VERYLOW
@ FFM_CRF_LOSSLESS
@ FFM_CRF_PERC_LOSSLESS
@ FFM_CRF_LOWEST
@ FFM_CRF_HIGH
@ R_IMF_CHAN_DEPTH_16
@ R_IMF_CHAN_DEPTH_12
@ R_IMF_CHAN_DEPTH_10
@ FFMPEG_LOSSLESS_OUTPUT
@ FFMPEG_AUTOSPLIT_OUTPUT
@ FFMPEG_USE_MAX_B_FRAMES
@ FFM_PRORES_PROFILE_422_PROXY
@ FFM_PRORES_PROFILE_4444_XQ
@ FFM_PRORES_PROFILE_422_HQ
@ FFM_PRORES_PROFILE_4444
@ R_IMF_IMTYPE_FFMPEG
@ R_EXTENSION
bool IMB_colormanagement_space_to_cicp(const ColorSpace *colorspace, const ColorManagedFileOutput output, const bool rgb_matrix, int cicp[4])
const ColorSpace * IMB_colormangement_display_get_color_space(const ColorManagedViewSettings *view_settings, const ColorManagedDisplaySettings *display_settings)
void IMB_assign_float_buffer(ImBuf *ibuf, float *buffer_data, ImBufOwnership ownership)
bool IMB_alpha_affects_rgb(const ImBuf *ibuf)
Definition conversion.cc:66
void IMB_buffer_float_from_byte(float *rect_to, const unsigned char *rect_from, int profile_to, int profile_from, bool predivide, int width, int height, int stride_to, int stride_from)
void IMB_freeImBuf(ImBuf *ibuf)
size_t IMB_get_pixel_count(const ImBuf *ibuf)
Get the length of the data of the given image buffer in pixels.
ImBuf * IMB_allocImBuf(unsigned int x, unsigned int y, unsigned char planes, unsigned int flags)
#define IB_PROFILE_SRGB
@ IB_TAKE_OWNERSHIP
@ IB_alphamode_channel_packed
Read Guarded memory(de)allocation.
@ FFMPEG_H264
Definition MOV_enums.hh:18
@ FFMPEG_FLV
Definition MOV_enums.hh:20
@ FFMPEG_MPEG1
Definition MOV_enums.hh:12
@ FFMPEG_AV1
Definition MOV_enums.hh:25
@ FFMPEG_DV
Definition MOV_enums.hh:17
@ FFMPEG_MPEG2
Definition MOV_enums.hh:13
@ FFMPEG_MKV
Definition MOV_enums.hh:21
@ FFMPEG_MOV
Definition MOV_enums.hh:16
@ FFMPEG_MPEG4
Definition MOV_enums.hh:14
@ FFMPEG_WEBM
Definition MOV_enums.hh:24
@ FFMPEG_AVI
Definition MOV_enums.hh:15
@ FFMPEG_XVID
Definition MOV_enums.hh:19
@ FFMPEG_OGG
Definition MOV_enums.hh:22
BMesh const char void * data
long long int int64_t
static constexpr int64_t not_found
constexpr int64_t find(char c, int64_t pos=0) const
bool is_empty() const
nullptr float
FFMPEG_INLINE size_t ffmpeg_get_buffer_alignment()
FFMPEG_INLINE enum AVPixelFormat * ffmpeg_get_pix_fmts(struct AVCodecContext *context, const AVCodec *codec)
FFMPEG_INLINE void my_guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
#define round
format
#define LOG(level)
Definition log.h:97
void * MEM_malloc_arrayN(size_t len, size_t size, const char *str)
Definition mallocn.cc:133
ccl_device_inline float2 fabs(const float2 a)
static void error(const char *str)
bool MOV_codec_supports_alpha(IMB_Ffmpeg_Codec_ID codec_id, int ffmpeg_profile)
bool MOV_codec_supports_crf(IMB_Ffmpeg_Codec_ID codec_id)
void MOV_write_end(MovieWriter *writer)
void MOV_filepath_from_settings(char filepath[1024], const Scene *scene, const RenderData *rd, bool preview, const char *suffix, ReportList *reports)
MovieWriter * MOV_write_begin(const Scene *scene, const RenderData *rd, const ImageFormatData *imf, int rectx, int recty, ReportList *reports, bool preview, const char *suffix)
bool MOV_write_append(MovieWriter *writer, const Scene *scene, const RenderData *rd, const ImageFormatData *imf, int start_frame, int frame, const ImBuf *image, const char *suffix, ReportList *reports)
int context(const bContext *C, const char *member, bContextDataResult *result)
const btScalar eps
Definition poly34.cpp:11
return ret
#define fabsf
#define sqrtf
const ColorSpace * colorspace
ImBufFloatBuffer float_buffer
ImBufByteBuffer byte_buffer
unsigned char planes
ColorManagedDisplaySettings display_settings
ColorManagedViewSettings view_settings
struct ImageFormatData im_format
char pic[1024]
struct FFMpegCodecData ffcodecdata
i
Definition text_draw.cc:230