obs-studio/libobs/media-io/video-scaler-ffmpeg.c
jp9000 fd37d9e9a8 Implement encoder interface (still preliminary)
- Implement OBS encoder interface.  It was previously incomplete, but
   is now reaching some level of completeness, though it should probably
   still be considered preliminary.

   I had originally implemented it so that encoders only have a 'reset'
   function to reset their parameters, but I felt that having both a
   'start' and 'stop' function would be useful.

   Each encoder is now assigned to a specific video/audio media output
   rather than implicitly assigned to the main obs video/audio contexts.
   This allows separate encoder contexts that aren't necessarily tied to
   the main video/audio context (which is useful for things such as
   recording specific sources).  This will probably have to be done for
   regular obs outputs as well.

   When creating an encoder, you must now explicitly state whether that
   encoder is an audio or video encoder.

   Audio and video can optionally be automatically converted depending
   on what the encoder specifies.

   When something 'attaches' to an encoder, the first attachment starts
   the encoder, and the encoder automatically attaches to the media
   output context associated with it.  Subsequent attachments won't have
   the same effect; they will just start receiving the same encoder data
   at the next keyframe (along with SEI, if any).  When detaching from
   the encoder, the last detachment fully stops the encoder and detaches
   it from the media output context associated with the encoder (see the
   sketch after this commit message).

   SEI must actually be exported separately; because new encoder
   attachments may not always be at the beginning of the stream, the
   first keyframe they get must have that SEI data in it.  If the
   encoder has SEI data, it only needs to add one small function that
   queries that SEI data, and that data will then be handled
   automatically by libobs for all subsequent encoder attachments.

 - Implement x264 encoder plugin, move x264 files to separate plugin to
   separate necessary dependencies.

 - Change video/audio frame output structures to not use const
   qualifiers to prevent issues with non-const function usage elsewhere.
   This was an issue when writing the x264 encoder, as the x264 encoder
   expects non-const frame data.

   Change stagesurf_map to return a non-const data type to prevent this
   as well.

 - Change full range parameter of video scaler to be an enum rather than
   boolean
2014-03-16 16:21:34 -07:00
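The attach/detach lifetime described in the commit message boils down to a first-attachment-starts / last-detachment-stops pattern. The following is a stripped-down sketch of that bookkeeping only; the names and struct layout are hypothetical and are not the actual libobs encoder API from this commit.

#include <stdbool.h>
#include <stddef.h>

struct example_encoder {
	size_t attached; /* number of active attachments */
	bool   started;  /* whether encoding is currently running */
};

static void example_encoder_attach(struct example_encoder *enc)
{
	if (enc->attached++ == 0) {
		/* first attachment: start the encoder and hook it up to
		 * the media output context associated with it */
		enc->started = true;
	}
	/* later attachments simply start receiving packets at the next
	 * keyframe, with any stored SEI data delivered along with it */
}

static void example_encoder_detach(struct example_encoder *enc)
{
	if (enc->attached > 0 && --enc->attached == 0) {
		/* last detachment: stop the encoder and detach it from
		 * the media output context */
		enc->started = false;
	}
}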


/******************************************************************************
    Copyright (C) 2014 by Hugh Bailey <obs.jim@gmail.com>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include "../util/bmem.h"
#include "video-scaler.h"

#include <libswscale/swscale.h>

struct video_scaler {
	struct SwsContext *swscale;
	int                src_height;
};

static inline enum AVPixelFormat get_ffmpeg_video_format(
		enum video_format format)
{
	switch (format) {
	case VIDEO_FORMAT_NONE: return AV_PIX_FMT_NONE;
	case VIDEO_FORMAT_I420: return AV_PIX_FMT_YUV420P;
	case VIDEO_FORMAT_NV12: return AV_PIX_FMT_NV12;
	case VIDEO_FORMAT_YVYU: return AV_PIX_FMT_NONE;
	case VIDEO_FORMAT_YUY2: return AV_PIX_FMT_YUYV422;
	case VIDEO_FORMAT_UYVY: return AV_PIX_FMT_UYVY422;
	case VIDEO_FORMAT_RGBA: return AV_PIX_FMT_RGBA;
	case VIDEO_FORMAT_BGRA: return AV_PIX_FMT_BGRA;
	case VIDEO_FORMAT_BGRX: return AV_PIX_FMT_BGRA;
	}

	return AV_PIX_FMT_NONE;
}

static inline int get_ffmpeg_scale_type(enum video_scale_type type)
{
	switch (type) {
	case VIDEO_SCALE_DEFAULT:       return SWS_FAST_BILINEAR;
	case VIDEO_SCALE_POINT:         return SWS_POINT;
	case VIDEO_SCALE_FAST_BILINEAR: return SWS_FAST_BILINEAR;
	case VIDEO_SCALE_BILINEAR:      return SWS_BILINEAR | SWS_AREA;
	case VIDEO_SCALE_BICUBIC:       return SWS_BICUBIC;
	}

	return SWS_POINT;
}

static inline const int *get_ffmpeg_coeffs(enum video_colorspace cs)
{
	switch (cs) {
	case VIDEO_CS_DEFAULT: return sws_getCoefficients(SWS_CS_ITU601);
	case VIDEO_CS_601:     return sws_getCoefficients(SWS_CS_ITU601);
	case VIDEO_CS_709:     return sws_getCoefficients(SWS_CS_ITU709);
	}

	return sws_getCoefficients(SWS_CS_ITU601);
}

static inline int get_ffmpeg_range_type(enum video_range_type type)
{
	switch (type) {
	case VIDEO_RANGE_DEFAULT: return 0;
	case VIDEO_RANGE_PARTIAL: return 0;
	case VIDEO_RANGE_FULL:    return 1;
	}

	return 0;
}

#define FIXED_1_0 (1<<16)

int video_scaler_create(video_scaler_t *scaler_out,
		const struct video_scale_info *dst,
		const struct video_scale_info *src,
		enum video_scale_type type)
{
	enum AVPixelFormat format_src = get_ffmpeg_video_format(src->format);
	enum AVPixelFormat format_dst = get_ffmpeg_video_format(dst->format);
	int                scale_type = get_ffmpeg_scale_type(type);
	const int         *coeff_src  = get_ffmpeg_coeffs(src->colorspace);
	const int         *coeff_dst  = get_ffmpeg_coeffs(dst->colorspace);
	int                range_src  = get_ffmpeg_range_type(src->range);
	int                range_dst  = get_ffmpeg_range_type(dst->range);
	struct video_scaler *scaler;
	int ret;

	if (!scaler_out)
		return VIDEO_SCALER_FAILED;

	if (format_src == AV_PIX_FMT_NONE ||
	    format_dst == AV_PIX_FMT_NONE)
		return VIDEO_SCALER_BAD_CONVERSION;

	scaler = bzalloc(sizeof(struct video_scaler));
	scaler->src_height = src->height;

	scaler->swscale = sws_getCachedContext(NULL,
			src->width, src->height, format_src,
			dst->width, dst->height, format_dst,
			scale_type, NULL, NULL, NULL);
	if (!scaler->swscale) {
		blog(LOG_ERROR, "video_scaler_create: Could not create "
		                "swscale");
		goto fail;
	}

	ret = sws_setColorspaceDetails(scaler->swscale,
			coeff_src, range_src,
			coeff_dst, range_dst,
			0, FIXED_1_0, FIXED_1_0);
	if (ret < 0) {
		blog(LOG_DEBUG, "video_scaler_create: "
		                "sws_setColorspaceDetails failed, ignoring");
	}

	*scaler_out = scaler;
	return VIDEO_SCALER_SUCCESS;

fail:
	video_scaler_destroy(scaler);
	return VIDEO_SCALER_FAILED;
}

void video_scaler_destroy(video_scaler_t scaler)
{
	if (scaler) {
		sws_freeContext(scaler->swscale);
		bfree(scaler);
	}
}

bool video_scaler_scale(video_scaler_t scaler,
		uint8_t *output[], const uint32_t out_linesize[],
		const uint8_t *const input[], const uint32_t in_linesize[])
{
	if (!scaler)
		return false;

	int ret = sws_scale(scaler->swscale,
			input, (const int *)in_linesize,
			0, scaler->src_height,
			output, (const int *)out_linesize);
	if (ret <= 0) {
		blog(LOG_ERROR, "video_scaler_scale: sws_scale failed: %d",
				ret);
		return false;
	}

	return true;
}
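
A minimal usage sketch for the API implemented above, assuming the video_scale_info fields accessed in video_scaler_create() (format, width, height, colorspace, range) and the usual three-plane I420 layout for the output; the helper below and its fixed buffer sizes are illustrative, not part of libobs:

#include "../util/bmem.h"
#include "video-scaler.h"

/* Convert one 1280x720 RGBA frame to 640x360 I420. */
static bool scale_one_frame(const uint8_t *rgba, uint32_t rgba_linesize)
{
	struct video_scale_info src = {
		.format     = VIDEO_FORMAT_RGBA,
		.width      = 1280,
		.height     = 720,
		.colorspace = VIDEO_CS_DEFAULT,
		.range      = VIDEO_RANGE_DEFAULT,
	};
	struct video_scale_info dst = {
		.format     = VIDEO_FORMAT_I420,
		.width      = 640,
		.height     = 360,
		.colorspace = VIDEO_CS_601,
		.range      = VIDEO_RANGE_PARTIAL,
	};

	video_scaler_t scaler = NULL;
	if (video_scaler_create(&scaler, &dst, &src, VIDEO_SCALE_BILINEAR)
			!= VIDEO_SCALER_SUCCESS)
		return false;

	/* I420 output: full-size Y plane plus quarter-size U and V planes */
	uint8_t *out[3] = {
		bmalloc(640 * 360),
		bmalloc(320 * 180),
		bmalloc(320 * 180),
	};
	const uint32_t out_linesize[3] = {640, 320, 320};

	/* packed RGBA input is a single plane */
	const uint8_t *in[1]          = {rgba};
	const uint32_t in_linesize[1] = {rgba_linesize};

	bool success = video_scaler_scale(scaler, out, out_linesize,
			in, in_linesize);

	video_scaler_destroy(scaler);
	bfree(out[0]);
	bfree(out[1]);
	bfree(out[2]);
	return success;
}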