├── Makefile ├── README.txt ├── drmprime_out.c ├── drmprime_out.h └── hello_drmprime.c /Makefile: -------------------------------------------------------------------------------- 1 | ifndef FFINSTALL 2 | FFINSTALL=/usr 3 | endif 4 | CFLAGS=-I$(FFINSTALL)/include/arm-linux-gnueabihf -I/usr/include/libdrm 5 | LDFLAGS=-L$(FFINSTALL)/lib/arm-linux-gnueabihf 6 | LDLIBS=-lavcodec -lavfilter -lavutil -lavformat -ldrm -lpthread 7 | 8 | hello_drmprime: hello_drmprime.o drmprime_out.o 9 | 10 | -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | # --- To build & test --- 2 | 3 | # Step 1 - get libraries: 4 | 5 | sudo apt install libavcodec-dev libavformat-dev libavfilter-dev libdrm-dev 6 | 7 | # Make 8 | 9 | make 10 | 11 | # Get test files 12 | 13 | wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc.mkv 14 | wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc-10bit.mkv 15 | 16 | # Test 17 | 18 | # Enable the V4L2 decoder 19 | sudo dtoverlay rpivid-v4l2 20 | 21 | # Stop using X if you were 22 | # to switch to a no-X window 23 | 24 | # Run the code 25 | ./hello_drmprime ~/jellyfish-3-mbps-hd-hevc.mkv 26 | 27 | # You should see jellyfish 28 | 29 | # So you don't have to do the dtoverlay step every time 30 | # Enable H265 V4L2 request decoder 31 | # add "dtoverlay=rpivid-v4l2" to /boot/config.txt 32 | # You may also want to add more CMA if you are going to try 4k videos 33 | # Change the "dtoverlay=vc4-fkms-v3d" line in config.txt to read 34 | # "dtoverlay=vc4-fkms-v3d,cma-512" 35 | reboot 36 | # Check it has turned up 37 | ls -la /dev/video* 38 | # This should include video19 39 | # crw-rw----+ 1 root video 81, 7 Aug 4 17:25 /dev/video19 40 | 41 | # --- Notes --- 42 | 43 | This is a trivial example prog on how to get DRM_PRIME frames out of ffmpeg 44 | and how to display them using drm. It makes no attempt to pace the video or 45 | scale it correctly, video will just be displayed at one frame per vsync 46 | (assuming that decode is keeping pace) and stretched to the edge of the screen. 47 | 48 | 49 | Current options: 50 | 51 | -l | --loop 52 | Play the filelist times 53 | 54 | -f | --frames 55 | Only play the first frames of each file 56 | 57 | -o 58 | Dump the raw output frame (after filter) to 59 | 60 | --deinterlace 61 | Apply the deinterlace filter to the stream before output 62 | 63 | 64 | -------------------------------------------------------------------------------- /drmprime_out.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 John Cox for Raspberry Pi Trading 3 | * 4 | * This file is part of FFmpeg. 5 | * 6 | * FFmpeg is free software; you can redistribute it and/or 7 | * modify it under the terms of the GNU Lesser General Public 8 | * License as published by the Free Software Foundation; either 9 | * version 2.1 of the License, or (at your option) any later version. 10 | * 11 | * FFmpeg is distributed in the hope that it will be useful, 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 | * Lesser General Public License for more details. 15 | * 16 | * You should have received a copy of the GNU Lesser General Public 17 | * License along with FFmpeg; if not, write to the Free Software 18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 19 | */ 20 | 21 | 22 | // *** This module is a work in progress and its utility is strictly 23 | // limited to testing. 24 | 25 | #include 26 | #include 27 | #include 28 | 29 | #include 30 | #include 31 | 32 | #include 33 | #include 34 | 35 | #include "libavutil/frame.h" 36 | #include "libavutil/hwcontext.h" 37 | #include "libavutil/hwcontext_drm.h" 38 | #include "libavutil/pixdesc.h" 39 | 40 | 41 | #define TRACE_ALL 0 42 | 43 | #define DRM_MODULE "vc4" 44 | 45 | #define ERRSTR strerror(errno) 46 | 47 | struct drm_setup 48 | { 49 | int conId; 50 | uint32_t crtcId; 51 | int crtcIdx; 52 | uint32_t planeId; 53 | unsigned int out_fourcc; 54 | struct 55 | { 56 | int x, y, width, height; 57 | } compose; 58 | }; 59 | 60 | typedef struct drm_aux_s 61 | { 62 | unsigned int fb_handle; 63 | uint32_t bo_handles[AV_DRM_MAX_PLANES]; 64 | 65 | AVFrame *frame; 66 | } drm_aux_t; 67 | 68 | // Aux size should only need to be 2, but on a few streams (Hobbit) under FKMS 69 | // we get initial flicker probably due to dodgy drm timing 70 | #define AUX_SIZE 3 71 | typedef struct drmprime_out_env_s 72 | { 73 | AVClass *class; 74 | 75 | int drm_fd; 76 | uint32_t con_id; 77 | struct drm_setup setup; 78 | enum AVPixelFormat avfmt; 79 | int show_all; 80 | 81 | unsigned int ano; 82 | drm_aux_t aux[AUX_SIZE]; 83 | 84 | pthread_t q_thread; 85 | sem_t q_sem_in; 86 | sem_t q_sem_out; 87 | int q_terminate; 88 | AVFrame *q_next; 89 | 90 | } drmprime_out_env_t; 91 | 92 | 93 | static int find_plane(const int drmfd, const int crtcidx, const uint32_t format, 94 | uint32_t *const pplane_id) 95 | { 96 | drmModePlaneResPtr planes; 97 | drmModePlanePtr plane; 98 | unsigned int i; 99 | unsigned int j; 100 | int ret = 0; 101 | 102 | planes = drmModeGetPlaneResources(drmfd); 103 | if (!planes) { 104 | fprintf(stderr, "drmModeGetPlaneResources failed: %s\n", ERRSTR); 105 | return -1; 106 | } 107 | 108 | for (i = 0; i < planes->count_planes; ++i) { 109 | plane = drmModeGetPlane(drmfd, planes->planes[i]); 110 | if (!planes) { 111 | fprintf(stderr, "drmModeGetPlane failed: %s\n", ERRSTR); 112 | break; 113 | } 114 | 115 | if (!(plane->possible_crtcs & (1 << crtcidx))) { 116 | drmModeFreePlane(plane); 117 | continue; 118 | } 119 | 120 | for (j = 0; j < plane->count_formats; ++j) { 121 | if (plane->formats[j] == format) break; 122 | } 123 | 124 | if (j == plane->count_formats) { 125 | drmModeFreePlane(plane); 126 | continue; 127 | } 128 | 129 | *pplane_id = plane->plane_id; 130 | drmModeFreePlane(plane); 131 | break; 132 | } 133 | 134 | if (i == planes->count_planes) ret = -1; 135 | 136 | drmModeFreePlaneResources(planes); 137 | return ret; 138 | } 139 | 140 | static void da_uninit(drmprime_out_env_t *const de, drm_aux_t *da) 141 | { 142 | unsigned int i; 143 | 144 | if (da->fb_handle != 0) { 145 | drmModeRmFB(de->drm_fd, da->fb_handle); 146 | da->fb_handle = 0; 147 | } 148 | 149 | for (i = 0; i != AV_DRM_MAX_PLANES; ++i) { 150 | if (da->bo_handles[i]) { 151 | struct drm_gem_close gem_close = {.handle = da->bo_handles[i]}; 152 | drmIoctl(de->drm_fd, DRM_IOCTL_GEM_CLOSE, &gem_close); 153 | da->bo_handles[i] = 0; 154 | } 155 | } 156 | 157 | av_frame_free(&da->frame); 158 | } 159 | 160 | static int do_display(drmprime_out_env_t *const de, AVFrame *frame) 161 | { 162 | const AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor *)frame->data[0]; 163 | drm_aux_t *da = de->aux + de->ano; 164 | const uint32_t format = desc->layers[0].format; 165 | int ret = 0; 166 | 167 | #if TRACE_ALL 168 | fprintf(stderr, "<<< %s: fd=%d\n", __func__, desc->objects[0].fd); 169 | #endif 170 | 171 | if (de->setup.out_fourcc != format) { 172 | if (find_plane(de->drm_fd, de->setup.crtcIdx, format, &de->setup.planeId)) { 173 | av_frame_free(&frame); 174 | fprintf(stderr, "No plane for format: %#x\n", format); 175 | return -1; 176 | } 177 | de->setup.out_fourcc = format; 178 | } 179 | 180 | { 181 | drmVBlank vbl = { 182 | .request = { 183 | .type = DRM_VBLANK_RELATIVE, 184 | .sequence = 0 185 | } 186 | }; 187 | 188 | while (drmWaitVBlank(de->drm_fd, &vbl)) { 189 | if (errno != EINTR) { 190 | // This always fails - don't know why 191 | // fprintf(stderr, "drmWaitVBlank failed: %s\n", ERRSTR); 192 | break; 193 | } 194 | } 195 | } 196 | 197 | da_uninit(de, da); 198 | 199 | { 200 | uint32_t pitches[4] = { 0 }; 201 | uint32_t offsets[4] = { 0 }; 202 | uint64_t modifiers[4] = { 0 }; 203 | uint32_t bo_handles[4] = { 0 }; 204 | int i, j, n; 205 | 206 | da->frame = frame; 207 | 208 | memset(da->bo_handles, 0, sizeof(da->bo_handles)); 209 | for (i = 0; i < desc->nb_objects; ++i) { 210 | if (drmPrimeFDToHandle(de->drm_fd, desc->objects[i].fd, da->bo_handles + i) != 0) { 211 | fprintf(stderr, "drmPrimeFDToHandle[%d](%d) failed: %s\n", i, desc->objects[i].fd, ERRSTR); 212 | return -1; 213 | } 214 | } 215 | 216 | n = 0; 217 | for (i = 0; i < desc->nb_layers; ++i) { 218 | for (j = 0; j < desc->layers[i].nb_planes; ++j) { 219 | const AVDRMPlaneDescriptor *const p = desc->layers[i].planes + j; 220 | const AVDRMObjectDescriptor *const obj = desc->objects + p->object_index; 221 | pitches[n] = p->pitch; 222 | offsets[n] = p->offset; 223 | modifiers[n] = obj->format_modifier; 224 | bo_handles[n] = da->bo_handles[p->object_index]; 225 | ++n; 226 | } 227 | } 228 | 229 | #if 1 && TRACE_ALL 230 | fprintf(stderr, "%dx%d, fmt: %x, boh=%d,%d,%d,%d, pitch=%d,%d,%d,%d," 231 | " offset=%d,%d,%d,%d, mod=%llx,%llx,%llx,%llx\n", 232 | av_frame_cropped_width(frame), 233 | av_frame_cropped_height(frame), 234 | desc->layers[0].format, 235 | bo_handles[0], 236 | bo_handles[1], 237 | bo_handles[2], 238 | bo_handles[3], 239 | pitches[0], 240 | pitches[1], 241 | pitches[2], 242 | pitches[3], 243 | offsets[0], 244 | offsets[1], 245 | offsets[2], 246 | offsets[3], 247 | (long long)modifiers[0], 248 | (long long)modifiers[1], 249 | (long long)modifiers[2], 250 | (long long)modifiers[3] 251 | ); 252 | #endif 253 | 254 | if (drmModeAddFB2WithModifiers(de->drm_fd, 255 | av_frame_cropped_width(frame), 256 | av_frame_cropped_height(frame), 257 | desc->layers[0].format, bo_handles, 258 | pitches, offsets, modifiers, 259 | &da->fb_handle, DRM_MODE_FB_MODIFIERS /** 0 if no mods */) != 0) { 260 | fprintf(stderr, "drmModeAddFB2WithModifiers failed: %s\n", ERRSTR); 261 | return -1; 262 | } 263 | } 264 | 265 | ret = drmModeSetPlane(de->drm_fd, de->setup.planeId, de->setup.crtcId, 266 | da->fb_handle, 0, 267 | de->setup.compose.x, de->setup.compose.y, 268 | de->setup.compose.width, 269 | de->setup.compose.height, 270 | 0, 0, 271 | av_frame_cropped_width(frame) << 16, 272 | av_frame_cropped_height(frame) << 16); 273 | 274 | if (ret != 0) { 275 | fprintf(stderr, "drmModeSetPlane failed: %s\n", ERRSTR); 276 | } 277 | 278 | de->ano = de->ano + 1 >= AUX_SIZE ? 0 : de->ano + 1; 279 | 280 | return ret; 281 | } 282 | 283 | static int do_sem_wait(sem_t *const sem, const int nowait) 284 | { 285 | while (nowait ? sem_trywait(sem) : sem_wait(sem)) { 286 | if (errno != EINTR) return -errno; 287 | } 288 | return 0; 289 | } 290 | 291 | static void* display_thread(void *v) 292 | { 293 | drmprime_out_env_t *const de = v; 294 | int i; 295 | 296 | #if TRACE_ALL 297 | fprintf(stderr, "<<< %s\n", __func__); 298 | #endif 299 | 300 | sem_post(&de->q_sem_out); 301 | 302 | for (;;) { 303 | AVFrame *frame; 304 | 305 | do_sem_wait(&de->q_sem_in, 0); 306 | 307 | if (de->q_terminate) 308 | break; 309 | 310 | frame = de->q_next; 311 | de->q_next = NULL; 312 | sem_post(&de->q_sem_out); 313 | 314 | do_display(de, frame); 315 | } 316 | 317 | #if TRACE_ALL 318 | fprintf(stderr, ">>> %s\n", __func__); 319 | #endif 320 | 321 | for (i = 0; i != AUX_SIZE; ++i) 322 | da_uninit(de, de->aux + i); 323 | 324 | av_frame_free(&de->q_next); 325 | 326 | return NULL; 327 | } 328 | 329 | static int find_crtc(int drmfd, struct drm_setup *s, uint32_t *const pConId) 330 | { 331 | int ret = -1; 332 | int i; 333 | drmModeRes *res = drmModeGetResources(drmfd); 334 | drmModeConnector *c; 335 | 336 | if (!res) { 337 | printf("drmModeGetResources failed: %s\n", ERRSTR); 338 | return -1; 339 | } 340 | 341 | if (res->count_crtcs <= 0) { 342 | printf("drm: no crts\n"); 343 | goto fail_res; 344 | } 345 | 346 | if (!s->conId) { 347 | fprintf(stderr, 348 | "No connector ID specified. Choosing default from list:\n"); 349 | 350 | for (i = 0; i < res->count_connectors; i++) { 351 | drmModeConnector *con = 352 | drmModeGetConnector(drmfd, res->connectors[i]); 353 | drmModeEncoder *enc = NULL; 354 | drmModeCrtc *crtc = NULL; 355 | 356 | if (con->encoder_id) { 357 | enc = drmModeGetEncoder(drmfd, con->encoder_id); 358 | if (enc->crtc_id) { 359 | crtc = drmModeGetCrtc(drmfd, enc->crtc_id); 360 | } 361 | } 362 | 363 | if (!s->conId && crtc) { 364 | s->conId = con->connector_id; 365 | s->crtcId = crtc->crtc_id; 366 | } 367 | 368 | fprintf(stderr, "Connector %d (crtc %d): type %d, %dx%d%s\n", 369 | con->connector_id, 370 | crtc ? crtc->crtc_id : 0, 371 | con->connector_type, 372 | crtc ? crtc->width : 0, 373 | crtc ? crtc->height : 0, 374 | (s->conId == (int)con->connector_id ? 375 | " (chosen)" : "")); 376 | } 377 | 378 | if (!s->conId) { 379 | fprintf(stderr, 380 | "No suitable enabled connector found.\n"); 381 | return -1;; 382 | } 383 | } 384 | 385 | s->crtcIdx = -1; 386 | 387 | for (i = 0; i < res->count_crtcs; ++i) { 388 | if (s->crtcId == res->crtcs[i]) { 389 | s->crtcIdx = i; 390 | break; 391 | } 392 | } 393 | 394 | if (s->crtcIdx == -1) { 395 | fprintf(stderr, "drm: CRTC %u not found\n", s->crtcId); 396 | goto fail_res; 397 | } 398 | 399 | if (res->count_connectors <= 0) { 400 | fprintf(stderr, "drm: no connectors\n"); 401 | goto fail_res; 402 | } 403 | 404 | c = drmModeGetConnector(drmfd, s->conId); 405 | if (!c) { 406 | fprintf(stderr, "drmModeGetConnector failed: %s\n", ERRSTR); 407 | goto fail_res; 408 | } 409 | 410 | if (!c->count_modes) { 411 | fprintf(stderr, "connector supports no mode\n"); 412 | goto fail_conn; 413 | } 414 | 415 | { 416 | drmModeCrtc *crtc = drmModeGetCrtc(drmfd, s->crtcId); 417 | s->compose.x = crtc->x; 418 | s->compose.y = crtc->y; 419 | s->compose.width = crtc->width; 420 | s->compose.height = crtc->height; 421 | drmModeFreeCrtc(crtc); 422 | } 423 | 424 | if (pConId) *pConId = c->connector_id; 425 | ret = 0; 426 | 427 | fail_conn: 428 | drmModeFreeConnector(c); 429 | 430 | fail_res: 431 | drmModeFreeResources(res); 432 | 433 | return ret; 434 | } 435 | 436 | int drmprime_out_display(drmprime_out_env_t *de, struct AVFrame *src_frame) 437 | { 438 | AVFrame *frame; 439 | int ret; 440 | 441 | if ((src_frame->flags & AV_FRAME_FLAG_CORRUPT) != 0) { 442 | fprintf(stderr, "Discard corrupt frame: fmt=%d, ts=%" PRId64 "\n", src_frame->format, src_frame->pts); 443 | return 0; 444 | } 445 | 446 | if (src_frame->format == AV_PIX_FMT_DRM_PRIME) { 447 | frame = av_frame_alloc(); 448 | av_frame_ref(frame, src_frame); 449 | } else if (src_frame->format == AV_PIX_FMT_VAAPI) { 450 | frame = av_frame_alloc(); 451 | frame->format = AV_PIX_FMT_DRM_PRIME; 452 | if (av_hwframe_map(frame, src_frame, 0) != 0) { 453 | fprintf(stderr, "Failed to map frame (format=%d) to DRM_PRiME\n", src_frame->format); 454 | av_frame_free(&frame); 455 | return AVERROR(EINVAL); 456 | } 457 | } else { 458 | fprintf(stderr, "Frame (format=%d) not DRM_PRiME\n", src_frame->format); 459 | return AVERROR(EINVAL); 460 | } 461 | 462 | ret = do_sem_wait(&de->q_sem_out, !de->show_all); 463 | if (ret) { 464 | av_frame_free(&frame); 465 | } else { 466 | de->q_next = frame; 467 | sem_post(&de->q_sem_in); 468 | } 469 | 470 | return 0; 471 | } 472 | 473 | void drmprime_out_delete(drmprime_out_env_t *de) 474 | { 475 | de->q_terminate = 1; 476 | sem_post(&de->q_sem_in); 477 | pthread_join(de->q_thread, NULL); 478 | sem_destroy(&de->q_sem_in); 479 | sem_destroy(&de->q_sem_out); 480 | 481 | av_frame_free(&de->q_next); 482 | 483 | if (de->drm_fd >= 0) { 484 | close(de->drm_fd); 485 | de->drm_fd = -1; 486 | } 487 | 488 | free(de); 489 | } 490 | 491 | drmprime_out_env_t* drmprime_out_new() 492 | { 493 | int rv; 494 | drmprime_out_env_t* const de = calloc(1, sizeof(*de)); 495 | if (de == NULL) 496 | return NULL; 497 | 498 | const char *drm_module = DRM_MODULE; 499 | 500 | de->drm_fd = -1; 501 | de->con_id = 0; 502 | de->setup = (struct drm_setup) { 0 }; 503 | de->q_terminate = 0; 504 | de->show_all = 1; 505 | 506 | if ((de->drm_fd = drmOpen(drm_module, NULL)) < 0) { 507 | rv = AVERROR(errno); 508 | fprintf(stderr, "Failed to drmOpen %s: %s\n", drm_module, av_err2str(rv)); 509 | goto fail_free; 510 | } 511 | 512 | if (find_crtc(de->drm_fd, &de->setup, &de->con_id) != 0) { 513 | fprintf(stderr, "failed to find valid mode\n"); 514 | rv = AVERROR(EINVAL); 515 | goto fail_close; 516 | } 517 | 518 | sem_init(&de->q_sem_in, 0, 0); 519 | sem_init(&de->q_sem_out, 0, 0); 520 | if (pthread_create(&de->q_thread, NULL, display_thread, de)) { 521 | rv = AVERROR(errno); 522 | fprintf(stderr, "Failed to create display thread: %s\n", av_err2str(rv)); 523 | goto fail_close; 524 | } 525 | 526 | return de; 527 | 528 | fail_close: 529 | close(de->drm_fd); 530 | de->drm_fd = -1; 531 | fail_free: 532 | free(de); 533 | fprintf(stderr, ">>> %s: FAIL\n", __func__); 534 | return NULL; 535 | } 536 | 537 | -------------------------------------------------------------------------------- /drmprime_out.h: -------------------------------------------------------------------------------- 1 | struct AVFrame; 2 | typedef struct drmprime_out_env_s drmprime_out_env_t; 3 | 4 | int drmprime_out_display(drmprime_out_env_t * dpo, struct AVFrame * frame); 5 | void drmprime_out_delete(drmprime_out_env_t * dpo); 6 | drmprime_out_env_t * drmprime_out_new(); 7 | 8 | -------------------------------------------------------------------------------- /hello_drmprime.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2017 Jun Zhao 3 | * Copyright (c) 2017 Kaixuan Liu 4 | * 5 | * HW Acceleration API (video decoding) decode sample 6 | * 7 | * Permission is hereby granted, free of charge, to any person obtaining a copy 8 | * of this software and associated documentation files (the "Software"), to deal 9 | * in the Software without restriction, including without limitation the rights 10 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | * copies of the Software, and to permit persons to whom the Software is 12 | * furnished to do so, subject to the following conditions: 13 | * 14 | * The above copyright notice and this permission notice shall be included in 15 | * all copies or substantial portions of the Software. 16 | * 17 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 23 | * THE SOFTWARE. 24 | */ 25 | 26 | /** 27 | * @file 28 | * HW-Accelerated decoding example. 29 | * 30 | * @example hw_decode.c 31 | * This example shows how to do HW-accelerated decoding with output 32 | * frames from the HW video surfaces. 33 | */ 34 | 35 | #include 36 | #include 37 | 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #include 44 | #include 45 | #include 46 | #include 47 | 48 | #include "drmprime_out.h" 49 | 50 | static enum AVPixelFormat hw_pix_fmt; 51 | static FILE *output_file = NULL; 52 | static long frames = 0; 53 | 54 | static AVFilterContext *buffersink_ctx = NULL; 55 | static AVFilterContext *buffersrc_ctx = NULL; 56 | static AVFilterGraph *filter_graph = NULL; 57 | 58 | static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type) 59 | { 60 | int err = 0; 61 | 62 | ctx->hw_frames_ctx = NULL; 63 | // ctx->hw_device_ctx gets freed when we call avcodec_free_context 64 | if ((err = av_hwdevice_ctx_create(&ctx->hw_device_ctx, type, 65 | NULL, NULL, 0)) < 0) { 66 | fprintf(stderr, "Failed to create specified HW device.\n"); 67 | return err; 68 | } 69 | 70 | return err; 71 | } 72 | 73 | static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, 74 | const enum AVPixelFormat *pix_fmts) 75 | { 76 | const enum AVPixelFormat *p; 77 | 78 | for (p = pix_fmts; *p != -1; p++) { 79 | if (*p == hw_pix_fmt) 80 | return *p; 81 | } 82 | 83 | fprintf(stderr, "Failed to get HW surface format.\n"); 84 | return AV_PIX_FMT_NONE; 85 | } 86 | 87 | static int decode_write(AVCodecContext * const avctx, 88 | drmprime_out_env_t * const dpo, 89 | AVPacket *packet) 90 | { 91 | AVFrame *frame = NULL, *sw_frame = NULL; 92 | uint8_t *buffer = NULL; 93 | int size; 94 | int ret = 0; 95 | unsigned int i; 96 | 97 | ret = avcodec_send_packet(avctx, packet); 98 | if (ret < 0) { 99 | fprintf(stderr, "Error during decoding\n"); 100 | return ret; 101 | } 102 | 103 | for (;;) { 104 | if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) { 105 | fprintf(stderr, "Can not alloc frame\n"); 106 | ret = AVERROR(ENOMEM); 107 | goto fail; 108 | } 109 | 110 | ret = avcodec_receive_frame(avctx, frame); 111 | if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { 112 | av_frame_free(&frame); 113 | av_frame_free(&sw_frame); 114 | return 0; 115 | } else if (ret < 0) { 116 | fprintf(stderr, "Error while decoding\n"); 117 | goto fail; 118 | } 119 | 120 | // push the decoded frame into the filtergraph if it exists 121 | if (filter_graph != NULL && 122 | (ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF)) < 0) { 123 | fprintf(stderr, "Error while feeding the filtergraph\n"); 124 | goto fail; 125 | } 126 | 127 | do { 128 | if (filter_graph != NULL) { 129 | av_frame_unref(frame); 130 | ret = av_buffersink_get_frame(buffersink_ctx, frame); 131 | if (ret == AVERROR(EAGAIN)) { 132 | ret = 0; 133 | break; 134 | } 135 | if (ret < 0) { 136 | if (ret != AVERROR_EOF) 137 | fprintf(stderr, "Failed to get frame: %s", av_err2str(ret)); 138 | goto fail; 139 | } 140 | } 141 | 142 | drmprime_out_display(dpo, frame); 143 | 144 | if (output_file != NULL) { 145 | AVFrame *tmp_frame; 146 | 147 | if (frame->format == hw_pix_fmt) { 148 | /* retrieve data from GPU to CPU */ 149 | if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) { 150 | fprintf(stderr, "Error transferring the data to system memory\n"); 151 | goto fail; 152 | } 153 | tmp_frame = sw_frame; 154 | } else 155 | tmp_frame = frame; 156 | 157 | size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width, 158 | tmp_frame->height, 1); 159 | buffer = av_malloc(size); 160 | if (!buffer) { 161 | fprintf(stderr, "Can not alloc buffer\n"); 162 | ret = AVERROR(ENOMEM); 163 | goto fail; 164 | } 165 | ret = av_image_copy_to_buffer(buffer, size, 166 | (const uint8_t * const *)tmp_frame->data, 167 | (const int *)tmp_frame->linesize, tmp_frame->format, 168 | tmp_frame->width, tmp_frame->height, 1); 169 | if (ret < 0) { 170 | fprintf(stderr, "Can not copy image to buffer\n"); 171 | goto fail; 172 | } 173 | 174 | if ((ret = fwrite(buffer, 1, size, output_file)) < 0) { 175 | fprintf(stderr, "Failed to dump raw data.\n"); 176 | goto fail; 177 | } 178 | } 179 | } while (buffersink_ctx != NULL); // Loop if we have a filter to drain 180 | 181 | if (frames == 0 || --frames == 0) 182 | ret = -1; 183 | 184 | fail: 185 | av_frame_free(&frame); 186 | av_frame_free(&sw_frame); 187 | av_freep(&buffer); 188 | if (ret < 0) 189 | return ret; 190 | } 191 | return 0; 192 | } 193 | 194 | // Copied almost directly from ffmpeg filtering_video.c example 195 | static int init_filters(const AVStream * const stream, 196 | const AVCodecContext * const dec_ctx, 197 | const char * const filters_descr) 198 | { 199 | char args[512]; 200 | int ret = 0; 201 | const AVFilter *buffersrc = avfilter_get_by_name("buffer"); 202 | const AVFilter *buffersink = avfilter_get_by_name("buffersink"); 203 | AVFilterInOut *outputs = avfilter_inout_alloc(); 204 | AVFilterInOut *inputs = avfilter_inout_alloc(); 205 | AVRational time_base = stream->time_base; 206 | enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_DRM_PRIME, AV_PIX_FMT_NONE }; 207 | 208 | filter_graph = avfilter_graph_alloc(); 209 | if (!outputs || !inputs || !filter_graph) { 210 | ret = AVERROR(ENOMEM); 211 | goto end; 212 | } 213 | 214 | /* buffer video source: the decoded frames from the decoder will be inserted here. */ 215 | snprintf(args, sizeof(args), 216 | "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", 217 | dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, 218 | time_base.num, time_base.den, 219 | dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); 220 | 221 | ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", 222 | args, NULL, filter_graph); 223 | if (ret < 0) { 224 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); 225 | goto end; 226 | } 227 | 228 | /* buffer video sink: to terminate the filter chain. */ 229 | ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", 230 | NULL, NULL, filter_graph); 231 | if (ret < 0) { 232 | av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); 233 | goto end; 234 | } 235 | 236 | ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, 237 | AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN); 238 | if (ret < 0) { 239 | av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); 240 | goto end; 241 | } 242 | 243 | /* 244 | * Set the endpoints for the filter graph. The filter_graph will 245 | * be linked to the graph described by filters_descr. 246 | */ 247 | 248 | /* 249 | * The buffer source output must be connected to the input pad of 250 | * the first filter described by filters_descr; since the first 251 | * filter input label is not specified, it is set to "in" by 252 | * default. 253 | */ 254 | outputs->name = av_strdup("in"); 255 | outputs->filter_ctx = buffersrc_ctx; 256 | outputs->pad_idx = 0; 257 | outputs->next = NULL; 258 | 259 | /* 260 | * The buffer sink input must be connected to the output pad of 261 | * the last filter described by filters_descr; since the last 262 | * filter output label is not specified, it is set to "out" by 263 | * default. 264 | */ 265 | inputs->name = av_strdup("out"); 266 | inputs->filter_ctx = buffersink_ctx; 267 | inputs->pad_idx = 0; 268 | inputs->next = NULL; 269 | 270 | if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, 271 | &inputs, &outputs, NULL)) < 0) 272 | goto end; 273 | 274 | if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) 275 | goto end; 276 | 277 | end: 278 | avfilter_inout_free(&inputs); 279 | avfilter_inout_free(&outputs); 280 | 281 | return ret; 282 | } 283 | 284 | void usage() 285 | { 286 | fprintf(stderr, "Usage: hello_drmprime [-l loop_count] [-f ] [-o yuv_output_file] [--deinterlace] [ ...]\n"); 287 | exit(1); 288 | } 289 | 290 | int main(int argc, char *argv[]) 291 | { 292 | AVFormatContext *input_ctx = NULL; 293 | int video_stream, ret; 294 | AVStream *video = NULL; 295 | AVCodecContext *decoder_ctx = NULL; 296 | AVCodec *decoder = NULL; 297 | AVPacket packet; 298 | enum AVHWDeviceType type; 299 | const char * in_file; 300 | char * const * in_filelist; 301 | unsigned int in_count; 302 | unsigned int in_n = 0; 303 | const char * hwdev = "drm"; 304 | int i; 305 | drmprime_out_env_t * dpo; 306 | long loop_count = 1; 307 | long frame_count = -1; 308 | const char * out_name = NULL; 309 | bool wants_deinterlace = false; 310 | 311 | { 312 | char * const * a = argv + 1; 313 | int n = argc - 1; 314 | 315 | while (n-- > 0 && a[0][0] == '-') { 316 | const char *arg = *a++; 317 | char *e; 318 | 319 | if (strcmp(arg, "-l") == 0 || strcmp(arg, "--loop") == 0) { 320 | if (n == 0) 321 | usage(); 322 | loop_count = strtol(*a, &e, 0); 323 | if (*e != 0) 324 | usage(); 325 | --n; 326 | ++a; 327 | } 328 | else if (strcmp(arg, "-f") == 0 || strcmp(arg, "--frames") == 0) { 329 | if (n == 0) 330 | usage(); 331 | frame_count = strtol(*a, &e, 0); 332 | if (*e != 0) 333 | usage(); 334 | --n; 335 | ++a; 336 | } 337 | else if (strcmp(arg, "-o") == 0) { 338 | if (n == 0) 339 | usage(); 340 | out_name = *a; 341 | --n; 342 | ++a; 343 | } 344 | else if (strcmp(arg, "--deinterlace") == 0) { 345 | wants_deinterlace = true; 346 | } 347 | else 348 | break; 349 | } 350 | 351 | // Last args are input files 352 | if (n < 0) 353 | usage(); 354 | 355 | in_filelist = a; 356 | in_count = n + 1; 357 | loop_count *= in_count; 358 | } 359 | 360 | type = av_hwdevice_find_type_by_name(hwdev); 361 | if (type == AV_HWDEVICE_TYPE_NONE) { 362 | fprintf(stderr, "Device type %s is not supported.\n", hwdev); 363 | fprintf(stderr, "Available device types:"); 364 | while((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE) 365 | fprintf(stderr, " %s", av_hwdevice_get_type_name(type)); 366 | fprintf(stderr, "\n"); 367 | return -1; 368 | } 369 | 370 | dpo = drmprime_out_new(); 371 | if (dpo == NULL) { 372 | fprintf(stderr, "Failed to open drmprime output\n"); 373 | return 1; 374 | } 375 | 376 | /* open the file to dump raw data */ 377 | if (out_name != NULL) { 378 | if ((output_file = fopen(out_name, "w+")) == NULL) { 379 | fprintf(stderr, "Failed to open output file %s: %s\n", out_name, strerror(errno)); 380 | return -1; 381 | } 382 | } 383 | 384 | loopy: 385 | in_file = in_filelist[in_n]; 386 | if (++in_n >= in_count) 387 | in_n = 0; 388 | 389 | /* open the input file */ 390 | if (avformat_open_input(&input_ctx, in_file, NULL, NULL) != 0) { 391 | fprintf(stderr, "Cannot open input file '%s'\n", in_file); 392 | return -1; 393 | } 394 | 395 | if (avformat_find_stream_info(input_ctx, NULL) < 0) { 396 | fprintf(stderr, "Cannot find input stream information.\n"); 397 | return -1; 398 | } 399 | 400 | /* find the video stream information */ 401 | ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0); 402 | if (ret < 0) { 403 | fprintf(stderr, "Cannot find a video stream in the input file\n"); 404 | return -1; 405 | } 406 | video_stream = ret; 407 | 408 | if (decoder->id == AV_CODEC_ID_H264) { 409 | if ((decoder = avcodec_find_decoder_by_name("h264_v4l2m2m")) == NULL) { 410 | fprintf(stderr, "Cannot find the h264 v4l2m2m decoder\n"); 411 | return -1; 412 | } 413 | hw_pix_fmt = AV_PIX_FMT_DRM_PRIME; 414 | } 415 | else { 416 | for (i = 0;; i++) { 417 | const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i); 418 | if (!config) { 419 | fprintf(stderr, "Decoder %s does not support device type %s.\n", 420 | decoder->name, av_hwdevice_get_type_name(type)); 421 | return -1; 422 | } 423 | if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX && 424 | config->device_type == type) { 425 | hw_pix_fmt = config->pix_fmt; 426 | break; 427 | } 428 | } 429 | } 430 | 431 | if (!(decoder_ctx = avcodec_alloc_context3(decoder))) 432 | return AVERROR(ENOMEM); 433 | 434 | video = input_ctx->streams[video_stream]; 435 | if (avcodec_parameters_to_context(decoder_ctx, video->codecpar) < 0) 436 | return -1; 437 | 438 | decoder_ctx->get_format = get_hw_format; 439 | 440 | if (hw_decoder_init(decoder_ctx, type) < 0) 441 | return -1; 442 | 443 | decoder_ctx->thread_count = 3; 444 | 445 | if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) { 446 | fprintf(stderr, "Failed to open codec for stream #%u\n", video_stream); 447 | return -1; 448 | } 449 | 450 | if (wants_deinterlace) { 451 | if (init_filters(video, decoder_ctx, "deinterlace_v4l2m2m") < 0) { 452 | fprintf(stderr, "Failed to init deinterlace\n"); 453 | return -1; 454 | } 455 | } 456 | 457 | /* actual decoding and dump the raw data */ 458 | frames = frame_count; 459 | while (ret >= 0) { 460 | if ((ret = av_read_frame(input_ctx, &packet)) < 0) 461 | break; 462 | 463 | if (video_stream == packet.stream_index) 464 | ret = decode_write(decoder_ctx, dpo, &packet); 465 | 466 | av_packet_unref(&packet); 467 | } 468 | 469 | /* flush the decoder */ 470 | packet.data = NULL; 471 | packet.size = 0; 472 | ret = decode_write(decoder_ctx, dpo, &packet); 473 | av_packet_unref(&packet); 474 | 475 | if (output_file) 476 | fclose(output_file); 477 | avfilter_graph_free(&filter_graph); 478 | avcodec_free_context(&decoder_ctx); 479 | avformat_close_input(&input_ctx); 480 | 481 | if (--loop_count > 0) 482 | goto loopy; 483 | 484 | drmprime_out_delete(dpo); 485 | 486 | return 0; 487 | } 488 | --------------------------------------------------------------------------------