diff --git a/src/zm_ffmpeg.h b/src/zm_ffmpeg.h
index 0e3219a144..dfe2140d8d 100644
--- a/src/zm_ffmpeg.h
+++ b/src/zm_ffmpeg.h
@@ -201,7 +201,7 @@ void zm_dump_codecpar(const AVCodecParameters *par);
 
 #ifndef DBG_OFF
 # define ZM_DUMP_PACKET(pkt, text) \
-  Debug(2, "%s: pts: %" PRId64 ", dts: %" PRId64 \
+  if (pkt) { Debug(2, "%s: pts: %" PRId64 ", dts: %" PRId64 \
       ", size: %d, stream_index: %d, flags: %04x, keyframe(%d) pos: %" PRId64 ", duration: %" AV_PACKET_DURATION_FMT, \
       text,\
       pkt->pts,\
@@ -211,7 +211,9 @@ void zm_dump_codecpar(const AVCodecParameters *par);
       pkt->flags,\
       pkt->flags & AV_PKT_FLAG_KEY,\
       pkt->pos,\
-      pkt->duration)
+      pkt->duration); } else { \
+    Error("Null packet sent to ZM_DUMP_PACKET"); \
+  }
 
 # define ZM_DUMP_STREAM_PACKET(stream, pkt, text) \
   if (logDebugging()) { \
diff --git a/src/zm_monitor.cpp b/src/zm_monitor.cpp
index 8b61fc37ee..6232796aba 100644
--- a/src/zm_monitor.cpp
+++ b/src/zm_monitor.cpp
@@ -963,7 +963,7 @@ bool Monitor::connect() {
     Warning("Already connected. Please call disconnect first.");
   }
   if (!camera) LoadCamera();
-  uint64_t image_size = camera->ImageSize();
+  size_t image_size = camera->ImageSize();
   mem_size = sizeof(SharedData)
        + sizeof(TriggerData)
        + (zone_count * sizeof(int)) // Per zone scores
@@ -980,9 +980,9 @@
       "zone_count %d * sizeof int %zu "
       "VideoStoreData=%zu "
       "timestamps=%zu "
-      "images=%dx%zd = %zd "
-      "analysis images=%dx%" PRIi64 " = %" PRId64 " "
-      "image_format = %dx%" PRIi64 " = %" PRId64 " "
+      "images=%dx%zu = %zu "
+      "analysis images=%dx%zu = %zu "
+      "image_format = %dx%zu = %zu "
       "total=%jd",
       sizeof(SharedData),
       sizeof(TriggerData),
@@ -990,9 +990,9 @@
       sizeof(int),
       sizeof(VideoStoreData),
       (image_buffer_count * sizeof(struct timeval)),
-      image_buffer_count, image_size, (image_buffer_count * image_size),
-      image_buffer_count, image_size, (image_buffer_count * image_size),
-      image_buffer_count, sizeof(AVPixelFormat), (image_buffer_count * sizeof(AVPixelFormat)),
+      image_buffer_count, image_size, static_cast<size_t>(image_buffer_count * image_size),
+      image_buffer_count, image_size, static_cast<size_t>(image_buffer_count * image_size),
+      image_buffer_count, sizeof(AVPixelFormat), static_cast<size_t>(image_buffer_count * sizeof(AVPixelFormat)),
       static_cast<intmax_t>(mem_size));
 #if ZM_MEM_MAPPED
   mem_file = stringtf("%s/zm.mmap.%u", staticConfig.PATH_MAP.c_str(), id);
diff --git a/src/zm_videostore.cpp b/src/zm_videostore.cpp
index 0ab8c784a5..6b74ab9d02 100644
--- a/src/zm_videostore.cpp
+++ b/src/zm_videostore.cpp
@@ -301,12 +301,37 @@ bool VideoStore::open() {
       video_out_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
     }
 
+    // We have to re-parse the options because each attempt to open destroys the dictionary
+    AVDictionary *opts = 0;
+    ret = av_dict_parse_string(&opts, options.c_str(), "=", ",#\n", 0);
+    if (ret < 0) {
+      Warning("Could not parse ffmpeg encoder options list '%s'", options.c_str());
+    } else {
+      const AVDictionaryEntry *entry = av_dict_get(opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE);
+      if (entry) {
+        reorder_queue_size = std::stoul(entry->value);
+        Debug(1, "reorder_queue_size set to %zu", reorder_queue_size);
+        // remove it to prevent complaining later.
+        av_dict_set(&opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE);
+      }
+    }
     // When encoding, we are going to use the timestamp values instead of packet pts/dts
     video_out_ctx->time_base = AV_TIME_BASE_Q;
     video_out_ctx->codec_id = codec_data[i].codec_id;
     video_out_ctx->pix_fmt = codec_data[i].hw_pix_fmt;
     Debug(1, "Setting pix fmt to %d %s", codec_data[i].hw_pix_fmt, av_get_pix_fmt_name(codec_data[i].hw_pix_fmt));
-    video_out_ctx->level = 32;
+    const AVDictionaryEntry *opts_level = av_dict_get(opts, "level", nullptr, AV_DICT_MATCH_CASE);
+    if (opts_level) {
+      video_out_ctx->level = std::stoul(opts_level->value);
+    } else {
+      video_out_ctx->level = 32;
+    }
+    const AVDictionaryEntry *opts_gop_size = av_dict_get(opts, "gop_size", nullptr, AV_DICT_MATCH_CASE);
+    if (opts_gop_size) {
+      video_out_ctx->gop_size = std::stoul(opts_gop_size->value);
+    } else {
+      video_out_ctx->gop_size = 12;
+    }
 
     // Don't have an input stream, so need to tell it what we are sending it, or are transcoding
     video_out_ctx->width = monitor->Width();
@@ -315,7 +340,6 @@
 
     if (video_out_ctx->codec_id == AV_CODEC_ID_H264) {
       video_out_ctx->bit_rate = 2000000;
-      video_out_ctx->gop_size = 12;
       video_out_ctx->max_b_frames = 1;
     } else if (video_out_ctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
       /* just for testing, we also add B frames */
@@ -364,20 +388,6 @@
     } // end if hwdevice_type != NONE
 #endif
 
-    // We have to re-parse the options because each attempt to open destroys the dictionary
-    AVDictionary *opts = 0;
-    ret = av_dict_parse_string(&opts, options.c_str(), "=", ",#\n", 0);
-    if (ret < 0) {
-      Warning("Could not parse ffmpeg encoder options list '%s'", options.c_str());
-    } else {
-      const AVDictionaryEntry *entry = av_dict_get(opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE);
-      if (entry) {
-        reorder_queue_size = std::stoul(entry->value);
-        Debug(1, "reorder_queue_size set to %zu", reorder_queue_size);
-        // remove it to prevent complaining later.
-        av_dict_set(&opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE);
-      }
-    }
     if ((ret = avcodec_open2(video_out_ctx, video_out_codec, &opts)) < 0) {
       if (wanted_encoder != "" and wanted_encoder != "auto") {
         Warning("Can't open video codec (%s) %s",
diff --git a/web/lang/ja_jp.php b/web/lang/ja_jp.php
index 101b9c3e6f..fa05b2156a 100644
--- a/web/lang/ja_jp.php
+++ b/web/lang/ja_jp.php
@@ -51,7 +51,7 @@
 //
 // Example
 // header( "Content-Type: text/html; charset=iso-8859-1" );
-header( "Content-Type: text/html; charset=Shift_JIS" );
+header( "Content-Type: text/html; charset=UTF-8" );
 
 // You may need to change your locale here if your default one is incorrect for the
 // language described in this file, or if you have multiple languages supported.
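
Note on the zm_videostore.cpp change: the encoder options string is parsed with av_dict_parse_string(), and the "reorder_queue_size", "level" and "gop_size" entries are consumed by ZoneMinder itself (reorder_queue_size is also deleted from the dictionary so avcodec_open2() does not warn about an unknown option). The following standalone sketch, not part of this patch, shows how such an options string parses; the option values, the file name and the fallback defaults printed here are illustrative assumptions only.

// Minimal sketch; build against FFmpeg dev headers, e.g. g++ parse_opts.cpp -lavutil
#include <cstdio>
#include <string>
extern "C" {
#include <libavutil/dict.h>
}

int main() {
  // Hypothetical encoder options as they might be entered for a Monitor.
  std::string options = "reorder_queue_size=16,level=41,gop_size=30,preset=veryfast";

  AVDictionary *opts = nullptr;
  if (av_dict_parse_string(&opts, options.c_str(), "=", ",#\n", 0) < 0) {
    std::fprintf(stderr, "Could not parse options '%s'\n", options.c_str());
    return 1;
  }

  // reorder_queue_size is read and then removed, mirroring the patch,
  // so that avcodec_open2() never sees it as an unknown codec option.
  if (const AVDictionaryEntry *e = av_dict_get(opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE)) {
    std::printf("reorder_queue_size = %lu\n", std::stoul(e->value));
    av_dict_set(&opts, "reorder_queue_size", nullptr, AV_DICT_MATCH_CASE);
  }

  // level and gop_size fall back to 32 and 12 when absent, as in the patch.
  const AVDictionaryEntry *level = av_dict_get(opts, "level", nullptr, AV_DICT_MATCH_CASE);
  std::printf("level = %lu\n", level ? std::stoul(level->value) : 32ul);
  const AVDictionaryEntry *gop = av_dict_get(opts, "gop_size", nullptr, AV_DICT_MATCH_CASE);
  std::printf("gop_size = %lu\n", gop ? std::stoul(gop->value) : 12ul);

  av_dict_free(&opts);
  return 0;
}

Whatever remains in the dictionary after this (in the example above, only "preset") is what the patch hands to avcodec_open2() via opts.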