├── .gitignore ├── Cargo.toml ├── LICENSE-MIT ├── src ├── progress_stream.rs ├── bin.rs ├── insta360.rs ├── lib.rs ├── desc_reader.rs └── writer.rs ├── README.md ├── .github └── workflows │ └── release.yml └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /tests 3 | *.mp4 4 | publish.sh 5 | Cargo.lock 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mp4-merge" 3 | version = "0.1.11" 4 | edition = "2021" 5 | authors = ["Adrian "] 6 | license = "MIT OR Apache-2.0" 7 | description = "A tool and library to losslessly join multiple .mp4 files shot with same camera and settings" 8 | repository = "https://github.com/gyroflow/mp4-merge" 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | byteorder = "1.5.0" 13 | log = "0.4" 14 | filetime_creation = "0.2" 15 | 16 | [lib] 17 | name = "mp4_merge" 18 | path = "src/lib.rs" 19 | 20 | [[bin]] 21 | name = "mp4_merge" 22 | path = "src/bin.rs" -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Gyroflow 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /src/progress_stream.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT OR Apache-2.0 2 | // Copyright © 2022 Adrian 3 | 4 | use std::io::{ Read, Write, Seek, Result, SeekFrom }; 5 | 6 | pub struct ProgressStream { 7 | inner: R, 8 | callback: C, 9 | total: usize 10 | } 11 | impl ProgressStream { 12 | pub fn new(inner: R, callback: C) -> Self { 13 | Self { inner, callback, total: 0 } 14 | } 15 | } 16 | impl Read for ProgressStream { 17 | fn read(&mut self, buf: &mut [u8]) -> Result { 18 | let read = self.inner.read(buf)?; 19 | self.total += read; 20 | (self.callback)(self.total); 21 | Ok(read) 22 | } 23 | } 24 | impl Seek for ProgressStream { 25 | fn seek(&mut self, pos: SeekFrom) -> Result { self.inner.seek(pos) } 26 | } 27 | impl Write for ProgressStream { 28 | fn write(&mut self, buf: &[u8]) -> Result { 29 | let written = self.inner.write(buf)?; 30 | self.total += written; 31 | (self.callback)(self.total); 32 | Ok(written) 33 | } 34 | fn flush(&mut self) -> Result<()> { self.inner.flush() } 35 | } 36 | -------------------------------------------------------------------------------- /src/bin.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT OR Apache-2.0 2 | // Copyright © 2022 Adrian 3 | 4 | use std::io::Write; 5 | use std::path::*; 6 | use 
mp4_merge::{join_files, update_file_times}; 7 | 8 | fn main() { 9 | let _time = std::time::Instant::now(); 10 | 11 | let mut files = Vec::new(); 12 | let mut output_file = None; 13 | 14 | let mut args = std::env::args().skip(1); 15 | while let Some(arg) = args.next() { 16 | if arg == "--out" { 17 | if let Some(out) = args.next() { 18 | output_file = Some(Path::new(&out).to_owned()) 19 | } 20 | continue; 21 | } 22 | let p = Path::new(&arg); 23 | if !p.exists() { 24 | eprintln!("File doesn't exist {:?}", p); 25 | continue; 26 | } 27 | println!("Merging file {:?}", p); 28 | files.push(p.to_owned()); 29 | if output_file.is_none() { 30 | output_file = Some(p.with_file_name(format!("{}_joined.mp4", p.file_name().unwrap().to_str().unwrap()))); 31 | } 32 | } 33 | if files.is_empty() { eprintln!("No input files!"); return; } 34 | if output_file.is_none() { eprintln!("Output file not specified!"); return; } 35 | 36 | let final_output_file = output_file.as_ref().unwrap(); 37 | 38 | println!("Output file {:?}", final_output_file); 39 | 40 | join_files(&files, final_output_file, |progress| { 41 | print!("\rMerging... {:.2}%", progress * 100.0); 42 | std::io::stdout().flush().unwrap(); 43 | }).unwrap(); 44 | 45 | update_file_times(&files[0], final_output_file); 46 | 47 | println!("\rDone in {:.3}s ", _time.elapsed().as_millis() as f64 / 1000.0); 48 | std::io::stdout().flush().unwrap(); 49 | } 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # mp4-merge 2 | A tool and library to losslessly join multiple .mp4 files shot with same camera and settings. 3 | 4 | This is useful to merge multiple files that are created by the camera because of the 4GB limit on the SD Card. 5 | 6 | This tool can merge all these separate files to a new one without transcoding or losing any data. 
7 | 8 | All original tracks are preserved, all metadata is kept as in the original. 9 | 10 | It was created to help stabilize such files in [Gyroflow](https://github.com/gyroflow/gyroflow). 11 | 12 | ## Download: 13 | See the [Releases](https://github.com/gyroflow/mp4-merge/releases) page. 14 | 15 | ## Usage: 16 | The easiest way is to just drag & drop multiple .mp4 files onto the `mp4_merge` executable. 17 | 18 | Usage from command line: 19 | - Merge specified files and output to `IN_FILE1.mp4_joined.mp4` 20 | ```shell 21 | mp4_merge IN_FILE1.mp4 IN_FILE2.mp4 IN_FILE3.mp4 ... 22 | ``` 23 | - Merge specified files and output to `result.mp4` 24 | 25 | ```shell 26 | mp4_merge IN_FILE1.mp4 IN_FILE2.mp4 IN_FILE3.mp4 ... --out result.mp4 27 | ``` 28 | 29 | ## Use as a Rust library: 30 | 31 | ```toml 32 | [dependencies] 33 | mp4-merge = "0.1.11" 34 | ``` 35 | ```rust 36 | let files = ["IN_FILE1.mp4", "IN_FILE2.mp4"]; 37 | mp4_merge::join_files(&files, "out.mp4", |progress| { 38 | println!("Merging... {:.2}%", progress * 100.0); 39 | }).unwrap(); 40 | 41 | ``` 42 | 43 | ## How does this work? 44 | The idea is to merge the raw track data together, and then rewrite the `stbl` box (which is the descriptor of the raw data) to account for the additional data. In order to do this, this library does the following: 45 | 1. Scan every provided file and collect: 46 | - `mdat` offset and size 47 | - Duration stored in `mvhd`, `tkhd`, `mdhd` boxes 48 | - `stbl` descriptions: `stts`, `stsz`, `stss`, `stsc`, `stco`/`co64` 49 | 2. Merge all these descriptions: sum durations, append `stbl` lists to each other and add chunk offsets based on previous file `mdat` size. 50 | 3. 
Take the first file, go through every box and write it to the output file, while: 51 | - If `mdat`: write raw data from all `mdat` boxes from all files, and store it as a large box (64-bit) 52 | - If `mvhd`, `tkhd` or `mdhd`: patch the duration value to the sum of all durations 53 | - If `stbl`: write these boxes from scratch, using merged lists from the description 54 | - If `stco`: rewrite to `co64` to be able to fit more than 4 GB of data. 55 | 4. Done 56 | 57 |
58 | 59 | #### License 60 | 61 | 62 | Licensed under either of Apache License, Version 63 | 2.0 or MIT license at your option. 64 | 65 | 66 |
67 | 68 | 69 | Unless you explicitly state otherwise, any contribution intentionally submitted 70 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 71 | be dual licensed as above, without any additional terms or conditions. 72 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | #tags: [ 'v*' ] 6 | 7 | jobs: 8 | build: 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | targets: [ 13 | { os: ubuntu-latest, target: x86_64-unknown-linux-musl, cross: false, name: linux64, ext: '' }, 14 | { os: ubuntu-latest, target: i686-unknown-linux-musl, cross: true, name: linux32, ext: '' }, 15 | { os: ubuntu-latest, target: aarch64-unknown-linux-musl, cross: true, name: linux-arm64, ext: '' }, 16 | { os: macos-latest, target: x86_64-apple-darwin, cross: false, name: mac64, ext: '' }, 17 | { os: macos-latest, target: aarch64-apple-darwin, cross: true, name: mac-arm64, ext: '' }, 18 | { os: windows-latest, target: x86_64-pc-windows-msvc, cross: false, name: windows64, ext: '.exe' }, 19 | { os: windows-latest, target: i686-pc-windows-msvc, cross: false, name: windows32, ext: '.exe' }, 20 | { os: windows-latest, target: aarch64-pc-windows-msvc, cross: false, name: windows-arm64, ext: '.exe' } 21 | ] 22 | runs-on: ${{ matrix.targets.os }} 23 | steps: 24 | - uses: actions/checkout@v4 25 | - name: Install Rust 26 | uses: dtolnay/rust-toolchain@stable 27 | with: 28 | targets: "${{ matrix.targets.target }}" 29 | - uses: Swatinem/rust-cache@v2 30 | 31 | - name: Add rust target 32 | run: rustup target add ${{ matrix.targets.target }} 33 | 34 | - name: Setup target ${{ matrix.targets.target }} 35 | if: ${{ matrix.targets.target == 'aarch64-apple-darwin' }} 36 | run: | 37 | xcrun --show-sdk-path 38 | xcodebuild -sdk -version 39 | echo "SDKROOT=$(xcrun -sdk macosx14.5 
--show-sdk-path)" >> $GITHUB_ENV 40 | echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx14.5 --show-sdk-platform-version)" >> $GITHUB_ENV 41 | 42 | - name: Build mp4_merge 43 | env: 44 | CARGO_TARGET_DIR: ./target 45 | uses: actions-rs/cargo@v1 46 | with: 47 | use-cross: ${{ matrix.targets.cross }} 48 | command: build 49 | args: --target ${{ matrix.targets.target }} --release 50 | 51 | - name: Strip 52 | if: ${{ matrix.targets.os != 'windows-latest' }} 53 | continue-on-error: true 54 | run: strip target/${{ matrix.targets.target }}/release/mp4_merge 2>/dev/null || true 55 | 56 | - name: Copy dist 57 | run: | 58 | mkdir dist 59 | mv target/${{ matrix.targets.target }}/release/mp4_merge${{ matrix.targets.ext }} dist/mp4_merge-${{ matrix.targets.name }}${{ matrix.targets.ext }} 60 | 61 | - name: Save Binaries 62 | uses: actions/upload-artifact@v4 63 | with: 64 | name: mp4_merge-${{ matrix.targets.name }} 65 | path: dist/mp4_merge* 66 | 67 | github_release: 68 | name: Create GitHub release 69 | needs: build 70 | runs-on: ubuntu-latest 71 | steps: 72 | - name: Download artifacts 73 | uses: actions/download-artifact@v4 74 | 75 | - run: ls -l 76 | 77 | - name: Publish 78 | uses: softprops/action-gh-release@v1 79 | if: startsWith(github.ref, 'refs/tags/') 80 | with: 81 | files: ./* 82 | 83 | -------------------------------------------------------------------------------- /src/insta360.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::BTreeMap, io::*}; 2 | use byteorder::{ LittleEndian, ReadBytesExt, WriteBytesExt }; 3 | use crate::writer::get_first; 4 | 5 | pub const HEADER_SIZE: usize = 32 + 4 + 4 + 32; // padding(32), size(4), version(4), magic(32) 6 | pub const MAGIC: &[u8] = b"8db42d694ccc418790edff439fe026bf"; 7 | 8 | pub fn get_insta360_offsets(files: &mut [(R, usize)]) -> Result>> { 9 | let mut ret = Vec::new(); 10 | for (ref mut stream, size) in files { 11 | let mut stream = 
std::io::BufReader::with_capacity(16*1024, stream); 12 | 13 | let mut buf = vec![0u8; HEADER_SIZE]; 14 | stream.seek(SeekFrom::End(-(HEADER_SIZE as i64)))?; 15 | stream.read_exact(&mut buf)?; 16 | let mut offsets = BTreeMap::new(); 17 | if &buf[HEADER_SIZE-32..] == MAGIC { 18 | let extra_size = (&buf[32..]).read_u32::()? as i64; 19 | let data_version = (&buf[36..]).read_u32::()?; 20 | let extra_start = *size - extra_size as usize; 21 | 22 | let mut offset = (HEADER_SIZE + 4+1+1) as i64; 23 | 24 | stream.seek(SeekFrom::End(-offset + 1))?; 25 | let first_id = stream.read_u8()?; 26 | if first_id == 0 { // record::RecordType::Offsets 27 | let size = stream.read_u32::()? as i64; 28 | buf.resize(size as usize, 0); 29 | stream.seek(SeekFrom::End(-offset - size))?; 30 | stream.read_exact(&mut buf)?; 31 | 32 | { // Parse offsets record 33 | let len = buf.len() as u64; 34 | let mut d = Cursor::new(buf.clone()); 35 | 36 | while d.position() < len as u64 { 37 | let id = d.read_u8()?; 38 | let format = d.read_u8()?; 39 | let size = d.read_u32::()? as i64; 40 | let offset = d.read_u32::()?; 41 | if id > 0 { 42 | offsets.insert(extra_start as u64 + offset as u64, (data_version, id, format, size)); 43 | } 44 | } 45 | } 46 | } else { 47 | while offset < extra_size { 48 | stream.seek(SeekFrom::End(-offset))?; 49 | 50 | let format = stream.read_u8()?; 51 | let id = stream.read_u8()?; 52 | let size = stream.read_u32::()? 
as i64; 53 | 54 | buf.resize(size as usize, 0); 55 | 56 | stream.seek(SeekFrom::End(-offset - size))?; 57 | if id > 0 { 58 | offsets.insert(stream.stream_position()?, (data_version, id, format, size)); 59 | } 60 | 61 | offset += size + 4+1+1; 62 | } 63 | } 64 | } 65 | ret.push(offsets); 66 | } 67 | Ok(ret) 68 | } 69 | 70 | pub fn merge_metadata(files: &mut [(R, usize)], offsets: &[BTreeMap], mut f_out: W) -> Result<()> { 71 | assert_eq!(files.len(), offsets.len()); 72 | 73 | let mut total_size = 0; 74 | let mut data_version = 3; 75 | 76 | for (offset, (ver, id, format, size)) in offsets.first().unwrap() { 77 | data_version = *ver; 78 | let first_stream = get_first(files); 79 | first_stream.seek(SeekFrom::Start(*offset))?; 80 | std::io::copy(&mut first_stream.take(*size as u64), &mut f_out)?; 81 | 82 | let format2 = first_stream.read_u8()?; 83 | let id2 = first_stream.read_u8()?; 84 | let mut size2 = first_stream.read_u32::()? as i64; 85 | 86 | if *id != id2 || *format != format2 || *size != size2 { 87 | return Err(Error::new(ErrorKind::InvalidData, "Invalid metadata")); 88 | } 89 | 90 | if id2 != 0 && id2 != 1 && id2 != 2 && id2 != 5 { // If not Offsets, Metadata, Thumbnail, ThumbnailExt 91 | // Merge binary data 92 | for (file_i, map) in offsets.iter().enumerate() { 93 | if file_i == 0 { continue; } 94 | for (offset, (_ver, id, _format, size)) in map { 95 | if id2 == *id { 96 | let stream_i = files.get_mut(file_i).map(|x| &mut x.0).unwrap(); 97 | stream_i.seek(SeekFrom::Start(*offset))?; 98 | std::io::copy(&mut stream_i.take(*size as u64), &mut f_out)?; 99 | size2 += *size as i64; 100 | } 101 | } 102 | } 103 | } 104 | f_out.write_u8(format2)?; 105 | f_out.write_u8(id2)?; 106 | f_out.write_u32::(size2 as u32)?; 107 | total_size += size2 + 1+1+4; 108 | } 109 | 110 | f_out.write_u128::(0)?; // padding 111 | f_out.write_u128::(0)?; // padding 112 | f_out.write_u32::(total_size as u32 + 72)?; 113 | f_out.write_u32::(data_version)?; // version 114 | f_out.write(MAGIC)?; 
115 | 116 | Ok(()) 117 | } 118 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT OR Apache-2.0 2 | // Copyright © 2022 Adrian 3 | 4 | use std::io::{ Read, Seek, Write, Result }; 5 | use std::path::*; 6 | use byteorder::{ BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt }; 7 | use std::time::Instant; 8 | 9 | mod desc_reader; 10 | mod progress_stream; 11 | mod writer; 12 | mod insta360; 13 | use progress_stream::*; 14 | 15 | // We need to: 16 | // - Merge mdat boxes 17 | // - Sum moov/mvhd/duration 18 | // - Sum moov/trak/tkhd/duration 19 | // - Sum moov/trak/mdia/mdhd/duration 20 | // - Sum moov/trak/edts/elst/segment duration 21 | // - Merge lists moov/trak/mdia/minf/stbl/stts 22 | // - Merge lists moov/trak/mdia/minf/stbl/stsz 23 | // - Merge lists moov/trak/mdia/minf/stbl/stss 24 | // - Merge lists moov/trak/mdia/minf/stbl/stco and co64 25 | // - Rewrite stco to co64 26 | 27 | const fn fourcc(s: &str) -> u32 { 28 | let s = s.as_bytes(); 29 | (s[3] as u32) | ((s[2] as u32) << 8) | ((s[1] as u32) << 16) | ((s[0] as u32) << 24) 30 | } 31 | const fn has_children(typ: u32, is_read: bool) -> bool { 32 | typ == fourcc("moov") || typ == fourcc("trak") || typ == fourcc("edts") || 33 | typ == fourcc("mdia") || typ == fourcc("minf") || typ == fourcc("stbl") || 34 | (typ == fourcc("stsd") && is_read) 35 | } 36 | fn typ_to_str(typ: u32) -> String { 37 | match String::from_utf8(vec![(typ >> 24) as u8, (typ >> 16) as u8, (typ >> 8) as u8, typ as u8 ]) { 38 | Ok(x) => x, 39 | Err(_) => format!("{:08X}", typ) 40 | } 41 | } 42 | 43 | pub fn read_box(reader: &mut R) -> Result<(u32, u64, u64, i64)> { 44 | let pos = reader.stream_position()?; 45 | let size = reader.read_u32::()?; 46 | let typ = reader.read_u32::()?; 47 | if size == 1 { 48 | let largesize = reader.read_u64::()?; 49 | Ok((typ, pos, largesize, 16)) 50 | } 
else { 51 | Ok((typ, pos, size as u64, 8)) 52 | } 53 | } 54 | 55 | pub fn join_files, F: Fn(f64)>(files: &[P], output_file: &P, progress_cb: F) -> Result<()> { 56 | let mut open_files = Vec::with_capacity(files.len()); 57 | for x in files { 58 | let f = std::fs::File::open(x)?; 59 | let size = f.metadata()?.len() as usize; 60 | open_files.push((f, size)); 61 | } 62 | join_file_streams(&mut open_files, std::fs::File::create(output_file)?, progress_cb) 63 | } 64 | 65 | pub fn join_file_streams(files: &mut [(I, usize)], output_file: O, progress_cb: F) -> Result<()> { 66 | // Get the merged description from all source files 67 | let mut desc = desc_reader::Desc::default(); 68 | desc.moov_tracks.resize(10, Default::default()); 69 | let mut total_size = 0; 70 | let num_files = files.len() as f64; 71 | let mut insta360_max_read = None; 72 | for (i, fs) in files.iter_mut().enumerate() { 73 | let filesize = fs.1; 74 | let mut fs = std::io::BufReader::with_capacity(16*1024, &mut fs.0); 75 | total_size += filesize; 76 | 77 | { // Find mdat first 78 | while let Ok((typ, offs, size, header_size)) = read_box(&mut fs) { 79 | let org_pos = fs.stream_position()?; 80 | if typ == fourcc("mdat") { 81 | log::debug!("Reading {}, offset: {}, size: {size}, header_size: {header_size}", typ_to_str(typ), offs); 82 | desc.mdat_position.push((None, org_pos, size - header_size as u64)); 83 | desc.mdat_final_position = org_pos; 84 | break; 85 | } 86 | fs.seek(std::io::SeekFrom::Start(org_pos + size - header_size as u64))?; 87 | } 88 | 89 | if insta360_max_read.is_none() { 90 | fs.seek(std::io::SeekFrom::End(-40))?; 91 | let mut buf = vec![0u8; 40]; 92 | fs.read_exact(&mut buf)?; 93 | // Check if it's Insta360 94 | if &buf[8..] == insta360::MAGIC { 95 | insta360_max_read = Some(filesize as u64 - (&buf[..]).read_u32::()? 
as u64); 96 | } 97 | } 98 | 99 | fs.seek(std::io::SeekFrom::Start(0))?; 100 | } 101 | 102 | desc_reader::read_desc(&mut fs, &mut desc, 0, u64::MAX, i)?; 103 | 104 | if let Some(mdat) = desc.mdat_position.last_mut() { 105 | mdat.0 = Some(i); 106 | desc.mdat_offset += mdat.2; 107 | for t in &mut desc.moov_tracks { 108 | t.sample_offset = t.stsz_count; 109 | t.chunk_offset = t.stco.len() as u32; 110 | } 111 | } 112 | 113 | progress_cb(((i as f64 + 1.0) / num_files) * 0.1); 114 | } 115 | 116 | // Write it to the file 117 | let mut debounce = Instant::now(); 118 | let f_out = ProgressStream::new(output_file, |total| { 119 | if (Instant::now() - debounce).as_millis() > 100 { 120 | progress_cb((0.1 + ((total as f64 / total_size as f64) * 0.9)).min(0.9999)); 121 | debounce = Instant::now(); 122 | } 123 | }); 124 | let mut f_out = std::io::BufWriter::with_capacity(64*1024, f_out); 125 | 126 | writer::get_first(files).seek(std::io::SeekFrom::Start(0))?; 127 | writer::rewrite_from_desc(files, &mut f_out, &mut desc, 0, insta360_max_read.unwrap_or(u64::MAX))?; 128 | 129 | // Patch final mdat positions 130 | for track in &desc.moov_tracks { 131 | f_out.seek(std::io::SeekFrom::Start(track.co64_final_position))?; 132 | for x in &track.stco { 133 | f_out.write_u64::(*x + desc.mdat_final_position)?; 134 | } 135 | } 136 | 137 | if insta360_max_read.is_some() { 138 | // Merge Insta360 metadata 139 | f_out.seek(std::io::SeekFrom::End(0))?; 140 | let offsets = insta360::get_insta360_offsets(files)?; 141 | insta360::merge_metadata(files, &offsets, f_out)?; 142 | } 143 | 144 | progress_cb(1.0); 145 | 146 | Ok(()) 147 | } 148 | 149 | pub fn update_file_times(input_path: &PathBuf, output_path: &PathBuf) { 150 | if let Err(e) = || -> std::io::Result<()> { 151 | let org_time = filetime_creation::FileTime::from_creation_time(&std::fs::metadata(&input_path)?).ok_or(std::io::ErrorKind::Other)?; 152 | if cfg!(target_os = "windows") { 153 | ::log::debug!("Updating creation time of {} to {}", 
output_path.display(), org_time.to_string()); 154 | filetime_creation::set_file_ctime(output_path, org_time)?; 155 | } else { 156 | ::log::debug!("Updating modification time of {} to {}", output_path.display(), org_time.to_string()); 157 | filetime_creation::set_file_mtime(output_path, org_time)?; 158 | } 159 | Ok(()) 160 | }() { 161 | ::log::warn!("Failed to update file times: {e:?}"); 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/desc_reader.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT OR Apache-2.0 2 | // Copyright © 2022 Adrian 3 | 4 | use std::io::{ Read, Seek, Result, SeekFrom }; 5 | use byteorder::{ ReadBytesExt, BigEndian }; 6 | use crate::{ fourcc, read_box, typ_to_str }; 7 | 8 | #[derive(Default, Clone, Debug)] 9 | pub struct TrackDesc { 10 | pub tkhd_duration: u64, 11 | pub elst_segment_duration: u64, 12 | pub mdhd_timescale: u32, 13 | pub mdhd_duration: u64, 14 | pub stts: Vec<(u32, u32)>, 15 | pub stsz: Vec, 16 | pub stco: Vec, 17 | pub stss: Vec, 18 | pub sdtp: Vec, 19 | pub sample_offset: u32, 20 | pub chunk_offset: u32, 21 | pub stsz_sample_size: u32, 22 | pub stsz_count: u32, 23 | pub stsc: Vec<(u32, u32, u32)>, // first_chunk, samples_per_chunk, sample_description_index 24 | pub co64_final_position: u64, 25 | pub skip: bool, 26 | } 27 | 28 | #[derive(Default, Clone, Debug)] 29 | pub struct Desc { 30 | pub mdat_position: Vec<(Option, u64, u64)>, // file path, offset, size 31 | pub mvhd_timescale_per_file: Vec, 32 | pub moov_mvhd_timescale: u32, 33 | pub moov_mvhd_duration: u64, 34 | pub moov_tracks: Vec, 35 | pub mdat_offset: u64, 36 | pub mdat_final_position: u64, 37 | } 38 | 39 | pub fn read_desc(d: &mut R, desc: &mut Desc, track: usize, max_read: u64, file_index: usize) -> Result<()> { 40 | let mut tl_track = track; 41 | let start_offs = d.stream_position()?; 42 | desc.mvhd_timescale_per_file.push(0); 43 | 
while let Ok((typ, offs, size, header_size)) = read_box(d) { 44 | if size == 0 || typ == 0 { continue; } 45 | if crate::has_children(typ, true) { 46 | read_desc(d, desc, tl_track, size - header_size as u64, file_index)?; 47 | 48 | if typ == fourcc("trak") { 49 | tl_track += 1; 50 | } 51 | } else { 52 | log::debug!("Reading {}, offset: {}, size: {size}, header_size: {header_size}", typ_to_str(typ), offs); 53 | let org_pos = d.stream_position()?; 54 | // if typ == fourcc("mdat") { 55 | // desc.mdat_position.push((None, org_pos, size - header_size as u64)); 56 | // desc.mdat_final_position = org_pos; 57 | // } 58 | if typ == fourcc("mvhd") || typ == fourcc("tkhd") || typ == fourcc("mdhd") { 59 | let (v, _flags) = (d.read_u8()?, d.read_u24::()?); 60 | if typ == fourcc("mvhd") { 61 | let timescale = if v == 1 { d.seek(SeekFrom::Current(8+8))?; d.read_u32::()? } 62 | else { d.seek(SeekFrom::Current(4+4))?; d.read_u32::()? }; 63 | let duration = if v == 1 { d.read_u64::()? } 64 | else { d.read_u32::()? as u64 }; 65 | if desc.moov_mvhd_timescale == 0 { 66 | desc.moov_mvhd_timescale = timescale; 67 | } 68 | desc.mvhd_timescale_per_file[file_index] = timescale; 69 | desc.moov_mvhd_duration += ((duration as f64 / timescale as f64) * desc.moov_mvhd_timescale as f64).ceil() as u64; 70 | } 71 | if let Some(track_desc) = desc.moov_tracks.get_mut(tl_track) { 72 | if typ == fourcc("tkhd") { 73 | let duration = if v == 1 { d.seek(SeekFrom::Current(8+8+4+4))?; d.read_u64::()? } 74 | else { d.seek(SeekFrom::Current(4+4+4+4))?; d.read_u32::()? as u64 }; 75 | track_desc.tkhd_duration += ((duration as f64 / *desc.mvhd_timescale_per_file.get(file_index).ok_or(std::io::Error::other("Invalid index"))? as f64) * desc.moov_mvhd_timescale as f64).ceil() as u64; 76 | } 77 | if typ == fourcc("mdhd") { 78 | let timescale = if v == 1 { d.seek(SeekFrom::Current(8+8))?; d.read_u32::()? } 79 | else { d.seek(SeekFrom::Current(4+4))?; d.read_u32::()? }; 80 | let duration = if v == 1 { d.read_u64::()? 
} 81 | else { d.read_u32::()? as u64 }; 82 | if track_desc.mdhd_timescale == 0 { 83 | track_desc.mdhd_timescale = timescale; 84 | } 85 | let add_duration = ((duration as f64 / timescale as f64) * track_desc.mdhd_timescale as f64).ceil() as u64; 86 | track_desc.mdhd_duration += add_duration; 87 | } 88 | } 89 | } 90 | if typ == fourcc("elst") || typ == fourcc("stts") || typ == fourcc("stsz") || typ == fourcc("stss") || 91 | typ == fourcc("stco") || typ == fourcc("co64") || typ == fourcc("sdtp") || typ == fourcc("stsc") { 92 | let track_desc = desc.moov_tracks.get_mut(tl_track).unwrap(); 93 | if !(track_desc.skip && file_index > 0) { 94 | let (v, _flags) = (d.read_u8()?, d.read_u24::()?); 95 | 96 | if typ == fourcc("elst") { 97 | let entry_count = d.read_u32::()?; 98 | for _ in 0..entry_count { 99 | let segment_duration = if v == 1 { d.read_u64::()? } else { d.read_u32::()? as u64 }; 100 | let media_time = if v == 1 { d.read_i64::()? } else { d.read_i32::()? as i64 }; 101 | d.seek(SeekFrom::Current(4))?; // Skip Media rate 102 | if media_time != -1 { 103 | track_desc.elst_segment_duration += segment_duration; 104 | } 105 | } 106 | } 107 | if typ == fourcc("stsz") { 108 | track_desc.stsz_sample_size = d.read_u32::()?; 109 | let count = d.read_u32::()?; 110 | if track_desc.stsz_sample_size == 0 { 111 | for _ in 0..count { track_desc.stsz.push(d.read_u32::()?); } 112 | } 113 | track_desc.stsz_count += count; 114 | } 115 | if typ == fourcc("sdtp") { 116 | let count = size - header_size as u64 - 4; 117 | for _ in 0..count { track_desc.sdtp.push(d.read_u8()?); } 118 | } 119 | if typ == fourcc("stss") || typ == fourcc("stco") || typ == fourcc("co64") || typ == fourcc("stts") || typ == fourcc("stsc") { 120 | let count = d.read_u32::()?; 121 | let current_file_mdat_position = desc.mdat_position.last().unwrap().1; 122 | let mdat_offset = desc.mdat_offset as i64 - current_file_mdat_position as i64; 123 | for _ in 0..count { 124 | if typ == fourcc("stss") { 
track_desc.stss.push(d.read_u32::()? + track_desc.sample_offset); } 125 | if typ == fourcc("stco") { track_desc.stco.push((d.read_u32::()? as i64 + mdat_offset) as u64); } 126 | if typ == fourcc("co64") { track_desc.stco.push((d.read_u64::()? as i64 + mdat_offset) as u64); } 127 | if typ == fourcc("stts") { track_desc.stts.push((d.read_u32::()?, d.read_u32::()?)); } 128 | if typ == fourcc("stsc") { track_desc.stsc.push(( 129 | d.read_u32::()? + track_desc.chunk_offset, 130 | d.read_u32::()?, 131 | d.read_u32::()? 132 | )); } 133 | } 134 | } 135 | } 136 | } 137 | if typ == fourcc("tmcd") { 138 | // Timecode shouldn't be merged 139 | let track_desc = desc.moov_tracks.get_mut(tl_track).unwrap(); 140 | track_desc.skip = true; 141 | } 142 | d.seek(SeekFrom::Start(org_pos + size - header_size as u64))?; 143 | } 144 | if d.stream_position()? - start_offs >= max_read { 145 | break; 146 | } 147 | } 148 | Ok(()) 149 | } 150 | -------------------------------------------------------------------------------- /src/writer.rs: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: MIT OR Apache-2.0 2 | // Copyright © 2022 Adrian 3 | 4 | use std::io::{ Read, Write, Seek, Result, SeekFrom }; 5 | use byteorder::{ ReadBytesExt, WriteBytesExt, BigEndian }; 6 | use crate::{ fourcc, read_box, typ_to_str, desc_reader::Desc }; 7 | 8 | pub(crate) fn get_first(files: &mut [(R, usize)]) -> &mut R { files.get_mut(0).map(|x| &mut x.0).unwrap() } 9 | 10 | pub fn rewrite_from_desc(files: &mut [(R, usize)], output_file: &mut W, desc: &mut Desc, track: usize, max_read: u64) -> Result { 11 | let mut total_read_size = 0; 12 | let mut total_new_size = 0; 13 | let mut tl_track = track; 14 | while let Ok((typ, offs, size, header_size)) = read_box(get_first(files)) { 15 | if size == 0 || typ == 0 { break; } 16 | 17 | total_read_size += size; 18 | let mut new_size = size; 19 | if crate::has_children(typ, false) { 20 | let d = get_first(files); 21 | 
// Copy the header 22 | d.seek(SeekFrom::Current(-header_size))?; 23 | let out_pos = output_file.stream_position()?; 24 | std::io::copy(&mut d.take(header_size as u64), output_file)?; 25 | new_size = rewrite_from_desc(files, output_file, desc, tl_track, size - header_size as u64)?; 26 | new_size += header_size as u64; 27 | 28 | if typ == fourcc("trak") { 29 | tl_track += 1; 30 | } 31 | 32 | if new_size != size { 33 | log::debug!("Patching size from {size} to {new_size}"); 34 | patch_bytes(output_file, out_pos, &(new_size as u32).to_be_bytes())?; 35 | } 36 | } else if typ == fourcc("mdat") { 37 | log::debug!("Merging mdat's, offset: {}, size: {size}", offs); 38 | 39 | output_file.write_all(&1u32.to_be_bytes())?; 40 | output_file.write_all(&fourcc("mdat").to_be_bytes())?; 41 | let pos = output_file.stream_position()?; 42 | output_file.write_all(&0u64.to_be_bytes())?; 43 | new_size = 16; 44 | 45 | desc.mdat_final_position = output_file.stream_position()?; 46 | 47 | // Merge all mdats 48 | for (file_index, mo, ms) in &desc.mdat_position { 49 | if let Some(file_index) = file_index { 50 | if let Some(f) = files.get_mut(*file_index).map(|x| &mut x.0) { 51 | let prev_pos = f.stream_position()?; 52 | f.seek(SeekFrom::Start(*mo))?; 53 | std::io::copy(&mut f.take(*ms), output_file)?; 54 | f.seek(SeekFrom::Start(prev_pos))?; 55 | new_size += ms; 56 | } 57 | } 58 | } 59 | patch_bytes(output_file, pos, &new_size.to_be_bytes())?; 60 | 61 | get_first(files).seek(SeekFrom::Current(size as i64 - header_size))?; 62 | 63 | } else if typ == fourcc("mvhd") || typ == fourcc("tkhd") || typ == fourcc("mdhd") || typ == fourcc("elst") { 64 | log::debug!("Writing {} with patched duration, offset: {}, size: {size}", typ_to_str(typ), offs); 65 | let d = get_first(files); 66 | 67 | let (v, _flags) = (d.read_u8()?, d.read_u24::()?); 68 | 69 | // Copy the original box 70 | d.seek(SeekFrom::Current(-header_size - 4))?; 71 | let pos = output_file.stream_position()? 
+ header_size as u64 + 4; 72 | std::io::copy(&mut d.take(size), output_file)?; 73 | 74 | // Patch values 75 | if typ == fourcc("mvhd") { 76 | if v == 1 { patch_bytes(output_file, pos+8+8+4, &desc.moov_mvhd_duration.to_be_bytes())?; } 77 | else { patch_bytes(output_file, pos+4+4+4, &(desc.moov_mvhd_duration as u32).to_be_bytes())?; } 78 | } 79 | if let Some(track_desc) = desc.moov_tracks.get(tl_track) { 80 | if typ == fourcc("tkhd") { 81 | if v == 1 { patch_bytes(output_file, pos+8+8+8+4, &track_desc.tkhd_duration.to_be_bytes())?; } 82 | else { patch_bytes(output_file, pos+4+4+4+4, &(track_desc.tkhd_duration as u32).to_be_bytes())?; }; 83 | } 84 | if typ == fourcc("mdhd") { 85 | if v == 1 { patch_bytes(output_file, pos+8+8+4, &track_desc.mdhd_duration.to_be_bytes())?; } 86 | else { patch_bytes(output_file, pos+4+4+4, &(track_desc.mdhd_duration as u32).to_be_bytes())?; } 87 | } 88 | if typ == fourcc("elst") { 89 | let mut elst_duration = track_desc.elst_segment_duration; 90 | if elst_duration != 0 && track_desc.mdhd_duration > elst_duration { 91 | elst_duration = track_desc.mdhd_duration; 92 | } 93 | 94 | if v == 1 { patch_bytes(output_file, pos+4, &elst_duration.to_be_bytes())?; } 95 | else { patch_bytes(output_file, pos+4, &(elst_duration as u32).to_be_bytes())?; } 96 | } 97 | } 98 | 99 | } else if typ == fourcc("stts") || typ == fourcc("stsz") || typ == fourcc("stss") || typ == fourcc("stco") || typ == fourcc("co64") || typ == fourcc("sdtp") || typ == fourcc("stsc") { 100 | log::debug!("Writing new {}, offset: {}, size: {size}", typ_to_str(typ), offs); 101 | 102 | get_first(files).seek(SeekFrom::Current(size as i64 - header_size))?; 103 | 104 | let out_pos = output_file.stream_position()?; 105 | new_size = 12; 106 | output_file.write_all(&0u32.to_be_bytes())?; 107 | let new_typ = if typ == fourcc("stco") { fourcc("co64") } else { typ }; 108 | output_file.write_all(&new_typ.to_be_bytes())?; 109 | output_file.write_all(&0u32.to_be_bytes())?; // flags 110 | 111 | let 
track_desc = desc.moov_tracks.get_mut(tl_track).unwrap(); 112 | if typ == fourcc("stts") { 113 | let mut new_stts: Vec<(u32, u32)> = Vec::with_capacity(track_desc.stts.len()); 114 | let mut prev_delta = None; 115 | for x in &track_desc.stts { 116 | if let Some(prev_delta) = prev_delta { 117 | if prev_delta == x.1 { new_stts.last_mut().unwrap().0 += x.0; continue; } 118 | } 119 | prev_delta = Some(x.1); 120 | new_stts.push(*x); 121 | } 122 | output_file.write_u32::(new_stts.len() as u32)?; 123 | new_size += 4; 124 | for (count, delta) in &new_stts { 125 | output_file.write_u32::(*count)?; 126 | output_file.write_u32::(*delta)?; 127 | new_size += 8; 128 | } 129 | } 130 | if typ == fourcc("stsz") { 131 | output_file.write_u32::(track_desc.stsz_sample_size)?; // sample_size 132 | output_file.write_u32::(track_desc.stsz_count)?; 133 | new_size += 8; 134 | for x in &track_desc.stsz { output_file.write_u32::(*x)?; new_size += 4; } 135 | } 136 | if typ == fourcc("stss") { 137 | output_file.write_u32::(track_desc.stss.len() as u32)?; 138 | new_size += 4; 139 | for x in &track_desc.stss { output_file.write_u32::(*x)?; new_size += 4; } 140 | } 141 | if typ == fourcc("stco") || typ == fourcc("co64") { 142 | output_file.write_u32::(track_desc.stco.len() as u32)?; 143 | new_size += 4; 144 | track_desc.co64_final_position = output_file.stream_position()?; 145 | for x in &track_desc.stco { 146 | output_file.write_u64::(*x + desc.mdat_final_position)?; 147 | new_size += 8; 148 | } 149 | } 150 | if typ == fourcc("sdtp") { 151 | for x in &track_desc.sdtp { output_file.write_u8(*x)?; new_size += 1; } 152 | } 153 | if typ == fourcc("stsc") { 154 | output_file.write_u32::(track_desc.stsc.len() as u32)?; 155 | new_size += 4; 156 | for x in &track_desc.stsc { 157 | output_file.write_u32::(x.0)?; 158 | output_file.write_u32::(x.1)?; 159 | output_file.write_u32::(x.2)?; 160 | new_size += 12; 161 | } 162 | } 163 | patch_bytes(output_file, out_pos, &(new_size as u32).to_be_bytes())?; 164 | } 
else { 165 | log::debug!("Writing original {}, offset: {}, size: {size}", typ_to_str(typ), offs); 166 | let d = get_first(files); 167 | 168 | // Copy without changes 169 | d.seek(SeekFrom::Current(-header_size))?; 170 | std::io::copy(&mut d.take(size), output_file)?; 171 | } 172 | total_new_size += new_size; 173 | if total_read_size >= max_read { 174 | break; 175 | } 176 | } 177 | Ok(total_new_size) 178 | } 179 | 180 | pub fn patch_bytes(writer: &mut W, position: u64, bytes: &[u8]) -> Result<()> { 181 | let new_pos = writer.stream_position()?; 182 | writer.seek(SeekFrom::Start(position))?; 183 | writer.write_all(bytes)?; 184 | writer.seek(SeekFrom::Start(new_pos))?; 185 | Ok(()) 186 | } -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------